code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import matplotlib as mpl
from matplotlib.colors import to_rgb, to_rgba
from numpy.testing import assert_array_equal
# Line2D properties compared when asserting two line artists are equal.
LINE_PROPS = [
    "alpha",
    "color",
    "linewidth",
    "linestyle",
    "xydata",
    "zorder",
]
# Collection (e.g. PolyCollection) properties compared in artist assertions.
COLLECTION_PROPS = [
    "alpha",
    "edgecolor",
    "facecolor",
    "fill",
    "hatch",
    "linestyle",
    "linewidth",
    "paths",
    "zorder",
]
# Rectangle/bar patch properties compared in artist assertions.
BAR_PROPS = [
    "alpha",
    "edgecolor",
    "facecolor",
    "fill",
    "hatch",
    "height",
    "linestyle",
    "linewidth",
    "xy",
    "zorder",
]
def assert_colors_equal(a, b, check_alpha=True):
    """Assert that two color specifications resolve to the same RGB(A) tuple.

    Array inputs with more than one dimension must collapse to a single
    unique color; otherwise a ValueError is raised.
    """
    def flatten_color(value):
        # Collapse an (n, k) array of identical rows down to one color vector.
        if isinstance(value, np.ndarray) and value.ndim > 1:
            value = np.unique(value, axis=0).squeeze()
            if value.ndim > 1:
                raise ValueError("Color arrays must be 1 dimensional")
        return value

    convert = to_rgba if check_alpha else to_rgb
    assert convert(flatten_color(a)) == convert(flatten_color(b))
def assert_artists_equal(list1, list2, properties):
    """Assert two equally-long artist lists agree on the given properties.

    "paths" entries are compared vertex/code-wise, ndarray values
    element-wise, and "color" values after normalization to RGBA.
    """
    assert len(list1) == len(list2)
    for art1, art2 in zip(list1, list2):
        props1 = art1.properties()
        props2 = art2.properties()
        for name in properties:
            val1, val2 = props1[name], props2[name]
            if name == "paths":
                for path1, path2 in zip(val1, val2):
                    assert_array_equal(path1.vertices, path2.vertices)
                    assert_array_equal(path1.codes, path2.codes)
            elif isinstance(val1, np.ndarray):
                assert_array_equal(val1, val2)
            elif name == "color":
                assert mpl.colors.to_rgba(val1) == mpl.colors.to_rgba(val2)
            else:
                assert val1 == val2
def assert_legends_equal(leg1, leg2):
    """Assert two legends share title, entry texts, and artist styling."""
    title1 = leg1.get_title().get_text()
    title2 = leg2.get_title().get_text()
    assert title1 == title2
    for text1, text2 in zip(leg1.get_texts(), leg2.get_texts()):
        assert text1.get_text() == text2.get_text()
    # Compare the legend handles themselves (patches and lines).
    assert_artists_equal(leg1.get_patches(), leg2.get_patches(), BAR_PROPS)
    assert_artists_equal(leg1.get_lines(), leg2.get_lines(), LINE_PROPS)
def assert_plots_equal(ax1, ax2, labels=True):
    """Assert two Axes hold matching patches, lines, and poly collections.

    When ``labels`` is true, also require identical x/y axis labels.
    """
    assert_artists_equal(ax1.patches, ax2.patches, BAR_PROPS)
    assert_artists_equal(ax1.lines, ax2.lines, LINE_PROPS)
    polys1 = ax1.findobj(mpl.collections.PolyCollection)
    polys2 = ax2.findobj(mpl.collections.PolyCollection)
    assert_artists_equal(polys1, polys2, COLLECTION_PROPS)
    if not labels:
        return
    assert ax1.get_xlabel() == ax2.get_xlabel()
    assert ax1.get_ylabel() == ax2.get_ylabel()
| [
"matplotlib.colors.to_rgba",
"numpy.unique",
"numpy.testing.assert_array_equal"
] | [((1359, 1403), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['p1.vertices', 'p2.vertices'], {}), '(p1.vertices, p2.vertices)\n', (1377, 1403), False, 'from numpy.testing import assert_array_equal\n'), ((1424, 1462), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['p1.codes', 'p2.codes'], {}), '(p1.codes, p2.codes)\n', (1442, 1462), False, 'from numpy.testing import assert_array_equal\n'), ((1524, 1550), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['v1', 'v2'], {}), '(v1, v2)\n', (1542, 1550), False, 'from numpy.testing import assert_array_equal\n'), ((723, 743), 'numpy.unique', 'np.unique', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (732, 743), True, 'import numpy as np\n'), ((1605, 1627), 'matplotlib.colors.to_rgba', 'mpl.colors.to_rgba', (['v1'], {}), '(v1)\n', (1623, 1627), True, 'import matplotlib as mpl\n'), ((1649, 1671), 'matplotlib.colors.to_rgba', 'mpl.colors.to_rgba', (['v2'], {}), '(v2)\n', (1667, 1671), True, 'import matplotlib as mpl\n')] |
#!/usr/bin/env python
#
# Copyright (C) 2009 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement # for python 2.5
PKG = 'posedetectiondb' # this package name
NAME = 'GatherDetectionResults'
import roslib; roslib.load_manifest(PKG)
import os, sys, time, string, threading
from optparse import OptionParser
import numpy # nice to be able to explicitly call some functions
from numpy import *
import rospy
import posedetection_msgs.msg
from openravepy import *
class VisibilityModel(metaclass.AutoReloader):
    """Builds a reduced model of camera "visibility" measurements around an object.

    Raw measurements are 3D camera-direction samples; they can optionally be
    mirrored across a symmetry plane and pruned/resampled into a uniform set.

    NOTE(review): ``metaclass.AutoReloader`` is not imported explicitly in this
    file; presumably it comes in via ``from openravepy import *`` -- verify.
    """
    def __init__(self,measurements=None,filename=None,symmetricplane=None,kinbodyfile=None):
        """Initialize from raw measurements and/or an OpenRAVE kinbody file.

        measurements: Nx3 array of raw measurement points.
        filename: unused by this constructor.
        symmetricplane: plane coefficients (a, b, c, d); if given, every point
            is mirrored across the plane to double the data set.
        kinbodyfile: OpenRAVE kinbody XML file; loaded to triangulate the mesh.
        """
        if measurements is not None:
            self.rawmeasurements = measurements
            if symmetricplane is not None:
                # signed distance of each measurement to the symmetry plane
                dists = dot(symmetricplane[0:3],transpose(self.rawmeasurements))+symmetricplane[3]
                # append the mirror image of every point across the plane
                self.rawmeasurements = r_[self.rawmeasurements,self.rawmeasurements-dot(reshape(2.0*dists,(dists.shape[0],1)),reshape(symmetricplane[0:3],(1,3)))]
        self.trimesh = None
        if kinbodyfile is not None:
            self.env = Environment()
            self.orobj = self.env.ReadKinBodyXMLFile(kinbodyfile)
            if self.orobj is None:
                raise ValueError('failed to open %s openrave file'%kinbodyfile)
            self.env.AddKinBody(self.orobj)
            self.trimesh = self.env.Triangulate(self.orobj)
        self.measurements = self.rawmeasurements
    def CreateReducedModel(self,bandwidth=0.04,bandthresh=0.01,neighthresh=0.01,showdata=False,savefile=None):
        """Prune near-duplicates, resample the space uniformly, and save.

        Writes the uniform points to ``savefile`` (defaults to getfilename()).
        """
        self.measurements,indices = self.Prune(self.rawmeasurements,100,neighthresh**2,1)
        uniformpoints,dists,pointscale = self.UniformlySampleSpace(bandwidth,bandthresh)
        if showdata:
            # optional 3D visualization of the density field and object mesh
            from enthought.mayavi import mlab
            mlab.figure(1,fgcolor=(0,0,0), bgcolor=(1,1,1))
            src = mlab.pipeline.scalar_field(dists)
            mlab.pipeline.iso_surface(src,contours=[0.01],opacity=0.1)
            mlab.pipeline.volume(mlab.pipeline.scalar_field(dists*500))
            v = pointscale[0]*self.trimesh.vertices+pointscale[1]
            mlab.triangular_mesh(v[:,0],v[:,1],v[:,2],self.trimesh.indices,color=(0,0,0.5))
        if savefile is None:
            savefile = self.getfilename()
        print('saving measurements to %s'%savefile)
        # NOTE(review): mkdir_recursive is not defined in this file; presumably
        # provided by ``from openravepy import *`` -- verify.
        mkdir_recursive(os.path.split(savefile)[0])
        savetxt(savefile,uniformpoints,'%f')
    def getfilename(self):
        """Default save path, keyed by the kinbody's kinematics/geometry hash."""
        return os.path.join(self.env.GetHomeDirectory(),'kinbody.'+self.orobj.GetKinematicsGeometryHash(),'visibility.txt')
    def UniformlySampleSpace(self,bandwidth,bandthresh,delta=0.02):
        """Kernel-density estimate of the measurements on a regular grid.

        Returns (uniformpoints, alldists, pointscale): the grid points whose
        density exceeds ``bandthresh`` (scaled back by ``bandwidth``), the full
        density volume reshaped to the grid, and (1/delta, nsteps).
        """
        maxradius = sqrt(max(sum(self.measurements**2,1)))
        nsteps = floor(maxradius/delta)
        X,Y,Z = mgrid[-nsteps:nsteps,-nsteps:nsteps,-nsteps:nsteps]
        allpoints = c_[X.flat,Y.flat,Z.flat]*delta/bandwidth
        # keep only grid cells inside the bounding sphere of the measurements
        sampleinds = flatnonzero(sum(allpoints**2,1)<(maxradius/bandwidth)**2)
        samplepoints = allpoints[sampleinds,:]
        kdtree = pyANN.KDTree(self.measurements/bandwidth)
        sampledists = zeros(samplepoints.shape[0])
        goodpoints = []
        for i in xrange(samplepoints.shape[0]):
            # fixed-radius search; entries with neighs < 0 are unused slots
            neighs,dists,kball = kdtree.kFRSearchArray(samplepoints[i:(i+1),:],5.0**2,32,0.0001)
            sampledists[i] = sum(exp(-dists[neighs>=0]))
        uniformpoints = samplepoints[sampledists>bandthresh,:]*bandwidth
        alldists = zeros(prod(X.shape))
        alldists[sampleinds] = sampledists
        return uniformpoints,reshape(alldists,X.shape),array((1.0/delta,nsteps))
    def Prune(self,rawposes, nsize, thresh2, neighsize,giveupiters=100):
        """Randomly thin ``rawposes`` (Nx7) down to at most ``nsize`` rows.

        A randomly chosen pose is dropped when it has more than ``neighsize``
        neighbors within squared distance ``thresh2``. Gives up after
        ``giveupiters`` iterations without a removal (or 5000 total).
        Returns (pruned poses, their original indices).

        NOTE(review): ``indices[ind] = ...`` requires ``range`` to return a
        list, i.e. Python 2 (consistent with ``xrange`` used above) -- verify.
        """
        iter = 1
        poses = array(rawposes)
        indices = range(poses.shape[0])
        N = poses.shape[0]
        nochange=0
        while N > nsize:
            ind = numpy.random.randint(N)
            g = poses[ind,:]
            # check g's neighbors
            d = sum((poses[0:N,:] - tile(g, (N,1)))**2,1)
            neigh = sum(d < thresh2)
            if neigh > neighsize:
                # move to the last pose and resize
                poses[ind,:] = poses[N-1,:]
                indices[ind] = indices[N-1]
                nochange=0
                N -= 1
            nochange += 1
            iter += 1
            if iter > 5000 or nochange > giveupiters:
                break
        return poses[0:N,:],indices[0:N]
class OpenRAVEVisualizer(metaclass.AutoReloader):
    """Visualizes incoming ROS object detections in an OpenRAVE viewer and
    records camera-direction measurements relative to the detected object.

    Subscribes to the "ObjectDetection" topic; ``self.Tcamera`` (the inverse
    of the detected object pose) is shared between the ROS callback thread
    and user commands, guarded by ``self.lck``.
    """
    def __init__(self,kinbodyfile,automaticadd=True,measurementsfilename=None):
        """kinbodyfile: OpenRAVE kinbody XML of the tracked object.
        automaticadd: record a measurement on every detection callback.
        measurementsfilename: optional text file of previous measurements to
            load and draw at startup (one whitespace-separated vector per line).
        """
        self.automaticadd = automaticadd
        self.orenv = Environment()
        self.orenv.SetViewer('qtcoin')
        self.orobj = self.orenv.ReadKinBodyXMLFile(kinbodyfile)
        if self.orobj is None:
            raise ValueError('failed to open %s openrave file'%kinbodyfile)
        self.orenv.AddKinBody(self.orobj)
        self.objab = self.orobj.ComputeAABB()
        self.Tcamera = None
        self.lck = threading.Lock()
        self.camerahandle = None
        self.measurementhandles = []
        self.measurements = []
        if measurementsfilename is not None:
            f = open(measurementsfilename,'r')
            for l in f.readlines():
                # string.atof/string.split are Python-2-only helpers
                m = array([string.atof(s) for s in string.split(l)])
                self.drawmeasurement(m)
                self.measurements.append(m)
            f.close()
        self.sub_objdet = rospy.Subscriber("ObjectDetection", posedetection_msgs.msg.ObjectDetection,self.objdetcb, queue_size=1)
        rospy.init_node(NAME, anonymous=True)#,disable_signals=False)
    def __del__(self):
        # Stop the subscription and tear down the OpenRAVE environment.
        self.sub_objdet.unregister()
        self.orenv.Destroy()
    def objdetcb(self,msg):
        """ROS callback: draw the camera frustum for the first detected object
        and (optionally) record a measurement."""
        newcamerahandle = None
        if len(msg.objects) > 0:
            q = msg.objects[0].pose.orientation
            t = msg.objects[0].pose.position
            with self.lck:
                # camera pose = inverse of the object's pose in camera frame
                self.Tcamera = linalg.inv(matrixFromPose([q.w,q.x,q.y,q.z,t.x,t.y,t.z]))
            # draw in openrave environment
            sx = 0.02
            sy = 0.02
            # wireframe pyramid approximating the camera frustum (local frame)
            camlocalpts = array(((sx,sy,0.05),(-sx,sy,0.05),(-sx,-sy,0.05),(sx,-sy,0.05),(sx,sy,0.05),(0,0,0),(-sx,sy,0.05),(0,0,0),(-sx,-sy,0.05),(0,0,0),(sx,-sy,0.05)))
            campts = dot(self.Tcamera,r_[transpose(camlocalpts),ones((1,camlocalpts.shape[0]))])
            self.camerahandle = self.orenv.drawlinestrip(transpose(campts[0:3,:]),2,array([0,0,1]))
            if self.automaticadd:
                self.addmeasurement()
    def addmeasurement(self):
        """Record the current camera direction (scaled by its distance to the
        object's bounding-box center) as one measurement."""
        self.lck.acquire()
        Tcamera = self.Tcamera
        self.lck.release()
        dist = dot(Tcamera[0:3,2],self.objab.pos()-Tcamera[0:3,3:4])
        m = Tcamera[0:3,2]*dist
        self.drawmeasurement(m)
        self.measurements.append(m)
        print('num measurements %d'%len(self.measurements))
    def drawmeasurement(self,m):
        """Draw one measurement as a short line segment on the side of the
        object's bounding box opposite the camera; color encodes distance."""
        dist = sqrt(sum(m**2))
        dir = m/dist
        p = self.objab.pos()-dist*dir
        self.measurementhandles.append(self.orenv.drawlinestrip(transpose(c_[p-0.02*dir,p+0.02*dir]),4,array((1,min(1,dist/1.0),0))))
    def savemeasurements(self,filename):
        """Write all recorded measurements to ``filename`` (text, one per line)."""
        self.lck.acquire()
        print('saving measurements to %s'%filename)
        savetxt(filename,self.measurements,'%f')
        self.lck.release()
if __name__=='__main__':
    # Command-line front end: show incoming detections in OpenRAVE and record
    # camera-direction measurements, either automatically or on user command.
    parser = OptionParser(description='Gather object detection transformations and filter and display them.')
    parser.add_option('--kinbodyfile',
                      action="store",type='string',dest='kinbodyfile',
                      help='OpenRAVE object file that represents the incoming object pose. Updates are show in the openrave window')
    parser.add_option('-s','--single',
                      action="store_true",dest='single',default=False,
                      help='If set, will wait for user input in order to add a measurement')
    parser.add_option('-f','--savefile',
                      action="store",dest="filename",
                      help='If specified, will save all recorded measurements to this file at exit time')
    parser.add_option('-m','--measurements',
                      action="store",dest="measurements",default=None,
                      help='If specified, will start with the current measurements file')
    (options, args) = parser.parse_args()
    if not options.kinbodyfile:
        print('Error: Need to specify an openrave kinbody file')
        sys.exit(1)
    # automatic capture unless --single was requested
    visualizer = OpenRAVEVisualizer(options.kinbodyfile,measurementsfilename=options.measurements,automaticadd=not options.single)
    # interactive command loop (raw_input: Python 2)
    while True:
        cmd = raw_input('Enter command (q-quit and save,c-capture): ');
        if cmd == 'q':
            break
        elif cmd == 'c' and options.single:
            print('adding measurement')
            visualizer.addmeasurement()
        else:
            print('bad command',cmd)
    # persist measurements on exit when a save file was given
    if options.filename:
        visualizer.savemeasurements(options.filename)
def test():
    "rosrun posedetectiondb GatherDetectionResults.py --kinbodyfile=scenes/cereal_frootloops.kinbody.xml -f test.txt ObjectDetection:=/CerealDetection"
    # Ad-hoc developer helper: rebuild reduced visibility models from saved raw
    # measurement files. Not invoked anywhere; kept for reference only.
    # NOTE(review): the second assignment overwrites the first model; loadtxt
    # and array presumably come from ``from numpy import *``.
    import GatherDetectionResults
    self = GatherDetectionResults.VisibilityModel(measurements=loadtxt('uroncha_raw.txt'),symmetricplane=array([1.0,0,0,0]),kinbodyfile='scenes/uroncha.kinbody.xml')
    self = GatherDetectionResults.VisibilityModel(measurements=loadtxt('peachjuice_raw.txt'),kinbodyfile='scenes/peachjuice.kinbody.xml')
    self.CreateReducedModel(bandwidth=0.03,bandthresh=0.02)
| [
"enthought.mayavi.mlab.pipeline.scalar_field",
"threading.Lock",
"rospy.init_node",
"string.split",
"enthought.mayavi.mlab.figure",
"optparse.OptionParser",
"string.atof",
"roslib.load_manifest",
"os.path.split",
"numpy.random.randint",
"enthought.mayavi.mlab.pipeline.iso_surface",
"sys.exit",... | [((746, 771), 'roslib.load_manifest', 'roslib.load_manifest', (['PKG'], {}), '(PKG)\n', (766, 771), False, 'import roslib\n'), ((7884, 7985), 'optparse.OptionParser', 'OptionParser', ([], {'description': '"""Gather object detection transformations and filter and display them."""'}), "(description=\n 'Gather object detection transformations and filter and display them.')\n", (7896, 7985), False, 'from optparse import OptionParser\n'), ((5511, 5527), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (5525, 5527), False, 'import os, sys, time, string, threading\n'), ((5959, 6067), 'rospy.Subscriber', 'rospy.Subscriber', (['"""ObjectDetection"""', 'posedetection_msgs.msg.ObjectDetection', 'self.objdetcb'], {'queue_size': '(1)'}), "('ObjectDetection', posedetection_msgs.msg.ObjectDetection,\n self.objdetcb, queue_size=1)\n", (5975, 6067), False, 'import rospy\n'), ((6071, 6108), 'rospy.init_node', 'rospy.init_node', (['NAME'], {'anonymous': '(True)'}), '(NAME, anonymous=True)\n', (6086, 6108), False, 'import rospy\n'), ((8981, 8992), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8989, 8992), False, 'import os, sys, time, string, threading\n'), ((2339, 2391), 'enthought.mayavi.mlab.figure', 'mlab.figure', (['(1)'], {'fgcolor': '(0, 0, 0)', 'bgcolor': '(1, 1, 1)'}), '(1, fgcolor=(0, 0, 0), bgcolor=(1, 1, 1))\n', (2350, 2391), False, 'from enthought.mayavi import mlab\n'), ((2405, 2438), 'enthought.mayavi.mlab.pipeline.scalar_field', 'mlab.pipeline.scalar_field', (['dists'], {}), '(dists)\n', (2431, 2438), False, 'from enthought.mayavi import mlab\n'), ((2451, 2511), 'enthought.mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['src'], {'contours': '[0.01]', 'opacity': '(0.1)'}), '(src, contours=[0.01], opacity=0.1)\n', (2476, 2511), False, 'from enthought.mayavi import mlab\n'), ((2660, 2753), 'enthought.mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['v[:, 0]', 'v[:, 1]', 'v[:, 2]', 'self.trimesh.indices'], {'color': '(0, 0, 
0.5)'}), '(v[:, 0], v[:, 1], v[:, 2], self.trimesh.indices, color\n =(0, 0, 0.5))\n', (2680, 2753), False, 'from enthought.mayavi import mlab\n'), ((4389, 4412), 'numpy.random.randint', 'numpy.random.randint', (['N'], {}), '(N)\n', (4409, 4412), False, 'import numpy\n'), ((2543, 2582), 'enthought.mayavi.mlab.pipeline.scalar_field', 'mlab.pipeline.scalar_field', (['(dists * 500)'], {}), '(dists * 500)\n', (2569, 2582), False, 'from enthought.mayavi import mlab\n'), ((2887, 2910), 'os.path.split', 'os.path.split', (['savefile'], {}), '(savefile)\n', (2900, 2910), False, 'import os, sys, time, string, threading\n'), ((5784, 5798), 'string.atof', 'string.atof', (['s'], {}), '(s)\n', (5795, 5798), False, 'import os, sys, time, string, threading\n'), ((5808, 5823), 'string.split', 'string.split', (['l'], {}), '(l)\n', (5820, 5823), False, 'import os, sys, time, string, threading\n')] |
import numpy as np
import tensorflow as tf
def encoder_linear(input_shape, latent_dim):
    """Linear encoder: flatten the input and project it to ``latent_dim``."""
    layers = tf.keras.layers
    return tf.keras.Sequential([
        layers.InputLayer(input_shape=input_shape),
        layers.Flatten(),
        layers.Dense(latent_dim, activation=None),
    ])
def decoder_linear(latent_dim, output_shape):
    """Linear decoder: project a latent vector back to ``output_shape``."""
    layers = tf.keras.layers
    n_out = np.prod(output_shape)
    return tf.keras.Sequential([
        layers.InputLayer(input_shape=latent_dim),
        layers.Dense(n_out, activation=None),
        layers.Reshape(output_shape),
    ])
def encoder_fc(input_shape, latent_dim):
    """Fully connected encoder with two ReLU hidden layers (4096, 1024)."""
    layers = tf.keras.layers
    return tf.keras.Sequential([
        layers.InputLayer(input_shape=input_shape),
        layers.Flatten(),
        layers.Dense(4096, activation=tf.nn.relu),
        layers.Dense(1024, activation=tf.nn.relu),
        layers.Dense(latent_dim, activation=None),
    ])
def decoder_fc(latent_dim, output_shape):
    """Fully connected decoder with two ReLU hidden layers (1028, 4096).

    NOTE(review): 1028 is asymmetric with encoder_fc's 1024 and looks like a
    typo; kept as-is to preserve behavior -- confirm with the author.
    """
    layers = tf.keras.layers
    return tf.keras.Sequential([
        layers.InputLayer(input_shape=latent_dim),
        layers.Dense(1028, activation=tf.nn.relu),
        layers.Dense(4096, activation=tf.nn.relu),
        layers.Dense(np.prod(output_shape), activation=None),
        layers.Reshape(output_shape),
    ])
def encoder_conv_64x64(channels, latent_dim):
    """Convolutional encoder mapping (64, 64, channels) images to ``latent_dim``."""
    layers = tf.keras.layers
    stack = [layers.InputLayer(input_shape=(64, 64, channels))]
    # Three stride-2 conv blocks: 64x64xC -> 32x32x32 -> 16x16x64 -> 8x8x128.
    for filters, kernel in ((32, 5), (64, 3), (128, 3)):
        stack.append(layers.Conv2D(filters, kernel, 2, "same"))
        stack.append(layers.BatchNormalization())
        stack.append(layers.LeakyReLU())
    # Flatten and project into the latent space.
    stack.append(layers.Flatten())
    stack.append(layers.Dense(latent_dim, activation=None))
    return tf.keras.Sequential(stack)
def decoder_conv_64x64(latent_dim, channels):
    """Convolutional decoder mapping ``latent_dim`` vectors to (64, 64, channels)."""
    layers = tf.keras.layers
    stack = [
        layers.InputLayer(input_shape=(latent_dim, )),
        # Project and reshape to an 8x8x128 feature map.
        layers.Dense(8 * 8 * 128, activation=None),
        layers.Reshape([8, 8, 128]),
    ]
    # Two stride-2 upsampling blocks: 8x8x128 -> 16x16x64 -> 32x32x32.
    for filters in (64, 32):
        stack.append(layers.Conv2DTranspose(filters, 3, 2, "same"))
        stack.append(layers.BatchNormalization())
        stack.append(layers.LeakyReLU())
    # Final upsample to the output resolution: 32x32x32 -> 64x64xchannels.
    stack.append(layers.Conv2DTranspose(channels, 5, 2, "same"))
    return tf.keras.Sequential(stack)
| [
"numpy.prod",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layer... | [((194, 245), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': 'input_shape'}), '(input_shape=input_shape)\n', (220, 245), True, 'import tensorflow as tf\n'), ((260, 285), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (283, 285), True, 'import tensorflow as tf\n'), ((300, 350), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['latent_dim'], {'activation': 'None'}), '(latent_dim, activation=None)\n', (321, 350), True, 'import tensorflow as tf\n'), ((522, 572), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': 'latent_dim'}), '(input_shape=latent_dim)\n', (548, 572), True, 'import tensorflow as tf\n'), ((663, 700), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['output_shape'], {}), '(output_shape)\n', (686, 700), True, 'import tensorflow as tf\n'), ((867, 918), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': 'input_shape'}), '(input_shape=input_shape)\n', (893, 918), True, 'import tensorflow as tf\n'), ((933, 958), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (956, 958), True, 'import tensorflow as tf\n'), ((973, 1023), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(4096)'], {'activation': 'tf.nn.relu'}), '(4096, activation=tf.nn.relu)\n', (994, 1023), True, 'import tensorflow as tf\n'), ((1038, 1088), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1024)'], {'activation': 'tf.nn.relu'}), '(1024, activation=tf.nn.relu)\n', (1059, 1088), True, 'import tensorflow as tf\n'), ((1103, 1153), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['latent_dim'], {'activation': 'None'}), '(latent_dim, activation=None)\n', (1124, 1153), True, 'import tensorflow as tf\n'), ((1321, 1371), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': 'latent_dim'}), 
'(input_shape=latent_dim)\n', (1347, 1371), True, 'import tensorflow as tf\n'), ((1386, 1436), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1028)'], {'activation': 'tf.nn.relu'}), '(1028, activation=tf.nn.relu)\n', (1407, 1436), True, 'import tensorflow as tf\n'), ((1451, 1501), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(4096)'], {'activation': 'tf.nn.relu'}), '(4096, activation=tf.nn.relu)\n', (1472, 1501), True, 'import tensorflow as tf\n'), ((1592, 1629), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['output_shape'], {}), '(output_shape)\n', (1615, 1629), True, 'import tensorflow as tf\n'), ((1758, 1816), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(64, 64, channels)'}), '(input_shape=(64, 64, channels))\n', (1784, 1816), True, 'import tensorflow as tf\n'), ((1881, 1921), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(5)', '(2)', '"""same"""'], {}), "(32, 5, 2, 'same')\n", (1903, 1921), True, 'import tensorflow as tf\n'), ((1936, 1972), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (1970, 1972), True, 'import tensorflow as tf\n'), ((1987, 2014), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (2012, 2014), True, 'import tensorflow as tf\n'), ((2073, 2113), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3)', '(2)', '"""same"""'], {}), "(64, 3, 2, 'same')\n", (2095, 2113), True, 'import tensorflow as tf\n'), ((2128, 2164), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (2162, 2164), True, 'import tensorflow as tf\n'), ((2179, 2206), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (2204, 2206), True, 'import tensorflow as tf\n'), ((2264, 2305), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3)', '(2)', 
'"""same"""'], {}), "(128, 3, 2, 'same')\n", (2286, 2305), True, 'import tensorflow as tf\n'), ((2320, 2356), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (2354, 2356), True, 'import tensorflow as tf\n'), ((2371, 2398), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (2396, 2398), True, 'import tensorflow as tf\n'), ((2459, 2484), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (2482, 2484), True, 'import tensorflow as tf\n'), ((2499, 2549), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['latent_dim'], {'activation': 'None'}), '(latent_dim, activation=None)\n', (2520, 2549), True, 'import tensorflow as tf\n'), ((2678, 2731), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(latent_dim,)'}), '(input_shape=(latent_dim,))\n', (2704, 2731), True, 'import tensorflow as tf\n'), ((2792, 2843), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(8 * 8 * 128)'], {'activation': 'None'}), '(8 * 8 * 128, activation=None)\n', (2813, 2843), True, 'import tensorflow as tf\n'), ((2858, 2894), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['[8, 8, 128]'], {}), '([8, 8, 128])\n', (2881, 2894), True, 'import tensorflow as tf\n'), ((2952, 3001), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(64)', '(3)', '(2)', '"""same"""'], {}), "(64, 3, 2, 'same')\n", (2983, 3001), True, 'import tensorflow as tf\n'), ((3016, 3052), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3050, 3052), True, 'import tensorflow as tf\n'), ((3067, 3094), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (3092, 3094), True, 'import tensorflow as tf\n'), ((3153, 3202), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(32)', '(3)', '(2)', '"""same"""'], {}), 
"(32, 3, 2, 'same')\n", (3184, 3202), True, 'import tensorflow as tf\n'), ((3217, 3253), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (3251, 3253), True, 'import tensorflow as tf\n'), ((3268, 3295), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (3293, 3295), True, 'import tensorflow as tf\n'), ((3360, 3415), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['channels', '(5)', '(2)', '"""same"""'], {}), "(channels, 5, 2, 'same')\n", (3391, 3415), True, 'import tensorflow as tf\n'), ((609, 630), 'numpy.prod', 'np.prod', (['output_shape'], {}), '(output_shape)\n', (616, 630), True, 'import numpy as np\n'), ((1538, 1559), 'numpy.prod', 'np.prod', (['output_shape'], {}), '(output_shape)\n', (1545, 1559), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import argparse
import logging
import math
import sys
import time
import random
import os
import json
import numpy as np
import pickle
import six
import threading
import pdb
from tqdm import tqdm
import torch
import torch.nn as nn
from pytorch_pretrained_bert import BertTokenizer, OpenAIGPTTokenizer, GPT2Tokenizer, TransfoXLTokenizer
import data_handler as dh
from model.mmseq2seq_model import MMSeq2SeqModel
from model.multimodal_encoder import MMEncoder
from model.lstm_encoder import LSTMEncoder
from model.hlstm_encoder import HLSTMEncoder
from model.hlstm_decoder import HLSTMDecoder
def fetch_batch(dh, data, index, separate_caption, result):
    """Build one batch via ``dh.make_batch`` and append it to ``result``.

    Intended to run on a prefetch thread (see evaluate); relies on the
    module-level ``args`` and ``bert_tokenizer`` globals.
    """
    result.append(dh.make_batch(data, index, separate_caption=separate_caption, pretrained_elmo=args.pretrained_elmo, pretrained_bert=args.pretrained_bert, bert_tokenizer=bert_tokenizer, pretrained_all=args.pretrained_all, bert_model=args.bert_model, concat_his=args.concat_his))
# Evaluation routine
def evaluate(model, data, indices, parallel=False):
    """Compute the model's perplexity over the batches indexed by ``indices``.

    The next batch is prefetched on a background thread while the current one
    is scored. Relies on the module-level ``args`` and ``bert_tokenizer``
    globals (same as fetch_batch).

    Args:
        model: seq2seq model exposing ``loss`` (or ``module.loss`` when
            wrapped by DataParallel and ``parallel`` is True).
        data: dataset handle passed through to ``dh.make_batch``.
        indices: sequence of batch indices to evaluate.
        parallel: call ``model.module.loss`` instead of ``model.loss``.

    Returns:
        (perplexity, wall_time): exp(total_loss / total_words) and the elapsed
        evaluation time in seconds.
    """
    start_time = time.time()
    eval_loss = 0.
    eval_num_words = 0
    model.eval()
    with torch.no_grad():
        # fetch the first batch synchronously
        batch = [dh.make_batch(data, indices[0], separate_caption=args.separate_caption, pretrained_elmo=args.pretrained_elmo, pretrained_bert=args.pretrained_bert, bert_tokenizer=bert_tokenizer, pretrained_all=args.pretrained_all, bert_model=args.bert_model, concat_his=args.concat_his)]
        # evaluation loop
        it = tqdm(six.moves.range(len(indices)), desc="evaluation", ncols=0)
        for j in it:
            b = batch.pop()
            # start prefetching the next batch while this one is scored
            # (fix: the original joined ``prefetch`` unconditionally, raising
            # NameError when len(indices) == 1 and no thread was ever created)
            prefetch = None
            if j < len(indices)-1:
                prefetch = threading.Thread(target=fetch_batch,
                            args=([dh, data, indices[j+1], args.separate_caption, batch]))
                prefetch.start()
            # wrap the numpy batch components as torch tensors
            x = [torch.from_numpy(x) for x in b[0]]
            if args.concat_his:
                h = [torch.from_numpy(h_i) for h_i in b[1]]
            else:
                h = [[torch.from_numpy(h) for h in hb] for hb in b[1]]
            q = [torch.from_numpy(q) for q in b[2]]
            ai = [torch.from_numpy(ai) for ai in b[3]]
            ao = [torch.from_numpy(ao) for ao in b[4]]
            if args.separate_caption:
                c = [torch.from_numpy(c) for c in b[5]]
            else:
                c = None
            # optional pretrained-embedding contexts appended to the batch
            if args.pretrained_elmo or args.pretrained_bert:
                if args.pretrained_all:
                    context_q, context_h, context_ai = b[-3:]
                else:
                    context_q = b[-1]
                    context_h = None
                    context_ai = None
            else:
                context_q = None
                context_h = None
                context_ai = None
            if args.exclude_video:
                x = None
            if parallel:
                _, _, loss = model.module.loss(x, h, q, ai, ao, c, context_q, context_h, context_ai)
            else:
                _, _, loss = model.loss(x, h, q, ai, ao, c, context_q, context_h, context_ai)
            # accumulate word-weighted loss for perplexity
            num_words = sum([len(s) for s in ao])
            eval_loss += loss.cpu().data.numpy() * num_words
            eval_num_words += num_words
            # wait for the prefetch thread before popping the next batch
            if prefetch is not None:
                prefetch.join()
    model.train()
    wall_time = time.time() - start_time
    return math.exp(eval_loss/eval_num_words), wall_time
##################################
# main
if __name__ =="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=0, type=int,
help='GPU ID (negative value indicates CPU)')
# train, dev and test data
parser.add_argument('--vocabfile', default='', type=str,
help='Vocabulary file (.json)')
parser.add_argument('--dictmap', default='', type=str,
help='Dict id-map file (.json)')
parser.add_argument('--fea-type', nargs='+', type=str,
help='Image feature files (.pkl)')
parser.add_argument('--train-path', default='', type=str,
help='Path to training feature files')
parser.add_argument('--train-set', default='', type=str,
help='Filename of train data')
parser.add_argument('--valid-path', default='', type=str,
help='Path to validation feature files')
parser.add_argument('--valid-set', default='', type=str,
help='Filename of validation data')
parser.add_argument('--include-caption', action='store_true',
help='Include caption in the history')
parser.add_argument('--separate-caption', default=False, type=bool,
help='')
parser.add_argument('--exclude-video', action='store_true',
help='')
parser.add_argument('--pretrained-word-emb', default=None, type=str,
help='')
parser.add_argument('--pretrained-weights', default=None, type=str,
help='')
parser.add_argument('--pretrained-elmo', default=False, type=int,
help='')
parser.add_argument('--elmo-num-outputs', default=1, type=int,
help='')
parser.add_argument('--finetune-elmo', default=False, type=int,
help='')
parser.add_argument('--pretrained-bert', default=False, type=int,
help='')
parser.add_argument('--bert-model', default='bert-base-uncased', type=str,
help='')
parser.add_argument('--finetune-bert', default=False, type=int,
help='')
parser.add_argument('--add-word-emb', default=True, type=int,
help='')
parser.add_argument('--pretrained-all', default=True, type=int,
help='')
parser.add_argument('--concat-his', default=False, type=int,
help='')
# model parameters
parser.add_argument('--model', '-m', default='', type=str,
help='Attention model to be output')
# multimodal encoder parameters
parser.add_argument('--enc-psize', '-p', nargs='+', type=int,
help='Number of projection layer units')
parser.add_argument('--enc-hsize', '-u', nargs='+', type=int,
help='Number of hidden units')
parser.add_argument('--att-size', '-a', default=100, type=int,
help='Number of attention layer units')
parser.add_argument('--mout-size', default=100, type=int,
help='Number of output layer units')
# --- multimodal attention / fusion options ---
# NOTE(review): several options below ship with empty help strings; the
# accepted values (e.g. for --mm-att / --mm-fusioning) are only visible in the
# model code — worth filling in.
parser.add_argument('--mm-att', default='baseline', type=str,
                        help="")
parser.add_argument('--mm-fusioning', default='baseline', type=str,
                        help="")
parser.add_argument('--mm-att-hops', default=1, type=int,
                        help='')
parser.add_argument('--caption-mm-att', action='store_true',
                        help='')
# input (question/caption) encoder parameters
parser.add_argument('--embed-size', default=200, type=int,
                        help='Word embedding size')
parser.add_argument('--in-enc-layers', default=2, type=int,
                        help='Number of input encoder layers')
parser.add_argument('--in-enc-hsize', default=200, type=int,
                        help='Number of input encoder hidden layer units')
parser.add_argument('--q-att', default=None, type=str,
                        help='')
parser.add_argument('--c-att', default=None, type=str,
                        help='')
parser.add_argument('--rnn-type', default='lstm', type=str,
                        help='')
# NOTE(review): type=bool is a known argparse pitfall — bool('False') is True,
# so ANY non-empty value enables this flag. action='store_true' would be the
# conventional fix; not applied here because it changes the CLI syntax.
parser.add_argument('--caption-states-att', default=False, type=bool,
                        help='')
# history (QA pairs) encoder parameters
parser.add_argument('--hist-enc-layers', nargs='+', type=int,
                        help='Number of history encoder layers')
parser.add_argument('--hist-enc-hsize', default=200, type=int,
                        help='History embedding size')
parser.add_argument('--hist-out-size', default=200, type=int,
                        help='History embedding size')
parser.add_argument('--ft-fusioning', default='baseline', type=str,
                        help='Fusioning fetures between images and text')
parser.add_argument('--caption-mm-fusion-out-size', default=-1, type=int,
                        help='')
# response (answer) decoder parameters
parser.add_argument('--dec-layers', default=2, type=int,
                        help='Number of decoder layers')
parser.add_argument('--dec-psize', '-P', default=200, type=int,
                        help='Number of decoder projection layer units')
parser.add_argument('--dec-hsize', '-d', default=200, type=int,
                        help='Number of decoder hidden layer units')
parser.add_argument('--classifier', default='baseline', type=str,
                        help='')
# Training conditions
parser.add_argument('--optimizer', '-o', default='AdaDelta', type=str,
                        choices=['SGD', 'Adam', 'AdaDelta', 'RMSprop'],
                        help="optimizer")
parser.add_argument('--rand-seed', '-s', default=1, type=int,
                        help="seed for generating random numbers")
parser.add_argument('--batch-size', '-b', default=20, type=int,
                        help='Batch size in training')
parser.add_argument('--num-epochs', '-e', default=15, type=int,
                        help='Number of epochs')
parser.add_argument('--max-length', default=20, type=int,
                        help='Maximum length for controling batch size')
parser.add_argument('--n-batches', default=-1, type=int,
                        help='Number of batches in training')
parser.add_argument('--weight-decay', default=0, type=float,
                        help='')
parser.add_argument('--lr-scheduler', action='store_true',
                        help='')
parser.add_argument('--lr', default=-1, type=float,
                        help='')
# others
parser.add_argument('--verbose', '-v', default=0, type=int,
                        help='verbose level')
args = parser.parse_args()
# Normalize flag-like options to real booleans (they may arrive as ints/strings).
args.pretrained_elmo = bool(args.pretrained_elmo)
args.finetune_elmo = bool(args.finetune_elmo)
args.pretrained_bert = bool(args.pretrained_bert)
args.finetune_bert = bool(args.finetune_bert)
args.add_word_emb = bool(args.add_word_emb)
args.pretrained_all = bool(args.pretrained_all)
# Seed both the stdlib and NumPy RNGs so runs are reproducible.
random.seed(args.rand_seed)
np.random.seed(args.rand_seed)
# Optional id-mapping dictionary; an empty string means "not provided".
if args.dictmap != '':
        # Use a context manager so the file handle is closed deterministically;
        # the original json.load(open(...)) relied on GC to close it.
        with open(args.dictmap, 'r') as dictmap_file:
            dictmap = json.load(dictmap_file)
else:
        dictmap = None
# --verbose >= 1 switches on DEBUG-level logging with module/line information.
if args.verbose >= 1:
        logging.basicConfig(level=logging.DEBUG,
            format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')
else:
        logging.basicConfig(level=logging.INFO,
            format='%(asctime)s %(levelname)s: %(message)s')
# Echo the full configuration so each run is self-describing on stdout.
for arg in vars(args):
        print("{}={}".format(arg, getattr(args, arg)))
# get vocabulary
# Pick the sub-word tokenizer matching the requested pretrained LM; when no
# pretrained LM is used, tokenization falls back to the plain vocabulary path.
if args.pretrained_bert:
        if 'bert' in args.bert_model:
            bert_tokenizer = BertTokenizer.from_pretrained(args.bert_model)
        elif 'openai-gpt' in args.bert_model:
            bert_tokenizer = OpenAIGPTTokenizer.from_pretrained(args.bert_model)
        elif 'gpt2' in args.bert_model:
            bert_tokenizer = GPT2Tokenizer.from_pretrained(args.bert_model)
        elif 'transfo-xl' in args.bert_model:
            bert_tokenizer = TransfoXLTokenizer.from_pretrained(args.bert_model)
        # NOTE(review): a --bert-model matching none of the substrings above
        # leaves bert_tokenizer unbound (NameError at first use) — an explicit
        # else with a clear error message would be safer.
else:
        bert_tokenizer = None
logging.info('Extracting words from ' + args.train_set)
vocab = dh.get_vocabulary(args.train_set, include_caption=args.include_caption, tokenizer=bert_tokenizer)
# Optionally load pretrained word embeddings; the literal value 'none' (or a
# path containing it) disables them.
if args.pretrained_word_emb is not None and 'none' not in args.pretrained_word_emb:
        pretrained_word_emb = dh.get_word_emb(vocab, args.pretrained_word_emb)
else:
        pretrained_word_emb = None
# load data
# Train and validation splits are loaded with identical options so that their
# features, vocabulary and history handling line up.
logging.info('Loading training data from ' + args.train_set)
train_data = dh.load(args.fea_type, args.train_path, args.train_set,
                     vocabfile=args.vocabfile,
                     include_caption=args.include_caption, separate_caption=args.separate_caption,
                     vocab=vocab, dictmap=dictmap,
                     pretrained_elmo=args.pretrained_elmo, pretrained_bert=args.pretrained_bert,
                     bert_model=args.bert_model, tokenizer=bert_tokenizer,
                     pretrained_all=args.pretrained_all, concat_his=args.concat_his)
logging.info('Loading validation data from ' + args.valid_set)
valid_data = dh.load(args.fea_type, args.valid_path, args.valid_set,
                     vocabfile=args.vocabfile,
                     include_caption=args.include_caption, separate_caption=args.separate_caption,
                     vocab=vocab, dictmap=dictmap,
                     pretrained_elmo=args.pretrained_elmo, pretrained_bert=args.pretrained_bert,
                     bert_model=args.bert_model, tokenizer=bert_tokenizer,
                     pretrained_all=args.pretrained_all, concat_his=args.concat_his)
# Feature dimensionality is inferred from the loaded training features and
# drives the multimodal-encoder construction below.
feature_dims = dh.feature_shape(train_data)
logging.info("Detected feature dims: {}".format(feature_dims))
# Prepare RNN model and load data
caption_state_size = -1
# Optionally warm-start from an earlier run: load its weights and vocabulary,
# then re-index the embedding rows to the current vocabulary.
if args.pretrained_weights:
        # Context managers close the files deterministically; the original
        # pickle.load(open(...)) pattern leaked the handles until GC.
        with open(args.pretrained_weights, 'rb') as weights_file:
            pretrained_weights = pickle.load(weights_file)
        # The companion .conf file sits next to the weights file.
        pretrained_conf = ('/').join(args.pretrained_weights.split('/')[:-1]) + '/avsd_model.conf'
        with open(pretrained_conf, 'rb') as conf_file:
            pretrained_vocab, _ = pickle.load(conf_file)
        pretrained_weights = dh.align_vocab(pretrained_vocab, vocab, pretrained_weights)
else:
        pretrained_weights = None
# Decoder input width = multimodal-encoder output + history + question state;
# question attention (--q-att) doubles the question-encoder state size.
if args.q_att:
        in_size_decoder = args.mout_size + args.hist_out_size + args.in_enc_hsize*2
        state_size = args.in_enc_hsize*2
else:
        in_size_decoder = args.mout_size + args.hist_out_size + args.in_enc_hsize
        state_size = args.in_enc_hsize
# A separately encoded caption contributes its own state to the decoder input,
# unless the decoder attends over the caption states instead.
if args.separate_caption:
        if args.c_att == 'conv_sum':
            caption_state_size = args.in_enc_hsize*2
            if not args.caption_states_att:
                in_size_decoder += caption_state_size
        else:
            caption_state_size = args.in_enc_hsize
            if not args.caption_states_att:
                in_size_decoder += caption_state_size
# Video branch: either skip it entirely, or build the multimodal encoder whose
# attention is conditioned on the caption state (--caption-mm-att) or the
# question state.
if args.exclude_video:
        mm_encoder = None
        in_size_decoder -= args.mout_size
else:
        if args.caption_mm_att:
            mm_state_size = caption_state_size
        else:
            mm_state_size = state_size
        mm_encoder = MMEncoder(feature_dims, args.mout_size, enc_psize=args.enc_psize,
                           enc_hsize=args.enc_hsize, att_size=args.att_size,
                           state_size=mm_state_size, attention=args.mm_att, fusioning=args.mm_fusioning,
                           att_hops=args.mm_att_hops)
# This fusion variant replaces the concatenated video + caption features with a
# single fused vector of width --caption-mm-fusion-out-size.
if args.ft_fusioning == 'caption_mm_nonlinear_multiply':
        in_size_decoder = in_size_decoder - args.mout_size - caption_state_size + args.caption_mm_fusion_out_size
# Per-module warm-start weights; None when training from scratch.
weights_init=pretrained_weights['history_encoder'] if pretrained_weights is not None else None
# Hierarchical LSTM over the dialog history (sequence of QA pairs).
hlstm_encoder = HLSTMEncoder(args.hist_enc_layers[0], args.hist_enc_layers[1],
                        len(vocab), args.hist_out_size, args.embed_size,
                        args.hist_enc_hsize, rnn_type=args.rnn_type, embedding_init=pretrained_word_emb, weights_init=weights_init,
                        elmo_init=args.pretrained_elmo, elmo_num_outputs=args.elmo_num_outputs, finetune_elmo=args.finetune_elmo,
                        bert_init=args.pretrained_bert, bert_model=args.bert_model, finetune_bert=args.finetune_bert,
                        add_word_emb=args.add_word_emb, pretrained_all=args.pretrained_all,
                        concat_his=args.concat_his)
# weights_init is deliberately re-bound per module below — keep the order.
weights_init=pretrained_weights['input_encoder'] if pretrained_weights is not None else None
# LSTM encoder over the current question (attention per --q-att).
input_encoder = LSTMEncoder(args.in_enc_layers, len(vocab), args.in_enc_hsize,
                        args.embed_size, attention=args.q_att, rnn_type=args.rnn_type, embedding_init=pretrained_word_emb, weights_init=weights_init,
                        elmo_init=args.pretrained_elmo, elmo_num_outputs=args.elmo_num_outputs, finetune_elmo=args.finetune_elmo,
                        bert_init=args.pretrained_bert, bert_model=args.bert_model, finetune_bert=args.finetune_bert,
                        add_word_emb=args.add_word_emb)
weights_init=pretrained_weights['response_decoder'] if pretrained_weights is not None else None
# Answer decoder; consumes the fused context of width in_size_decoder and can
# optionally attend over caption states (states_att/state_size).
hlstm_decoder = HLSTMDecoder(args.dec_layers, len(vocab), len(vocab), args.embed_size,
                        in_size_decoder,
                        args.dec_hsize, args.dec_psize,
                        independent=True, rnn_type=args.rnn_type,
                        classifier=args.classifier, states_att=args.caption_states_att, state_size=caption_state_size, embedding_init=pretrained_word_emb, weights_init=weights_init,
                        elmo_init=args.pretrained_elmo, elmo_num_outputs=args.elmo_num_outputs, finetune_elmo=args.finetune_elmo,
                        bert_init=args.pretrained_bert, bert_model=args.bert_model, finetune_bert=args.finetune_bert,
                        add_word_emb=args.add_word_emb, pretrained_all=args.pretrained_all)
if args.separate_caption:
        weights_init=pretrained_weights['caption_encoder'] if pretrained_weights is not None else None
        # Dedicated caption encoder; its attention is conditioned on the
        # question-encoder state (q_size=state_size).
        caption_encoder = LSTMEncoder(args.in_enc_layers, len(vocab), args.in_enc_hsize,
                        args.embed_size, attention=args.c_att, rnn_type=args.rnn_type, q_size=state_size, weights_init=weights_init)
else:
        caption_encoder = None
# Assemble the full multimodal seq2seq model from the sub-modules built above.
model = MMSeq2SeqModel(mm_encoder, hlstm_encoder, input_encoder, hlstm_decoder, fusioning=args.ft_fusioning, caption_encoder=caption_encoder,
                caption_states_att = args.caption_states_att, caption_mm_att = args.caption_mm_att, c_in_size=caption_state_size, mm_in_size=args.mout_size, out_size=args.caption_mm_fusion_out_size)
# report data summary
logging.info('#vocab = %d' % len(vocab))
# make batchset for training
logging.info('Making mini batches for training data')
train_indices, train_samples = dh.make_batch_indices(train_data, args.batch_size,
                        max_length=args.max_length, separate_caption=args.separate_caption)
logging.info('#train sample = %d' % train_samples)
logging.info('#train batch = %d' % len(train_indices))
# make batchset for validation
logging.info('Making mini batches for validation data')
valid_indices, valid_samples = dh.make_batch_indices(valid_data, args.batch_size,
                        max_length=args.max_length, separate_caption=args.separate_caption)
logging.info('#validation sample = %d' % valid_samples)
logging.info('#validation batch = %d' % len(valid_indices))
# copy model to gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
# parallel selects the DataParallel call path (model.module.loss) in the
# training loop; this script runs single-device, so it stays False.
parallel = False
# Persist (vocab, args) so evaluation/decoding can rebuild the exact setup.
path = args.model + '.conf'
with open(path, 'wb') as f:
        pickle.dump((vocab, args), f, -1)
# Also dump a human-readable parameter listing next to the model.
path2 = args.model + '_params.txt'
with open(path2, "w") as f:
        for arg in vars(args):
            f.write("{}={}\n".format(arg, getattr(args, arg)))
# start training
logging.info('----------------')
logging.info('Start training')
logging.info('----------------')
# Setup optimizer; --weight-decay applies to every choice.
if args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), weight_decay=args.weight_decay)
elif args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), weight_decay=args.weight_decay)
elif args.optimizer == 'AdaDelta':
        optimizer = torch.optim.Adadelta(model.parameters(), weight_decay=args.weight_decay)
elif args.optimizer == 'RMSprop':
        optimizer = torch.optim.RMSprop(model.parameters(), weight_decay=args.weight_decay)
# Optional learning-rate override; --lr <= 0 keeps each optimizer's default.
if args.lr > 0:
        # BUGFIX: the original iterated 'optim.param_groups' — 'optim' is at
        # best the torch.optim module (which has no param_groups), so the
        # override always raised instead of setting the learning rate. Iterate
        # the optimizer instance created above.
        for g in optimizer.param_groups:
            g['lr'] = args.lr
# Optional step decay: halve the learning rate after every epoch.
if args.lr_scheduler:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)
# initialize status parameters
modelext = '.pth.tar'
cur_loss = 0.            # running loss since the last progress report
cur_num_words = 0        # running token count since the last progress report
start_at = time.time()
cur_at = start_at        # timestamp of the last progress report
min_valid_ppl = 1.0e+10  # best (lowest) validation perplexity seen so far
n = 0                    # global step counter across epochs
# Report progress roughly every 100 samples. max(1, ...) guards the
# '(n + 1) % report_interval' check in the training loop against a
# ZeroDivisionError when batch_size > 100 (int(100/batch_size) would be 0).
report_interval = max(1, int(100/args.batch_size))
bestmodel_num = 0        # epoch number of the best checkpoint
random.shuffle(train_indices)
# CSV logs: per-epoch train/val perplexity, and per-report train perplexity.
trace_log_path = args.model+'_trace.csv'
with open(trace_log_path, "w") as f:
        f.write('epoch,split,perplexity\n')
train_log_path = args.model+'_train.csv'
with open(train_log_path, "w") as f:
        f.write('epoch,step,perplexity\n')
print("Saving training results to {}".format(train_log_path))
print("Saving val results to {}".format(trace_log_path))
# do training iterations
for i in six.moves.range(args.num_epochs):
        if args.lr_scheduler:
            scheduler.step()
        logging.info('-------------------------Epoch %d : %s-----------------------' % (i+1, args.optimizer))
        train_loss = 0.
        train_num_words = 0
        # fetch the first batch
        batch = [dh.make_batch(train_data, train_indices[0], separate_caption=args.separate_caption, pretrained_elmo=args.pretrained_elmo, pretrained_bert=args.pretrained_bert, bert_tokenizer=bert_tokenizer,
                    pretrained_all=args.pretrained_all, bert_model=args.bert_model,
                    concat_his=args.concat_his)]
        # train iterations (--n-batches > 0 caps the number of steps per epoch)
        if args.n_batches > 0:
            n_batches = args.n_batches
        else:
            n_batches = len(train_indices)
        it = tqdm(six.moves.range(n_batches), desc="epoch {}/{}".format(i, args.num_epochs), ncols=0)
        for j in it:
            b = batch.pop()
            # fetch the next batch in parallel (the worker appends it to `batch`)
            # NOTE(review): when len(train_indices) == 1 no thread is ever
            # created and prefetch.join() below raises NameError — verify.
            if j < len(train_indices)-1:
                prefetch = threading.Thread(target=fetch_batch,
                            args=([dh, train_data, train_indices[j+1], args.separate_caption, batch]))
                prefetch.start()
            # propagate for training: wrap the numpy arrays as torch tensors.
            # Batch layout as consumed below: b[0] features, b[1] history,
            # b[2] question, b[3] answer-in, b[4] answer-out, b[5] caption
            # (when separate), pretrained-LM contexts at the tail.
            x = [torch.from_numpy(x) for x in b[0]]
            if args.concat_his:
                h = [torch.from_numpy(h_i) for h_i in b[1]]
            else:
                h = [[torch.from_numpy(h) for h in hb] for hb in b[1]]
            q = [torch.from_numpy(q) for q in b[2]]
            ai = [torch.from_numpy(ai) for ai in b[3]]
            ao = [torch.from_numpy(ao) for ao in b[4]]
            if args.separate_caption:
                c = [torch.from_numpy(c) for c in b[5]]
            else:
                c = None
            if args.pretrained_elmo or args.pretrained_bert:
                if args.pretrained_all:
                    context_q, context_h, context_ai = b[-3:]
                else:
                    context_q = b[-1]
                    context_h = None
                    context_ai = None
            else:
                context_q = None
                context_h = None
                context_ai = None
            if args.exclude_video:
                x = None
            if parallel:
                _, _, loss = model.module.loss(x, h, q, ai, ao, c, context_q, context_h, context_ai)
            else:
                _, _, loss = model.loss(x, h, q, ai, ao, c, context_q, context_h, context_ai)
            # Accumulate loss weighted by token count for perplexity.
            num_words = sum([len(s) for s in ao])
            batch_loss = loss.cpu().data.numpy()
            train_loss += batch_loss * num_words
            train_num_words += num_words
            cur_loss += batch_loss * num_words
            cur_num_words += num_words
            # Periodic progress report: perplexity over the window since the
            # last report, echoed on the tqdm bar and appended to the CSV log.
            if (n + 1) % report_interval == 0:
                now = time.time()
                # NOTE(review): throuput is computed but never used.
                throuput = report_interval / (now - cur_at)
                perp = math.exp(cur_loss / cur_num_words)
                it.set_postfix(train_perplexity='{:.3f}'.format(perp))
                with open(train_log_path, "a") as f:
                    f.write("{},{},{:e}\n".format(i+1,n+1,perp))
                cur_at = now
                cur_loss = 0.
                cur_num_words = 0
            n += 1
            # Run truncated BPTT
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # wait prefetch completion
            prefetch.join()
        train_ppl = math.exp(train_loss/train_num_words)
        logging.info("epoch: %d train perplexity: %f" % (i+1, train_ppl))
        # validation step
        logging.info('-------validation--------')
        now = time.time()
        # NOTE(review): valid_time is never used after this call.
        valid_ppl, valid_time = evaluate(model, valid_data, valid_indices, parallel)
        logging.info('validation perplexity: %.4f' % (valid_ppl))
        with open(trace_log_path,"a") as f:
            f.write("{},train,{:e}\n".format(i+1,train_ppl))
            f.write("{},val,{:e}\n".format(i+1,valid_ppl))
        # update the model via comparing with the lowest perplexity
        modelfile = args.model + '_' + str(i + 1) + modelext
        logging.info('writing model params to ' + modelfile)
        torch.save(model, modelfile)
        if min_valid_ppl > valid_ppl:
            bestmodel_num = i+1
            logging.info('validation perplexity reduced %.4f -> %.4f' % (min_valid_ppl, valid_ppl))
            min_valid_ppl = valid_ppl
            # keep '<model>_best' pointing at the best checkpoint via a
            # relative symlink (basename keeps it valid if the dir moves)
            logging.info('a symbolic link is made as ' + args.model + '_best' + modelext)
            if os.path.exists(args.model + '_best' + modelext):
                os.remove(args.model + '_best' + modelext)
            os.symlink(os.path.basename(args.model + '_' + str(bestmodel_num) + modelext), args.model + '_best' + modelext)
        cur_at += time.time() - now  # skip time of evaluation and file I/O
        logging.info('----------------')
# make a symlink to the best model
logging.info('the best model is epoch %d.' % bestmodel_num)
| [
"pytorch_pretrained_bert.BertTokenizer.from_pretrained",
"data_handler.get_vocabulary",
"model.multimodal_encoder.MMEncoder",
"data_handler.make_batch_indices",
"torch.from_numpy",
"data_handler.align_vocab",
"pytorch_pretrained_bert.TransfoXLTokenizer.from_pretrained",
"torch.cuda.is_available",
"p... | [((1051, 1062), 'time.time', 'time.time', ([], {}), '()\n', (1060, 1062), False, 'import time\n'), ((3748, 3773), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3771, 3773), False, 'import argparse\n'), ((10962, 10989), 'random.seed', 'random.seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (10973, 10989), False, 'import random\n'), ((10994, 11024), 'numpy.random.seed', 'np.random.seed', (['args.rand_seed'], {}), '(args.rand_seed)\n', (11008, 11024), True, 'import numpy as np\n'), ((12089, 12144), 'logging.info', 'logging.info', (["('Extracting words from ' + args.train_set)"], {}), "('Extracting words from ' + args.train_set)\n", (12101, 12144), False, 'import logging\n'), ((12157, 12258), 'data_handler.get_vocabulary', 'dh.get_vocabulary', (['args.train_set'], {'include_caption': 'args.include_caption', 'tokenizer': 'bert_tokenizer'}), '(args.train_set, include_caption=args.include_caption,\n tokenizer=bert_tokenizer)\n', (12174, 12258), True, 'import data_handler as dh\n'), ((12489, 12549), 'logging.info', 'logging.info', (["('Loading training data from ' + args.train_set)"], {}), "('Loading training data from ' + args.train_set)\n", (12501, 12549), False, 'import logging\n'), ((12567, 12975), 'data_handler.load', 'dh.load', (['args.fea_type', 'args.train_path', 'args.train_set'], {'vocabfile': 'args.vocabfile', 'include_caption': 'args.include_caption', 'separate_caption': 'args.separate_caption', 'vocab': 'vocab', 'dictmap': 'dictmap', 'pretrained_elmo': 'args.pretrained_elmo', 'pretrained_bert': 'args.pretrained_bert', 'bert_model': 'args.bert_model', 'tokenizer': 'bert_tokenizer', 'pretrained_all': 'args.pretrained_all', 'concat_his': 'args.concat_his'}), '(args.fea_type, args.train_path, args.train_set, vocabfile=args.\n vocabfile, include_caption=args.include_caption, separate_caption=args.\n separate_caption, vocab=vocab, dictmap=dictmap, pretrained_elmo=args.\n pretrained_elmo, pretrained_bert=args.pretrained_bert, 
bert_model=args.\n bert_model, tokenizer=bert_tokenizer, pretrained_all=args.\n pretrained_all, concat_his=args.concat_his)\n', (12574, 12975), True, 'import data_handler as dh\n'), ((13109, 13171), 'logging.info', 'logging.info', (["('Loading validation data from ' + args.valid_set)"], {}), "('Loading validation data from ' + args.valid_set)\n", (13121, 13171), False, 'import logging\n'), ((13189, 13597), 'data_handler.load', 'dh.load', (['args.fea_type', 'args.valid_path', 'args.valid_set'], {'vocabfile': 'args.vocabfile', 'include_caption': 'args.include_caption', 'separate_caption': 'args.separate_caption', 'vocab': 'vocab', 'dictmap': 'dictmap', 'pretrained_elmo': 'args.pretrained_elmo', 'pretrained_bert': 'args.pretrained_bert', 'bert_model': 'args.bert_model', 'tokenizer': 'bert_tokenizer', 'pretrained_all': 'args.pretrained_all', 'concat_his': 'args.concat_his'}), '(args.fea_type, args.valid_path, args.valid_set, vocabfile=args.\n vocabfile, include_caption=args.include_caption, separate_caption=args.\n separate_caption, vocab=vocab, dictmap=dictmap, pretrained_elmo=args.\n pretrained_elmo, pretrained_bert=args.pretrained_bert, bert_model=args.\n bert_model, tokenizer=bert_tokenizer, pretrained_all=args.\n pretrained_all, concat_his=args.concat_his)\n', (13196, 13597), True, 'import data_handler as dh\n'), ((13747, 13775), 'data_handler.feature_shape', 'dh.feature_shape', (['train_data'], {}), '(train_data)\n', (13763, 13775), True, 'import data_handler as dh\n'), ((18470, 18799), 'model.mmseq2seq_model.MMSeq2SeqModel', 'MMSeq2SeqModel', (['mm_encoder', 'hlstm_encoder', 'input_encoder', 'hlstm_decoder'], {'fusioning': 'args.ft_fusioning', 'caption_encoder': 'caption_encoder', 'caption_states_att': 'args.caption_states_att', 'caption_mm_att': 'args.caption_mm_att', 'c_in_size': 'caption_state_size', 'mm_in_size': 'args.mout_size', 'out_size': 'args.caption_mm_fusion_out_size'}), '(mm_encoder, hlstm_encoder, input_encoder, hlstm_decoder,\n 
fusioning=args.ft_fusioning, caption_encoder=caption_encoder,\n caption_states_att=args.caption_states_att, caption_mm_att=args.\n caption_mm_att, c_in_size=caption_state_size, mm_in_size=args.mout_size,\n out_size=args.caption_mm_fusion_out_size)\n', (18484, 18799), False, 'from model.mmseq2seq_model import MMSeq2SeqModel\n'), ((18905, 18958), 'logging.info', 'logging.info', (['"""Making mini batches for training data"""'], {}), "('Making mini batches for training data')\n", (18917, 18958), False, 'import logging\n'), ((18994, 19117), 'data_handler.make_batch_indices', 'dh.make_batch_indices', (['train_data', 'args.batch_size'], {'max_length': 'args.max_length', 'separate_caption': 'args.separate_caption'}), '(train_data, args.batch_size, max_length=args.\n max_length, separate_caption=args.separate_caption)\n', (19015, 19117), True, 'import data_handler as dh\n'), ((19174, 19224), 'logging.info', 'logging.info', (["('#train sample = %d' % train_samples)"], {}), "('#train sample = %d' % train_samples)\n", (19186, 19224), False, 'import logging\n'), ((19323, 19378), 'logging.info', 'logging.info', (['"""Making mini batches for validation data"""'], {}), "('Making mini batches for validation data')\n", (19335, 19378), False, 'import logging\n'), ((19414, 19537), 'data_handler.make_batch_indices', 'dh.make_batch_indices', (['valid_data', 'args.batch_size'], {'max_length': 'args.max_length', 'separate_caption': 'args.separate_caption'}), '(valid_data, args.batch_size, max_length=args.\n max_length, separate_caption=args.separate_caption)\n', (19435, 19537), True, 'import data_handler as dh\n'), ((19590, 19645), 'logging.info', 'logging.info', (["('#validation sample = %d' % valid_samples)"], {}), "('#validation sample = %d' % valid_samples)\n", (19602, 19645), False, 'import logging\n'), ((20152, 20184), 'logging.info', 'logging.info', (['"""----------------"""'], {}), "('----------------')\n", (20164, 20184), False, 'import logging\n'), ((20189, 20219), 
'logging.info', 'logging.info', (['"""Start training"""'], {}), "('Start training')\n", (20201, 20219), False, 'import logging\n'), ((20224, 20256), 'logging.info', 'logging.info', (['"""----------------"""'], {}), "('----------------')\n", (20236, 20256), False, 'import logging\n'), ((21120, 21131), 'time.time', 'time.time', ([], {}), '()\n', (21129, 21131), False, 'import time\n'), ((21265, 21294), 'random.shuffle', 'random.shuffle', (['train_indices'], {}), '(train_indices)\n', (21279, 21294), False, 'import random\n'), ((21729, 21761), 'six.moves.range', 'six.moves.range', (['args.num_epochs'], {}), '(args.num_epochs)\n', (21744, 21761), False, 'import six\n'), ((26713, 26772), 'logging.info', 'logging.info', (["('the best model is epoch %d.' % bestmodel_num)"], {}), "('the best model is epoch %d.' % bestmodel_num)\n", (26725, 26772), False, 'import logging\n'), ((698, 972), 'data_handler.make_batch', 'dh.make_batch', (['data', 'index'], {'separate_caption': 'separate_caption', 'pretrained_elmo': 'args.pretrained_elmo', 'pretrained_bert': 'args.pretrained_bert', 'bert_tokenizer': 'bert_tokenizer', 'pretrained_all': 'args.pretrained_all', 'bert_model': 'args.bert_model', 'concat_his': 'args.concat_his'}), '(data, index, separate_caption=separate_caption,\n pretrained_elmo=args.pretrained_elmo, pretrained_bert=args.\n pretrained_bert, bert_tokenizer=bert_tokenizer, pretrained_all=args.\n pretrained_all, bert_model=args.bert_model, concat_his=args.concat_his)\n', (711, 972), True, 'import data_handler as dh\n'), ((1131, 1146), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1144, 1146), False, 'import torch\n'), ((3584, 3595), 'time.time', 'time.time', ([], {}), '()\n', (3593, 3595), False, 'import time\n'), ((3620, 3656), 'math.exp', 'math.exp', (['(eval_loss / eval_num_words)'], {}), '(eval_loss / eval_num_words)\n', (3628, 3656), False, 'import math\n'), ((11174, 11292), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 
'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s')\n", (11193, 11292), False, 'import logging\n'), ((11319, 11412), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s: %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s: %(message)s')\n", (11338, 11412), False, 'import logging\n'), ((12374, 12422), 'data_handler.get_word_emb', 'dh.get_word_emb', (['vocab', 'args.pretrained_word_emb'], {}), '(vocab, args.pretrained_word_emb)\n', (12389, 12422), True, 'import data_handler as dh\n'), ((14221, 14280), 'data_handler.align_vocab', 'dh.align_vocab', (['pretrained_vocab', 'vocab', 'pretrained_weights'], {}), '(pretrained_vocab, vocab, pretrained_weights)\n', (14235, 14280), True, 'import data_handler as dh\n'), ((15241, 15475), 'model.multimodal_encoder.MMEncoder', 'MMEncoder', (['feature_dims', 'args.mout_size'], {'enc_psize': 'args.enc_psize', 'enc_hsize': 'args.enc_hsize', 'att_size': 'args.att_size', 'state_size': 'mm_state_size', 'attention': 'args.mm_att', 'fusioning': 'args.mm_fusioning', 'att_hops': 'args.mm_att_hops'}), '(feature_dims, args.mout_size, enc_psize=args.enc_psize, enc_hsize\n =args.enc_hsize, att_size=args.att_size, state_size=mm_state_size,\n attention=args.mm_att, fusioning=args.mm_fusioning, att_hops=args.\n mm_att_hops)\n', (15250, 15475), False, 'from model.multimodal_encoder import MMEncoder\n'), ((19924, 19957), 'pickle.dump', 'pickle.dump', (['(vocab, args)', 'f', '(-1)'], {}), '((vocab, args), f, -1)\n', (19935, 19957), False, 'import pickle\n'), ((20922, 20988), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(1)', 'gamma': '(0.5)'}), '(optimizer, step_size=1, gamma=0.5)\n', (20953, 20988), False, 'import torch\n'), ((21830, 21943), 'logging.info', 'logging.info', 
(["('-------------------------Epoch %d : %s-----------------------' % (i + 1,\n args.optimizer))"], {}), "(\n '-------------------------Epoch %d : %s-----------------------' % (i + \n 1, args.optimizer))\n", (21842, 21943), False, 'import logging\n'), ((25236, 25274), 'math.exp', 'math.exp', (['(train_loss / train_num_words)'], {}), '(train_loss / train_num_words)\n', (25244, 25274), False, 'import math\n'), ((25281, 25349), 'logging.info', 'logging.info', (["('epoch: %d train perplexity: %f' % (i + 1, train_ppl))"], {}), "('epoch: %d train perplexity: %f' % (i + 1, train_ppl))\n", (25293, 25349), False, 'import logging\n'), ((25384, 25425), 'logging.info', 'logging.info', (['"""-------validation--------"""'], {}), "('-------validation--------')\n", (25396, 25425), False, 'import logging\n'), ((25440, 25451), 'time.time', 'time.time', ([], {}), '()\n', (25449, 25451), False, 'import time\n'), ((25545, 25600), 'logging.info', 'logging.info', (["('validation perplexity: %.4f' % valid_ppl)"], {}), "('validation perplexity: %.4f' % valid_ppl)\n", (25557, 25600), False, 'import logging\n'), ((25915, 25967), 'logging.info', 'logging.info', (["('writing model params to ' + modelfile)"], {}), "('writing model params to ' + modelfile)\n", (25927, 25967), False, 'import logging\n'), ((25976, 26004), 'torch.save', 'torch.save', (['model', 'modelfile'], {}), '(model, modelfile)\n', (25986, 26004), False, 'import torch\n'), ((26636, 26668), 'logging.info', 'logging.info', (['"""----------------"""'], {}), "('----------------')\n", (26648, 26668), False, 'import logging\n'), ((1197, 1481), 'data_handler.make_batch', 'dh.make_batch', (['data', 'indices[0]'], {'separate_caption': 'args.separate_caption', 'pretrained_elmo': 'args.pretrained_elmo', 'pretrained_bert': 'args.pretrained_bert', 'bert_tokenizer': 'bert_tokenizer', 'pretrained_all': 'args.pretrained_all', 'bert_model': 'args.bert_model', 'concat_his': 'args.concat_his'}), '(data, indices[0], 
separate_caption=args.separate_caption,\n pretrained_elmo=args.pretrained_elmo, pretrained_bert=args.\n pretrained_bert, bert_tokenizer=bert_tokenizer, pretrained_all=args.\n pretrained_all, bert_model=args.bert_model, concat_his=args.concat_his)\n', (1210, 1481), True, 'import data_handler as dh\n'), ((11626, 11672), 'pytorch_pretrained_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.bert_model'], {}), '(args.bert_model)\n', (11655, 11672), False, 'from pytorch_pretrained_bert import BertTokenizer, OpenAIGPTTokenizer, GPT2Tokenizer, TransfoXLTokenizer\n'), ((19772, 19797), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19795, 19797), False, 'import torch\n'), ((22033, 22335), 'data_handler.make_batch', 'dh.make_batch', (['train_data', 'train_indices[0]'], {'separate_caption': 'args.separate_caption', 'pretrained_elmo': 'args.pretrained_elmo', 'pretrained_bert': 'args.pretrained_bert', 'bert_tokenizer': 'bert_tokenizer', 'pretrained_all': 'args.pretrained_all', 'bert_model': 'args.bert_model', 'concat_his': 'args.concat_his'}), '(train_data, train_indices[0], separate_caption=args.\n separate_caption, pretrained_elmo=args.pretrained_elmo, pretrained_bert\n =args.pretrained_bert, bert_tokenizer=bert_tokenizer, pretrained_all=\n args.pretrained_all, bert_model=args.bert_model, concat_his=args.concat_his\n )\n', (22046, 22335), True, 'import data_handler as dh\n'), ((22523, 22549), 'six.moves.range', 'six.moves.range', (['n_batches'], {}), '(n_batches)\n', (22538, 22549), False, 'import six\n'), ((26088, 26179), 'logging.info', 'logging.info', (["('validation perplexity reduced %.4f -> %.4f' % (min_valid_ppl, valid_ppl))"], {}), "('validation perplexity reduced %.4f -> %.4f' % (min_valid_ppl,\n valid_ppl))\n", (26100, 26179), False, 'import logging\n'), ((26226, 26303), 'logging.info', 'logging.info', (["('a symbolic link is made as ' + args.model + '_best' + modelext)"], {}), "('a symbolic link is made as ' + 
args.model + '_best' + modelext)\n", (26238, 26303), False, 'import logging\n'), ((26319, 26366), 'os.path.exists', 'os.path.exists', (["(args.model + '_best' + modelext)"], {}), "(args.model + '_best' + modelext)\n", (26333, 26366), False, 'import os\n'), ((26570, 26581), 'time.time', 'time.time', ([], {}), '()\n', (26579, 26581), False, 'import time\n'), ((1915, 2019), 'threading.Thread', 'threading.Thread', ([], {'target': 'fetch_batch', 'args': '[dh, data, indices[j + 1], args.separate_caption, batch]'}), '(target=fetch_batch, args=[dh, data, indices[j + 1], args.\n separate_caption, batch])\n', (1931, 2019), False, 'import threading\n'), ((2135, 2154), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2151, 2154), False, 'import torch\n'), ((2368, 2387), 'torch.from_numpy', 'torch.from_numpy', (['q'], {}), '(q)\n', (2384, 2387), False, 'import torch\n'), ((2421, 2441), 'torch.from_numpy', 'torch.from_numpy', (['ai'], {}), '(ai)\n', (2437, 2441), False, 'import torch\n'), ((2476, 2496), 'torch.from_numpy', 'torch.from_numpy', (['ao'], {}), '(ao)\n', (2492, 2496), False, 'import torch\n'), ((11748, 11799), 'pytorch_pretrained_bert.OpenAIGPTTokenizer.from_pretrained', 'OpenAIGPTTokenizer.from_pretrained', (['args.bert_model'], {}), '(args.bert_model)\n', (11782, 11799), False, 'from pytorch_pretrained_bert import BertTokenizer, OpenAIGPTTokenizer, GPT2Tokenizer, TransfoXLTokenizer\n'), ((22771, 22886), 'threading.Thread', 'threading.Thread', ([], {'target': 'fetch_batch', 'args': '[dh, train_data, train_indices[j + 1], args.separate_caption, batch]'}), '(target=fetch_batch, args=[dh, train_data, train_indices[j +\n 1], args.separate_caption, batch])\n', (22787, 22886), False, 'import threading\n'), ((23016, 23035), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (23032, 23035), False, 'import torch\n'), ((23249, 23268), 'torch.from_numpy', 'torch.from_numpy', (['q'], {}), '(q)\n', (23265, 23268), False, 'import torch\n'), ((23302, 
23322), 'torch.from_numpy', 'torch.from_numpy', (['ai'], {}), '(ai)\n', (23318, 23322), False, 'import torch\n'), ((23357, 23377), 'torch.from_numpy', 'torch.from_numpy', (['ao'], {}), '(ao)\n', (23373, 23377), False, 'import torch\n'), ((24593, 24604), 'time.time', 'time.time', ([], {}), '()\n', (24602, 24604), False, 'import time\n'), ((24688, 24722), 'math.exp', 'math.exp', (['(cur_loss / cur_num_words)'], {}), '(cur_loss / cur_num_words)\n', (24696, 24722), False, 'import math\n'), ((26384, 26426), 'os.remove', 'os.remove', (["(args.model + '_best' + modelext)"], {}), "(args.model + '_best' + modelext)\n", (26393, 26426), False, 'import os\n'), ((2223, 2244), 'torch.from_numpy', 'torch.from_numpy', (['h_i'], {}), '(h_i)\n', (2239, 2244), False, 'import torch\n'), ((2572, 2591), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (2588, 2591), False, 'import torch\n'), ((11869, 11915), 'pytorch_pretrained_bert.GPT2Tokenizer.from_pretrained', 'GPT2Tokenizer.from_pretrained', (['args.bert_model'], {}), '(args.bert_model)\n', (11898, 11915), False, 'from pytorch_pretrained_bert import BertTokenizer, OpenAIGPTTokenizer, GPT2Tokenizer, TransfoXLTokenizer\n'), ((23104, 23125), 'torch.from_numpy', 'torch.from_numpy', (['h_i'], {}), '(h_i)\n', (23120, 23125), False, 'import torch\n'), ((23453, 23472), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (23469, 23472), False, 'import torch\n'), ((2302, 2321), 'torch.from_numpy', 'torch.from_numpy', (['h'], {}), '(h)\n', (2318, 2321), False, 'import torch\n'), ((11991, 12042), 'pytorch_pretrained_bert.TransfoXLTokenizer.from_pretrained', 'TransfoXLTokenizer.from_pretrained', (['args.bert_model'], {}), '(args.bert_model)\n', (12025, 12042), False, 'from pytorch_pretrained_bert import BertTokenizer, OpenAIGPTTokenizer, GPT2Tokenizer, TransfoXLTokenizer\n'), ((23183, 23202), 'torch.from_numpy', 'torch.from_numpy', (['h'], {}), '(h)\n', (23199, 23202), False, 'import torch\n')] |
"""
Contributions from:
DSEverything - Mean Mix - Math, Geo, Harmonic (LB 0.493)
https://www.kaggle.com/dongxu027/mean-mix-math-geo-harmonic-lb-0-493
JdPaletto - Surprised Yet? - Part2 - (LB: 0.503)
https://www.kaggle.com/jdpaletto/surprised-yet-part2-lb-0-503
hklee - weighted mean comparisons, LB 0.497, 1ST
https://www.kaggle.com/zeemeen/weighted-mean-comparisons-lb-0-497-1st
Also all comments for changes, encouragement, and forked scripts rock
Keep the Surprise Going
"""
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
from xgboost import XGBRegressor
data = {
'tra': pd.read_csv('../input/air_visit_data.csv'),
'as': pd.read_csv('../input/air_store_info.csv'),
'hs': pd.read_csv('../input/hpg_store_info.csv'),
'ar': pd.read_csv('../input/air_reserve.csv'),
'hr': pd.read_csv('../input/hpg_reserve.csv'),
'id': pd.read_csv('../input/store_id_relation.csv'),
'tes': pd.read_csv('../input/sample_submission.csv'),
'hol': pd.read_csv('../input/date_info.csv').rename(columns={'calendar_date':'visit_date'})
}
data['hr'] = pd.merge(data['hr'], data['id'], how='inner', on=['hpg_store_id'])
for df in ['ar','hr']:
data[df]['visit_datetime'] = pd.to_datetime(data[df]['visit_datetime'])
data[df]['visit_datetime'] = data[df]['visit_datetime'].dt.date
data[df]['reserve_datetime'] = pd.to_datetime(data[df]['reserve_datetime'])
data[df]['reserve_datetime'] = data[df]['reserve_datetime'].dt.date
data[df]['reserve_datetime_diff'] = data[df].apply(lambda r: (r['visit_datetime'] - r['reserve_datetime']).days, axis=1)
tmp1 = data[df].groupby(['air_store_id','visit_datetime'], as_index=False)[['reserve_datetime_diff', 'reserve_visitors']].sum().rename(columns={'visit_datetime':'visit_date', 'reserve_datetime_diff': 'rs1', 'reserve_visitors':'rv1'})
tmp2 = data[df].groupby(['air_store_id','visit_datetime'], as_index=False)[['reserve_datetime_diff', 'reserve_visitors']].mean().rename(columns={'visit_datetime':'visit_date', 'reserve_datetime_diff': 'rs2', 'reserve_visitors':'rv2'})
data[df] = pd.merge(tmp1, tmp2, how='inner', on=['air_store_id','visit_date'])
data['tra']['visit_date'] = pd.to_datetime(data['tra']['visit_date'])
data['tra']['dow'] = data['tra']['visit_date'].dt.dayofweek
data['tra']['year'] = data['tra']['visit_date'].dt.year
data['tra']['month'] = data['tra']['visit_date'].dt.month
data['tra']['visit_date'] = data['tra']['visit_date'].dt.date
data['tes']['visit_date'] = data['tes']['id'].map(lambda x: str(x).split('_')[2])
data['tes']['air_store_id'] = data['tes']['id'].map(lambda x: '_'.join(x.split('_')[:2]))
data['tes']['visit_date'] = pd.to_datetime(data['tes']['visit_date'])
data['tes']['dow'] = data['tes']['visit_date'].dt.dayofweek
data['tes']['year'] = data['tes']['visit_date'].dt.year
data['tes']['month'] = data['tes']['visit_date'].dt.month
data['tes']['visit_date'] = data['tes']['visit_date'].dt.date
unique_stores = data['tes']['air_store_id'].unique()
stores = pd.concat([pd.DataFrame({'air_store_id': unique_stores, 'dow': [i]*len(unique_stores)}) for i in range(7)], axis=0, ignore_index=True).reset_index(drop=True)
#sure it can be compressed...
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].min().rename(columns={'visitors':'min_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].mean().rename(columns={'visitors':'mean_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].median().rename(columns={'visitors':'median_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].max().rename(columns={'visitors':'max_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].count().rename(columns={'visitors':'count_observations'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
stores = pd.merge(stores, data['as'], how='left', on=['air_store_id'])
# NEW FEATURES FROM <NAME>nia
stores['air_genre_name'] = stores['air_genre_name'].map(lambda x: str(str(x).replace('/',' ')))
stores['air_area_name'] = stores['air_area_name'].map(lambda x: str(str(x).replace('-',' ')))
lbl = preprocessing.LabelEncoder()
for i in range(10):
stores['air_genre_name'+str(i)] = lbl.fit_transform(stores['air_genre_name'].map(lambda x: str(str(x).split(' ')[i]) if len(str(x).split(' '))>i else ''))
stores['air_area_name'+str(i)] = lbl.fit_transform(stores['air_area_name'].map(lambda x: str(str(x).split(' ')[i]) if len(str(x).split(' '))>i else ''))
stores['air_genre_name'] = lbl.fit_transform(stores['air_genre_name'])
stores['air_area_name'] = lbl.fit_transform(stores['air_area_name'])
data['hol']['visit_date'] = pd.to_datetime(data['hol']['visit_date'])
data['hol']['day_of_week'] = lbl.fit_transform(data['hol']['day_of_week'])
data['hol']['visit_date'] = data['hol']['visit_date'].dt.date
train = pd.merge(data['tra'], data['hol'], how='left', on=['visit_date'])
test = pd.merge(data['tes'], data['hol'], how='left', on=['visit_date'])
train = pd.merge(train, stores, how='left', on=['air_store_id','dow'])
test = pd.merge(test, stores, how='left', on=['air_store_id','dow'])
for df in ['ar','hr']:
train = pd.merge(train, data[df], how='left', on=['air_store_id','visit_date'])
test = pd.merge(test, data[df], how='left', on=['air_store_id','visit_date'])
train['id'] = train.apply(lambda r: '_'.join([str(r['air_store_id']), str(r['visit_date'])]), axis=1)
train['total_reserv_sum'] = train['rv1_x'] + train['rv1_y']
train['total_reserv_mean'] = (train['rv2_x'] + train['rv2_y']) / 2
train['total_reserv_dt_diff_mean'] = (train['rs2_x'] + train['rs2_y']) / 2
test['total_reserv_sum'] = test['rv1_x'] + test['rv1_y']
test['total_reserv_mean'] = (test['rv2_x'] + test['rv2_y']) / 2
test['total_reserv_dt_diff_mean'] = (test['rs2_x'] + test['rs2_y']) / 2
# NEW FEATURES FROM JMBULL
train['date_int'] = train['visit_date'].apply(lambda x: x.strftime('%Y%m%d')).astype(int)
test['date_int'] = test['visit_date'].apply(lambda x: x.strftime('%Y%m%d')).astype(int)
train['var_max_lat'] = train['latitude'].max() - train['latitude']
train['var_max_long'] = train['longitude'].max() - train['longitude']
test['var_max_lat'] = test['latitude'].max() - test['latitude']
test['var_max_long'] = test['longitude'].max() - test['longitude']
# NEW FEATURES FROM Georgii Vyshnia
train['lon_plus_lat'] = train['longitude'] + train['latitude']
test['lon_plus_lat'] = test['longitude'] + test['latitude']
lbl = preprocessing.LabelEncoder()
train['air_store_id2'] = lbl.fit_transform(train['air_store_id'])
test['air_store_id2'] = lbl.transform(test['air_store_id'])
col = [c for c in train if c not in ['id', 'air_store_id', 'visit_date','visitors']]
train = train.fillna(-1)
test = test.fillna(-1)
def RMSLE(y, pred):
return metrics.mean_squared_error(y, pred)**0.5
model1 = ensemble.GradientBoostingRegressor(learning_rate=0.2, random_state=3, n_estimators=200, subsample=0.8,
max_depth =10)
model2 = neighbors.KNeighborsRegressor(n_jobs=-1, n_neighbors=4)
model3 = XGBRegressor(learning_rate=0.2, random_state=3, n_estimators=280, subsample=0.8,
colsample_bytree=0.8, max_depth =12)
model1.fit(train[col], np.log1p(train['visitors'].values))
model2.fit(train[col], np.log1p(train['visitors'].values))
model3.fit(train[col], np.log1p(train['visitors'].values))
preds1 = model1.predict(train[col])
preds2 = model2.predict(train[col])
preds3 = model3.predict(train[col])
print('RMSE GradientBoostingRegressor: ', RMSLE(np.log1p(train['visitors'].values), preds1))
print('RMSE KNeighborsRegressor: ', RMSLE(np.log1p(train['visitors'].values), preds2))
print('RMSE XGBRegressor: ', RMSLE(np.log1p(train['visitors'].values), preds3))
preds1 = model1.predict(test[col])
preds2 = model2.predict(test[col])
preds3 = model3.predict(test[col])
test['visitors'] = 0.3*preds1+0.3*preds2+0.4*preds3
test['visitors'] = np.expm1(test['visitors']).clip(lower=0.)
sub1 = test[['id','visitors']].copy()
del train; del data;
# from hklee
# https://www.kaggle.com/zeemeen/weighted-mean-comparisons-lb-0-497-1st/code
dfs = { re.search('/([^/\.]*)\.csv', fn).group(1):
pd.read_csv(fn)for fn in glob.glob('../input/*.csv')}
for k, v in dfs.items(): locals()[k] = v
wkend_holidays = date_info.apply(
(lambda x:(x.day_of_week=='Sunday' or x.day_of_week=='Saturday') and x.holiday_flg==1), axis=1)
date_info.loc[wkend_holidays, 'holiday_flg'] = 0
date_info['weight'] = ((date_info.index + 1) / len(date_info)) ** 5
visit_data = air_visit_data.merge(date_info, left_on='visit_date', right_on='calendar_date', how='left')
visit_data.drop('calendar_date', axis=1, inplace=True)
visit_data['visitors'] = visit_data.visitors.map(pd.np.log1p)
wmean = lambda x:( (x.weight * x.visitors).sum() / x.weight.sum() )
visitors = visit_data.groupby(['air_store_id', 'day_of_week', 'holiday_flg']).apply(wmean).reset_index()
visitors.rename(columns={0:'visitors'}, inplace=True) # cumbersome, should be better ways.
sample_submission['air_store_id'] = sample_submission.id.map(lambda x: '_'.join(x.split('_')[:-1]))
sample_submission['calendar_date'] = sample_submission.id.map(lambda x: x.split('_')[2])
sample_submission.drop('visitors', axis=1, inplace=True)
sample_submission = sample_submission.merge(date_info, on='calendar_date', how='left')
sample_submission = sample_submission.merge(visitors, on=[
'air_store_id', 'day_of_week', 'holiday_flg'], how='left')
missings = sample_submission.visitors.isnull()
sample_submission.loc[missings, 'visitors'] = sample_submission[missings].merge(
visitors[visitors.holiday_flg==0], on=('air_store_id', 'day_of_week'),
how='left')['visitors_y'].values
missings = sample_submission.visitors.isnull()
sample_submission.loc[missings, 'visitors'] = sample_submission[missings].merge(
visitors[['air_store_id', 'visitors']].groupby('air_store_id').mean().reset_index(),
on='air_store_id', how='left')['visitors_y'].values
sample_submission['visitors'] = sample_submission.visitors.map(pd.np.expm1)
sub2 = sample_submission[['id', 'visitors']].copy()
sub_merge = pd.merge(sub1, sub2, on='id', how='inner')
sub_merge['visitors'] = 0.7*sub_merge['visitors_x'] + 0.3*sub_merge['visitors_y']* 1.1
sub_merge[['id', 'visitors']].to_csv('submission.csv', index=False) | [
"pandas.read_csv",
"pandas.merge",
"numpy.expm1",
"xgboost.XGBRegressor",
"glob.glob",
"numpy.log1p",
"pandas.to_datetime",
"re.search"
] | [((1128, 1194), 'pandas.merge', 'pd.merge', (["data['hr']", "data['id']"], {'how': '"""inner"""', 'on': "['hpg_store_id']"}), "(data['hr'], data['id'], how='inner', on=['hpg_store_id'])\n", (1136, 1194), True, 'import pandas as pd\n'), ((2229, 2270), 'pandas.to_datetime', 'pd.to_datetime', (["data['tra']['visit_date']"], {}), "(data['tra']['visit_date'])\n", (2243, 2270), True, 'import pandas as pd\n'), ((2708, 2749), 'pandas.to_datetime', 'pd.to_datetime', (["data['tes']['visit_date']"], {}), "(data['tes']['visit_date'])\n", (2722, 2749), True, 'import pandas as pd\n'), ((3375, 3436), 'pandas.merge', 'pd.merge', (['stores', 'tmp'], {'how': '"""left"""', 'on': "['air_store_id', 'dow']"}), "(stores, tmp, how='left', on=['air_store_id', 'dow'])\n", (3383, 3436), True, 'import pandas as pd\n'), ((3576, 3637), 'pandas.merge', 'pd.merge', (['stores', 'tmp'], {'how': '"""left"""', 'on': "['air_store_id', 'dow']"}), "(stores, tmp, how='left', on=['air_store_id', 'dow'])\n", (3584, 3637), True, 'import pandas as pd\n'), ((3780, 3841), 'pandas.merge', 'pd.merge', (['stores', 'tmp'], {'how': '"""left"""', 'on': "['air_store_id', 'dow']"}), "(stores, tmp, how='left', on=['air_store_id', 'dow'])\n", (3788, 3841), True, 'import pandas as pd\n'), ((3978, 4039), 'pandas.merge', 'pd.merge', (['stores', 'tmp'], {'how': '"""left"""', 'on': "['air_store_id', 'dow']"}), "(stores, tmp, how='left', on=['air_store_id', 'dow'])\n", (3986, 4039), True, 'import pandas as pd\n'), ((4184, 4245), 'pandas.merge', 'pd.merge', (['stores', 'tmp'], {'how': '"""left"""', 'on': "['air_store_id', 'dow']"}), "(stores, tmp, how='left', on=['air_store_id', 'dow'])\n", (4192, 4245), True, 'import pandas as pd\n'), ((4256, 4317), 'pandas.merge', 'pd.merge', (['stores', "data['as']"], {'how': '"""left"""', 'on': "['air_store_id']"}), "(stores, data['as'], how='left', on=['air_store_id'])\n", (4264, 4317), True, 'import pandas as pd\n'), ((5079, 5120), 'pandas.to_datetime', 'pd.to_datetime', 
(["data['hol']['visit_date']"], {}), "(data['hol']['visit_date'])\n", (5093, 5120), True, 'import pandas as pd\n'), ((5266, 5331), 'pandas.merge', 'pd.merge', (["data['tra']", "data['hol']"], {'how': '"""left"""', 'on': "['visit_date']"}), "(data['tra'], data['hol'], how='left', on=['visit_date'])\n", (5274, 5331), True, 'import pandas as pd\n'), ((5340, 5405), 'pandas.merge', 'pd.merge', (["data['tes']", "data['hol']"], {'how': '"""left"""', 'on': "['visit_date']"}), "(data['tes'], data['hol'], how='left', on=['visit_date'])\n", (5348, 5405), True, 'import pandas as pd\n'), ((5416, 5479), 'pandas.merge', 'pd.merge', (['train', 'stores'], {'how': '"""left"""', 'on': "['air_store_id', 'dow']"}), "(train, stores, how='left', on=['air_store_id', 'dow'])\n", (5424, 5479), True, 'import pandas as pd\n'), ((5487, 5549), 'pandas.merge', 'pd.merge', (['test', 'stores'], {'how': '"""left"""', 'on': "['air_store_id', 'dow']"}), "(test, stores, how='left', on=['air_store_id', 'dow'])\n", (5495, 5549), True, 'import pandas as pd\n'), ((7473, 7594), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'learning_rate': '(0.2)', 'random_state': '(3)', 'n_estimators': '(280)', 'subsample': '(0.8)', 'colsample_bytree': '(0.8)', 'max_depth': '(12)'}), '(learning_rate=0.2, random_state=3, n_estimators=280, subsample\n =0.8, colsample_bytree=0.8, max_depth=12)\n', (7485, 7594), False, 'from xgboost import XGBRegressor\n'), ((10538, 10580), 'pandas.merge', 'pd.merge', (['sub1', 'sub2'], {'on': '"""id"""', 'how': '"""inner"""'}), "(sub1, sub2, on='id', how='inner')\n", (10546, 10580), True, 'import pandas as pd\n'), ((643, 685), 'pandas.read_csv', 'pd.read_csv', (['"""../input/air_visit_data.csv"""'], {}), "('../input/air_visit_data.csv')\n", (654, 685), True, 'import pandas as pd\n'), ((697, 739), 'pandas.read_csv', 'pd.read_csv', (['"""../input/air_store_info.csv"""'], {}), "('../input/air_store_info.csv')\n", (708, 739), True, 'import pandas as pd\n'), ((751, 793), 'pandas.read_csv', 
'pd.read_csv', (['"""../input/hpg_store_info.csv"""'], {}), "('../input/hpg_store_info.csv')\n", (762, 793), True, 'import pandas as pd\n'), ((805, 844), 'pandas.read_csv', 'pd.read_csv', (['"""../input/air_reserve.csv"""'], {}), "('../input/air_reserve.csv')\n", (816, 844), True, 'import pandas as pd\n'), ((856, 895), 'pandas.read_csv', 'pd.read_csv', (['"""../input/hpg_reserve.csv"""'], {}), "('../input/hpg_reserve.csv')\n", (867, 895), True, 'import pandas as pd\n'), ((907, 952), 'pandas.read_csv', 'pd.read_csv', (['"""../input/store_id_relation.csv"""'], {}), "('../input/store_id_relation.csv')\n", (918, 952), True, 'import pandas as pd\n'), ((965, 1010), 'pandas.read_csv', 'pd.read_csv', (['"""../input/sample_submission.csv"""'], {}), "('../input/sample_submission.csv')\n", (976, 1010), True, 'import pandas as pd\n'), ((1252, 1294), 'pandas.to_datetime', 'pd.to_datetime', (["data[df]['visit_datetime']"], {}), "(data[df]['visit_datetime'])\n", (1266, 1294), True, 'import pandas as pd\n'), ((1398, 1442), 'pandas.to_datetime', 'pd.to_datetime', (["data[df]['reserve_datetime']"], {}), "(data[df]['reserve_datetime'])\n", (1412, 1442), True, 'import pandas as pd\n'), ((2132, 2200), 'pandas.merge', 'pd.merge', (['tmp1', 'tmp2'], {'how': '"""inner"""', 'on': "['air_store_id', 'visit_date']"}), "(tmp1, tmp2, how='inner', on=['air_store_id', 'visit_date'])\n", (2140, 2200), True, 'import pandas as pd\n'), ((5585, 5657), 'pandas.merge', 'pd.merge', (['train', 'data[df]'], {'how': '"""left"""', 'on': "['air_store_id', 'visit_date']"}), "(train, data[df], how='left', on=['air_store_id', 'visit_date'])\n", (5593, 5657), True, 'import pandas as pd\n'), ((5669, 5740), 'pandas.merge', 'pd.merge', (['test', 'data[df]'], {'how': '"""left"""', 'on': "['air_store_id', 'visit_date']"}), "(test, data[df], how='left', on=['air_store_id', 'visit_date'])\n", (5677, 5740), True, 'import pandas as pd\n'), ((7638, 7672), 'numpy.log1p', 'np.log1p', (["train['visitors'].values"], {}), 
"(train['visitors'].values)\n", (7646, 7672), True, 'import numpy as np\n'), ((7697, 7731), 'numpy.log1p', 'np.log1p', (["train['visitors'].values"], {}), "(train['visitors'].values)\n", (7705, 7731), True, 'import numpy as np\n'), ((7756, 7790), 'numpy.log1p', 'np.log1p', (["train['visitors'].values"], {}), "(train['visitors'].values)\n", (7764, 7790), True, 'import numpy as np\n'), ((8586, 8601), 'pandas.read_csv', 'pd.read_csv', (['fn'], {}), '(fn)\n', (8597, 8601), True, 'import pandas as pd\n'), ((7950, 7984), 'numpy.log1p', 'np.log1p', (["train['visitors'].values"], {}), "(train['visitors'].values)\n", (7958, 7984), True, 'import numpy as np\n'), ((8037, 8071), 'numpy.log1p', 'np.log1p', (["train['visitors'].values"], {}), "(train['visitors'].values)\n", (8045, 8071), True, 'import numpy as np\n'), ((8117, 8151), 'numpy.log1p', 'np.log1p', (["train['visitors'].values"], {}), "(train['visitors'].values)\n", (8125, 8151), True, 'import numpy as np\n'), ((8339, 8365), 'numpy.expm1', 'np.expm1', (["test['visitors']"], {}), "(test['visitors'])\n", (8347, 8365), True, 'import numpy as np\n'), ((8611, 8638), 'glob.glob', 'glob.glob', (['"""../input/*.csv"""'], {}), "('../input/*.csv')\n", (8620, 8638), False, 'import glob, re\n'), ((1023, 1060), 'pandas.read_csv', 'pd.read_csv', (['"""../input/date_info.csv"""'], {}), "('../input/date_info.csv')\n", (1034, 1060), True, 'import pandas as pd\n'), ((8539, 8573), 're.search', 're.search', (['"""/([^/\\\\.]*)\\\\.csv"""', 'fn'], {}), "('/([^/\\\\.]*)\\\\.csv', fn)\n", (8548, 8573), False, 'import glob, re\n')] |
import sys
from setuptools import setup, find_packages, Extension
import numpy as np
if '--use-cython' in sys.argv:
USE_CYTHON = True
sys.argv.remove('--use-cython')
else:
USE_CYTHON = False
ext = '.pyx' if USE_CYTHON else '.c'
# cppext = '' if USE_CYTHON else 'pp'
extensions = [
Extension(
"deepgraph._triu_indices",
["deepgraph/_triu_indices" + ext],
include_dirs=[np.get_include()],
# language='c++',
),
Extension(
"deepgraph._find_selected_indices",
["deepgraph/_find_selected_indices" + ext],
include_dirs=[np.get_include()]
)
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(
extensions,
compiler_directives={'language_level': sys.version_info[0]}
)
setup(
name="DeepGraph",
version='0.2.3',
packages=find_packages(),
author="<NAME>",
author_email="<EMAIL>",
url='https://github.com/deepgraph/deepgraph/',
download_url='https://github.com/deepgraph/deepgraph/tarball/v0.2.3',
description=("Analyze Data with Pandas-based Networks."),
long_description=open('README.rst').read(),
install_requires=['numpy>=1.6',
'pandas>=0.17.0'],
license="BSD",
classifiers=[
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Cython',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics'],
ext_modules=extensions,
package_data={'deepgraph': ['../tests/*.py',
'../LICENSE.txt',
'./*.pyx',
'./*.c',
'./*.cpp',
]},
)
| [
"Cython.Build.cythonize",
"setuptools.find_packages",
"sys.argv.remove",
"numpy.get_include"
] | [((143, 174), 'sys.argv.remove', 'sys.argv.remove', (['"""--use-cython"""'], {}), "('--use-cython')\n", (158, 174), False, 'import sys\n'), ((694, 781), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {'compiler_directives': "{'language_level': sys.version_info[0]}"}), "(extensions, compiler_directives={'language_level': sys.\n version_info[0]})\n", (703, 781), False, 'from Cython.Build import cythonize\n'), ((863, 878), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (876, 878), False, 'from setuptools import setup, find_packages, Extension\n'), ((411, 427), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (425, 427), True, 'import numpy as np\n'), ((596, 612), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (610, 612), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
def mtr(val, brackets, rates):
"""Calculates the marginal tax rate applied to a value depending on a
tax schedule.
:param val: Value to assess tax on, e.g. wealth or income (list or Series).
:param brackets: Left side of each bracket (list or Series).
:param rates: Rate corresponding to each bracket.
:returns: Series of the size of val representing the marginal tax rate.
"""
df_tax = pd.DataFrame({"brackets": brackets, "rates": rates})
df_tax["base_tax"] = (
df_tax.brackets.sub(df_tax.brackets.shift(fill_value=0))
.mul(df_tax.rates.shift(fill_value=0))
.cumsum()
)
rows = df_tax.brackets.searchsorted(val, side="right") - 1
income_bracket_df = df_tax.loc[rows].reset_index(drop=True)
return income_bracket_df.rates
def tax_from_mtrs(
val,
brackets,
rates,
avoidance_rate=0,
avoidance_elasticity=0,
avoidance_elasticity_flat=0,
):
"""Calculates tax liability based on a marginal tax rate schedule.
:param val: Value to assess tax on, e.g. wealth or income (list or Series).
:param brackets: Left side of each bracket (list or Series).
:param rates: Rate corresponding to each bracket.
:param avoidance_rate: Constant avoidance/evasion rate in percentage terms.
Defaults to zero.
:param avoidance_elasticity: Avoidance/evasion elasticity.
Response of log taxable value with respect
to tax rate.
Defaults to zero. Should be positive.
:param avoidance_elasticity_flat: Response of taxable value with respect
to tax rate.
Use avoidance_elasticity in most cases.
Defaults to zero. Should be positive.
:returns: Series of tax liabilities with the same size as val.
"""
assert (
avoidance_rate == 0
or avoidance_elasticity == 0
or avoidance_elasticity_flat == 0
), "Cannot supply multiple avoidance parameters."
assert (
avoidance_elasticity >= 0
), "Provide nonnegative avoidance_elasticity."
df_tax = pd.DataFrame({"brackets": brackets, "rates": rates})
df_tax["base_tax"] = (
df_tax.brackets.sub(df_tax.brackets.shift(fill_value=0))
.mul(df_tax.rates.shift(fill_value=0))
.cumsum()
)
if avoidance_rate == 0: # Only need MTRs if elasticity is supplied.
mtrs = mtr(val, brackets, rates)
if avoidance_elasticity > 0:
avoidance_rate = 1 - np.exp(-avoidance_elasticity * mtrs)
if avoidance_elasticity_flat > 0:
avoidance_rate = avoidance_elasticity_flat * mtrs
taxable = pd.Series(val) * (1 - avoidance_rate)
rows = df_tax.brackets.searchsorted(taxable, side="right") - 1
income_bracket_df = df_tax.loc[rows].reset_index(drop=True)
return (
pd.Series(taxable)
.sub(income_bracket_df.brackets)
.mul(income_bracket_df.rates)
.add(income_bracket_df.base_tax)
)
| [
"pandas.DataFrame",
"numpy.exp",
"pandas.Series"
] | [((462, 514), 'pandas.DataFrame', 'pd.DataFrame', (["{'brackets': brackets, 'rates': rates}"], {}), "({'brackets': brackets, 'rates': rates})\n", (474, 514), True, 'import pandas as pd\n'), ((2255, 2307), 'pandas.DataFrame', 'pd.DataFrame', (["{'brackets': brackets, 'rates': rates}"], {}), "({'brackets': brackets, 'rates': rates})\n", (2267, 2307), True, 'import pandas as pd\n'), ((2794, 2808), 'pandas.Series', 'pd.Series', (['val'], {}), '(val)\n', (2803, 2808), True, 'import pandas as pd\n'), ((2647, 2683), 'numpy.exp', 'np.exp', (['(-avoidance_elasticity * mtrs)'], {}), '(-avoidance_elasticity * mtrs)\n', (2653, 2683), True, 'import numpy as np\n'), ((2984, 3002), 'pandas.Series', 'pd.Series', (['taxable'], {}), '(taxable)\n', (2993, 3002), True, 'import pandas as pd\n')] |
import numpy as np
class Agent(object):
def __init__(self, k, policy, init_exploration, prior=0, gamma=None):
self.policy = policy
self.k = k
self.prior = prior
self.gamma = gamma
self._value_estimates = prior * np.ones(self.k) # Estimated Mean reward
self.action_attempts = np.zeros(self.k)
self.t = 0
self.last_action = None
self.init_exploration = init_exploration
def reset(self):
"""
Resets the agent's memory to an initial state.
"""
self._value_estimates[:] = self.prior * np.ones(self.k)
self.action_attempts[:] = np.zeros(self.k)
self.last_action = None
self.t = 0
def choose(self):
if self.t < self.init_exploration:
action = np.random.randint(self.k)
else:
action = self.policy.choose(self)
self.last_action = action
return action
def observe(self, reward): # Updating value_estimates ! (calculating mean rewards)
self.action_attempts[self.last_action] += 1
if self.gamma is None:
g = 1 / self.action_attempts[self.last_action]
else:
g = self.gamma
q = self._value_estimates[self.last_action]
self._value_estimates[self.last_action] += g * (reward - q)
self.t += 1
@property
def value_estimates(self):
return self._value_estimates
class ContextualAgent(Agent):
"""
( linUCB disjoint model)
"""
def __init__(self, k, d, policy, init_exploration, prior=0, gamma=None):
super().__init__(k, policy, init_exploration, prior, gamma)
self.d = d
self.memory = {action: {'A': np.identity(self.d),
'b': np.zeros((self.d, 1))} for action in range(self.k)}
self.states = np.array([])
self.reset()
def reset(self):
self._value_estimates[:] = self.prior * np.ones(self.k)
self.action_attempts[:] = 0
self.last_action = None
self.t = 0
self.memory = {action: {'A': np.identity(self.d),
'b': np.zeros((self.d, 1))} for action in range(self.k)}
self.states = np.array([])
# FIXME
def get_state(self, bandit):
self.states = bandit.states
for action, memory in self.memory.items():
A = memory['A']
b = memory['b']
A_inv = np.linalg.inv(A)
theta_hat = np.dot(A_inv, b)
x_t = self.states[action]
self._value_estimates[action] = np.dot(x_t.T, theta_hat)
def observe(self, reward):
self.action_attempts[self.last_action] += 1
self.memory[self.last_action]['A'] += np.outer(self.states[self.last_action],
self.states[self.last_action])
self.memory[self.last_action]['b'] += reward * self.states[self.last_action].reshape((self.d, 1))
self.t += 1
| [
"numpy.identity",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"numpy.random.randint",
"numpy.linalg.inv",
"numpy.dot"
] | [((330, 346), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (338, 346), True, 'import numpy as np\n'), ((646, 662), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (654, 662), True, 'import numpy as np\n'), ((1852, 1864), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1860, 1864), True, 'import numpy as np\n'), ((2228, 2240), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2236, 2240), True, 'import numpy as np\n'), ((2745, 2815), 'numpy.outer', 'np.outer', (['self.states[self.last_action]', 'self.states[self.last_action]'], {}), '(self.states[self.last_action], self.states[self.last_action])\n', (2753, 2815), True, 'import numpy as np\n'), ((258, 273), 'numpy.ones', 'np.ones', (['self.k'], {}), '(self.k)\n', (265, 273), True, 'import numpy as np\n'), ((596, 611), 'numpy.ones', 'np.ones', (['self.k'], {}), '(self.k)\n', (603, 611), True, 'import numpy as np\n'), ((801, 826), 'numpy.random.randint', 'np.random.randint', (['self.k'], {}), '(self.k)\n', (818, 826), True, 'import numpy as np\n'), ((1956, 1971), 'numpy.ones', 'np.ones', (['self.k'], {}), '(self.k)\n', (1963, 1971), True, 'import numpy as np\n'), ((2450, 2466), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (2463, 2466), True, 'import numpy as np\n'), ((2491, 2507), 'numpy.dot', 'np.dot', (['A_inv', 'b'], {}), '(A_inv, b)\n', (2497, 2507), True, 'import numpy as np\n'), ((2590, 2614), 'numpy.dot', 'np.dot', (['x_t.T', 'theta_hat'], {}), '(x_t.T, theta_hat)\n', (2596, 2614), True, 'import numpy as np\n'), ((1720, 1739), 'numpy.identity', 'np.identity', (['self.d'], {}), '(self.d)\n', (1731, 1739), True, 'import numpy as np\n'), ((1778, 1799), 'numpy.zeros', 'np.zeros', (['(self.d, 1)'], {}), '((self.d, 1))\n', (1786, 1799), True, 'import numpy as np\n'), ((2096, 2115), 'numpy.identity', 'np.identity', (['self.d'], {}), '(self.d)\n', (2107, 2115), True, 'import numpy as np\n'), ((2154, 2175), 'numpy.zeros', 'np.zeros', (['(self.d, 1)'], {}), '((self.d, 
1))\n', (2162, 2175), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from trading_gym.envs.portfolio_gym.portfolio_gym import PortfolioTradingGym
np.random.seed(64)
def create_mock_data(order_book_ids, start_date="2019-01-01", end_date="2022-01-02", number_feature=3):
trading_dates = pd.date_range(start=start_date, end=end_date, freq="D")
number = len(trading_dates) * len(order_book_ids)
multi_index = pd.MultiIndex.from_product([order_book_ids, trading_dates], names=["order_book_id", "datetime"])
mock_data = pd.DataFrame(np.random.randn(number, number_feature + 1), index=multi_index,
columns=["feature1", "feature2", "feature3", "returns"])
mock_data["returns"] = mock_data["returns"] / 100 # 当期收益率
mock_data["returns"] = round(mock_data["returns"], 4)
return mock_data
def test_single_array_R():
order_book_ids = ["000001.XSHE"]
mock_data = create_mock_data(order_book_ids=order_book_ids, start_date="2019-01-01", end_date="2019-01-14")
sequence_window = 1
env = PortfolioTradingGym(data_df=mock_data, sequence_window=sequence_window, add_cash=True)
state = env.reset()
h_t_list = []
orderlist = [0.5, 0.8, 0.4, 1.0, 0.0, -0.5, -0.7, -0.4, -1.0, 0.3]
for i in range(len(orderlist)):
next_state, reward, done, info = env.step([orderlist[i], 0])
h_t_list.append(info["h_t"])
'''
000001.XSHE 2019-01-01 0.0219
2019-01-02 -0.0103
2019-01-03 0.0175
2019-01-04 -0.0017
2019-01-05 -0.0039
2019-01-06 0.0059
2019-01-07 -0.0049
2019-01-08 -0.0003
2019-01-09 -0.0136
2019-01-10 0.0068
2019-01-11 0.0077
2019-01-12 0.0136
2019-01-13 -0.0022
2019-01-14 -0.0012
'''
expected_h_t = ([494850, 500050], [809848.6, 198999.898], [402853.3822, 605369.6309],
[1004290.9, 0], [0, 1004391.359], [-499734.9212, 1506737.699], [-704690.4898, 1712075.95],
[-397473.9834, 1410480.594], [-1019895.146, 2026216.001], [304220.9, 704495.1425])
np.testing.assert_almost_equal(h_t_list, expected_h_t, decimal=1)
def test_single_number_R():
order_book_ids = ["000001.XSHE"]
mock_data = create_mock_data(order_book_ids=order_book_ids, start_date="2019-01-01", end_date="2019-01-14")
sequence_window = 1
env = PortfolioTradingGym(data_df=mock_data, sequence_window=sequence_window, add_cash=True)
state = env.reset()
h_t_list = []
orderlist = [0.5, 0.8, 0.4, 1.0, 0.0, -0.5, -0.7, -0.4, -1.0, 0.3]
for i in range(len(orderlist)):
next_state, reward, done, info = env.step(orderlist[i])
h_t_list.append(info["h_t"])
expected_h_t = ([494850, 500050], [809848.6, 198999.898], [402853.3822, 605369.6309],
[1004290.9, 0], [0, 1004391.359], [-499734.9212, 1506737.699], [-704690.4898, 1712075.95],
[-397473.9834, 1410480.594], [-1019895.146, 2026216.001], [304220.9, 704495.1425])
np.testing.assert_almost_equal(h_t_list, expected_h_t, decimal=1)
def test_single_cashfalse_R():
order_book_ids = ["000001.XSHE"]
mock_data = create_mock_data(order_book_ids=order_book_ids, start_date="2019-01-01", end_date="2019-01-14")
sequence_window = 1
env = PortfolioTradingGym(data_df=mock_data, sequence_window=sequence_window, add_cash=False)
state = env.reset()
h_t_list = []
orderlist = [0.5, 0.8, 0.4, 1.0, 0.0, -0.5, -0.7, -0.4, -1.0, 0.3]
for i in range(len(orderlist)):
next_state, reward, done, info = env.step(orderlist[i])
h_t_list.append(info["h_t"])
expected_h_t = ([494850], [809848.6], [402853.3822],[1004290.9], [0], [-499734.9212], [-704690.4898],
[-397473.9834], [-1019895.146], [304220.9])
np.testing.assert_almost_equal(h_t_list, expected_h_t, decimal=1)
if __name__ == "__main__":
    # Only the scalar-order scenario runs by default; uncomment the other
    # calls to exercise the remaining cases manually.
    #test_single_array_R()
    test_single_number_R()
    #test_single_cashfalse_R()
'''
Results here are same as those in test_single_stock.py.
''' | [
"pandas.MultiIndex.from_product",
"trading_gym.envs.portfolio_gym.portfolio_gym.PortfolioTradingGym",
"numpy.testing.assert_almost_equal",
"numpy.random.seed",
"numpy.random.randn",
"pandas.date_range"
] | [((117, 135), 'numpy.random.seed', 'np.random.seed', (['(64)'], {}), '(64)\n', (131, 135), True, 'import numpy as np\n'), ((262, 317), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'end': 'end_date', 'freq': '"""D"""'}), "(start=start_date, end=end_date, freq='D')\n", (275, 317), True, 'import pandas as pd\n'), ((390, 491), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[order_book_ids, trading_dates]'], {'names': "['order_book_id', 'datetime']"}), "([order_book_ids, trading_dates], names=[\n 'order_book_id', 'datetime'])\n", (416, 491), True, 'import pandas as pd\n'), ((1020, 1110), 'trading_gym.envs.portfolio_gym.portfolio_gym.PortfolioTradingGym', 'PortfolioTradingGym', ([], {'data_df': 'mock_data', 'sequence_window': 'sequence_window', 'add_cash': '(True)'}), '(data_df=mock_data, sequence_window=sequence_window,\n add_cash=True)\n', (1039, 1110), False, 'from trading_gym.envs.portfolio_gym.portfolio_gym import PortfolioTradingGym\n'), ((2194, 2259), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['h_t_list', 'expected_h_t'], {'decimal': '(1)'}), '(h_t_list, expected_h_t, decimal=1)\n', (2224, 2259), True, 'import numpy as np\n'), ((2473, 2563), 'trading_gym.envs.portfolio_gym.portfolio_gym.PortfolioTradingGym', 'PortfolioTradingGym', ([], {'data_df': 'mock_data', 'sequence_window': 'sequence_window', 'add_cash': '(True)'}), '(data_df=mock_data, sequence_window=sequence_window,\n add_cash=True)\n', (2492, 2563), False, 'from trading_gym.envs.portfolio_gym.portfolio_gym import PortfolioTradingGym\n'), ((3118, 3183), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['h_t_list', 'expected_h_t'], {'decimal': '(1)'}), '(h_t_list, expected_h_t, decimal=1)\n', (3148, 3183), True, 'import numpy as np\n'), ((3400, 3491), 'trading_gym.envs.portfolio_gym.portfolio_gym.PortfolioTradingGym', 'PortfolioTradingGym', ([], {'data_df': 'mock_data', 'sequence_window': 'sequence_window', 
'add_cash': '(False)'}), '(data_df=mock_data, sequence_window=sequence_window,\n add_cash=False)\n', (3419, 3491), False, 'from trading_gym.envs.portfolio_gym.portfolio_gym import PortfolioTradingGym\n'), ((3912, 3977), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['h_t_list', 'expected_h_t'], {'decimal': '(1)'}), '(h_t_list, expected_h_t, decimal=1)\n', (3942, 3977), True, 'import numpy as np\n'), ((516, 559), 'numpy.random.randn', 'np.random.randn', (['number', '(number_feature + 1)'], {}), '(number, number_feature + 1)\n', (531, 559), True, 'import numpy as np\n')] |
import numpy as np
from Common import DIRECTION, TURN, BOARD_OBJECT, ALL_TURNS, ALL_DIRECTIONS, DIRECTION_UNIT_VECTORS, DIRECTION_MARKERS, Log
from GameStatistics import GameStatistics
from Network import Network
class SnakeGame:
    """Snake game on a square board, driven by a Network predictor.

    The board is tracked as a list of body positions (tail first, head last),
    a single apple position, and a dictionary of free cells.  A game ends when
    the snake hits a wall or its own body, fills the whole board, or makes
    ``max_moves`` consecutive moves without eating an apple.
    """
    @staticmethod
    def play(board_size, predictor: Network, max_moves, print_sensory = False):
        """Run one full game driven by ``predictor`` and return the finished game."""
        game = SnakeGame(board_size, max_moves=max_moves)
        while True:
            inputs = game.get_sensory_inputs()
            if(print_sensory):
                Log(str(inputs))
            # The predictor maps the 6 sensory inputs to an index into ALL_TURNS.
            index = predictor.predict(inputs)
            turn = ALL_TURNS[index]
            if(game.move(turn)):
                break
        return game
    @staticmethod
    def __init_free_positions(D: dict, board_size):
        """Reset ``D`` so that every (x, y) cell of the board is marked free."""
        D.clear()
        m = board_size
        x, y = np.meshgrid(range(0, m), range(0, m))
        coordinates = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1)), axis=1)
        for point in coordinates:
            D[tuple(point)] = 0
    def __init__(self, board_size, max_moves):
        """Create a board with a randomly placed one-cell snake and one apple."""
        self.__max_moves = max_moves # defines maximum number of moves consecutively without eating apple
        self.__no_apple_counter = 0 # number of consecutive moves without eating apple
        self.__history = [] # list of game states for each move
        self.__statistics = GameStatistics() # statistics object that holds count for left/right/forward moves and snake size
        self.__body = [] # list of snakes body part positions [x,y]. first is tail, last is head
        self.__apple = None # position of apple [x,y]
        self.__free_positions = dict() # dictionary of free positions. keys are positions (x,y)
        self.__board_size = board_size # side length of board
        self.__proximities = [1/i for i in range(1,board_size+1)] # list of proximities denoting how much close to an object (apple, wall or body)
        self.__init_free_positions(self.__free_positions, board_size)
        # Random initial heading and head cell; the apple goes on a free cell.
        self.__head_direction = np.random.choice(ALL_DIRECTIONS)
        head = np.random.randint(0, board_size, 2)
        self.__put_head(head)
        apple = self.__get_random_free_position()
        self.__put_apple(apple)
        self.__save_state()
    def __turn_head(self, turn: TURN):
        """Rotate the heading by ``turn`` (modulo 4 over ALL_DIRECTIONS) and record it."""
        self.__statistics.turn(turn)
        current_direction_value = self.__head_direction.value
        self.__head_direction = ALL_DIRECTIONS[(current_direction_value + turn.value) % 4]
    def __get_random_free_position(self):
        """Return a uniformly random free cell as an array, or None if the board is full."""
        frees = list(self.__free_positions.keys())
        n_free = len(frees)
        if n_free == 0:
            return None
        index = int(np.random.randint(0, n_free))
        free = frees[index]
        # todo: faster way to access random item in dictionary
        return np.array(free)
    def __is_free(self, position):
        """Return True when ``position`` is neither wall-adjacent body nor apple."""
        return tuple(position) in self.__free_positions
    def __set_free(self, position):
        """
        Adds given position to list of empty positions.
        """
        self.__free_positions[tuple(position)] = 0
    def __set_occupied(self, position):
        """
        Removes given position from list of empty positions.
        """
        if position is None:
            return
        del self.__free_positions[tuple(position)]
    def __put_apple(self, position):
        """Place the apple on ``position`` and mark the cell occupied."""
        self.__apple = position
        self.__set_occupied(position)
    def __put_head(self, position):
        """Append ``position`` as the new head and mark the cell occupied."""
        self.__body.append(position)
        self.__set_occupied(position)
        Log("head at: "+str(position) + " Looking: " + DIRECTION_MARKERS[self.__head_direction], level=0)
    def __move_head(self, position, is_apple_eaten):
        """Advance the head to ``position``; grow when an apple was eaten, else drop the tail."""
        self.__body.append(position) # add new head to body parts
        if is_apple_eaten == False:
            self.__no_apple_counter += 1 # increase no apple count
            tail = self.__body.pop(0) # remove old tail
            self.__set_free(tail)
            self.__set_occupied(position)
        else:
            self.__no_apple_counter = 0 # reset no apple counter
            self.__statistics.eat() # update statistics
            new_apple = self.__get_random_free_position()
            self.__put_apple(new_apple) # put new apple on board
        Log("head at: "+str(position) + " Looking: " + DIRECTION_MARKERS[self.__head_direction], level=0)
    def __is_outside(self, position):
        """Return True when ``position`` lies beyond the board borders."""
        m = self.__board_size
        x, y = position
        return x < 0 or x >= m or y < 0 or y >= m
    def __is_perfect_game(self):
        """Return True when the snake fills every cell of the board."""
        return len(self.__body) == (self.__board_size**2)
    def __save_state(self):
        """Append a (body copy, apple copy, heading) snapshot to the history."""
        if self.__apple is None:
            snapshot = self.__body[:], None, self.__head_direction
        else:
            snapshot = self.__body[:], self.__apple[:], self.__head_direction
        self.__history.append(snapshot)
    def __scan_direction(self, position, direction: DIRECTION):
        """
        Scans obstacles in given direction starting from position.
        Returns [apple_proximity, obstacle_proximity]
        """
        i = 0
        unit_step = DIRECTION_UNIT_VECTORS[direction]
        next = position
        while True:
            # Step one cell at a time; proximity decays as 1/(i+1) with distance.
            next = next + unit_step
            proximity = self.__proximities[i]
            if(self.__is_outside(next)): # wall
                return [0, proximity]
            elif(np.array_equal(next, self.__apple)): # apple
                return [proximity, 0]
            elif(self.__is_free(next) == False): # body
                return [0, proximity]
            i += 1
    ### PUBLIC METHODS
    def get_sensory_inputs(self):
        """
        Scans 3 directions relative to head and gets proximity measures in order:
        Forward Apple, Forward Obstacle, Left Apple, Left Obstacle, Right Apple, Right Obstacle
        Returns [f_apple, f_obstacle, l_apple, l_obstacle, r_apple, r_obstacle]
        """
        head = self.__body[-1]
        d_forward = self.__head_direction
        d_left = ALL_DIRECTIONS[(d_forward.value + TURN.LEFT.value) % 4]
        d_right = ALL_DIRECTIONS[(d_forward.value + TURN.RIGHT.value) % 4]
        s_forward = self.__scan_direction(head, d_forward)
        s_left= self.__scan_direction(head, d_left)
        s_right = self.__scan_direction(head, d_right)
        return s_forward + s_left + s_right
    def get_statistics(self):
        """
        Returns move counts for left,right,forward and snake size
        """
        return self.__statistics.get()
    def get_fitness(self):
        """Fitness = snake size, penalized when the snake never turned one way."""
        l,r,f,size = self.get_statistics()
        bias_coef = 1
        # A snake that only ever turns one direction is likely circling: penalize.
        if(l == 0 or r == 0):
            bias_coef = 0.1
        #total_moves = l+r+f
        #delta = abs(l-r)
        score = (bias_coef * size) #- delta*(0.1)
        return score
    def get_board_size(self):
        """Return the side length of the (square) board."""
        return self.__board_size
    def get_history(self):
        """Return the list of per-move state snapshots."""
        return self.__history
    def move(self, turn: TURN):
        """Apply one turn command and advance the snake; return True when the game ended."""
        Log("Command: "+str(turn), level=0)
        is_finished = False
        # change snake heads direction
        self.__turn_head(turn)
        # calculate heads next position
        head = self.__body[-1]
        next = head + DIRECTION_UNIT_VECTORS[self.__head_direction]
        # check if new head position is inside borders
        if self.__is_outside(next):
            is_finished = True
        # check if new head overlaps with apple
        elif np.array_equal(next, self.__apple):
            self.__move_head(next, is_apple_eaten=True)
            if(self.__is_perfect_game()):
                is_finished = True
        # check if new head overlaps with body
        elif self.__is_free(next) == False:
            is_finished = True
        # head is moving to empty
        else:
            self.__move_head(next, is_apple_eaten=False)
        # Starvation rule: too many consecutive moves without an apple ends the game.
        if(self.__no_apple_counter >= self.__max_moves):
            is_finished = True
        self.__save_state()
        return is_finished
| [
"numpy.random.choice",
"GameStatistics.GameStatistics",
"numpy.array",
"numpy.random.randint",
"numpy.array_equal"
] | [((1382, 1398), 'GameStatistics.GameStatistics', 'GameStatistics', ([], {}), '()\n', (1396, 1398), False, 'from GameStatistics import GameStatistics\n'), ((2110, 2142), 'numpy.random.choice', 'np.random.choice', (['ALL_DIRECTIONS'], {}), '(ALL_DIRECTIONS)\n', (2126, 2142), True, 'import numpy as np\n'), ((2159, 2194), 'numpy.random.randint', 'np.random.randint', (['(0)', 'board_size', '(2)'], {}), '(0, board_size, 2)\n', (2176, 2194), True, 'import numpy as np\n'), ((2901, 2915), 'numpy.array', 'np.array', (['free'], {}), '(free)\n', (2909, 2915), True, 'import numpy as np\n'), ((2765, 2793), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_free'], {}), '(0, n_free)\n', (2782, 2793), True, 'import numpy as np\n'), ((7598, 7632), 'numpy.array_equal', 'np.array_equal', (['next', 'self.__apple'], {}), '(next, self.__apple)\n', (7612, 7632), True, 'import numpy as np\n'), ((5519, 5553), 'numpy.array_equal', 'np.array_equal', (['next', 'self.__apple'], {}), '(next, self.__apple)\n', (5533, 5553), True, 'import numpy as np\n')] |
import os
import uuid
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import pytz
from google.protobuf.duration_pb2 import Duration
from pandas.testing import assert_frame_equal
from feast.client import Client
from feast.data_source import BigQuerySource, FileSource, KafkaSource
from feast.entity import Entity
from feast.feature import Feature
from feast.feature_table import FeatureTable
from feast.value_type import ValueType
from feast.wait import wait_retry_backoff
DIR_PATH = os.path.dirname(os.path.realpath(__file__))  # absolute directory of this test module
PROJECT_NAME = "basic_" + uuid.uuid4().hex.upper()[0:6]  # unique Feast project name per test run
SUFFIX = str(int(datetime.now().timestamp()))  # timestamp suffix to keep BigQuery table names unique
@pytest.fixture(scope="module")
def client(pytestconfig):
    """Feast client wired to the core/serving URLs given on the pytest CLI."""
    feast_client = Client(
        core_url=pytestconfig.getoption("core_url"),
        serving_url=pytestconfig.getoption("serving_url"),
    )
    feast_client.set_project(PROJECT_NAME)
    return feast_client
@pytest.fixture
def bq_table_id():
    """Fully qualified BigQuery table id, unique per test run."""
    return "kf-feast:feaste2e.table{}".format(SUFFIX)
@pytest.fixture
def customer_entity():
    """Customer entity shared by the basic feature-table round-trip tests."""
    entity_labels = {"team": "customer_service", "common_key": "common_val"}
    return Entity(
        name="customer_id",
        description="Customer entity for rides",
        value_type=ValueType.STRING,
        labels=entity_labels,
    )
@pytest.fixture
def driver_entity():
    """Driver entity shared by the basic feature-table round-trip tests."""
    entity_labels = {"team": "matchmaking", "common_key": "common_val"}
    return Entity(
        name="driver_id",
        description="Driver entity for car rides",
        value_type=ValueType.STRING,
        labels=entity_labels,
    )
@pytest.fixture
def basic_featuretable():
    """FeatureTable with both a Parquet batch source and a Kafka stream source."""
    # Both sources map the same logical columns; pass each its own dict copy.
    mapping = {
        "dev_entity": "dev_entity_field",
        "dev_feature_float": "dev_feature_float_field",
        "dev_feature_string": "dev_feature_string_field",
    }
    batch_source = FileSource(
        field_mapping=dict(mapping),
        file_format="PARQUET",
        file_url="gs://example/feast/*",
        timestamp_column="datetime_col",
        date_partition_column="datetime",
    )
    stream_source = KafkaSource(
        field_mapping=dict(mapping),
        bootstrap_servers="localhost:9094",
        class_path="random/path/to/class",
        topic="test_topic",
        timestamp_column="datetime_col",
    )
    features = [
        Feature(name="dev_feature_float", dtype=ValueType.FLOAT),
        Feature(name="dev_feature_string", dtype=ValueType.STRING),
    ]
    return FeatureTable(
        name="basic_featuretable",
        entities=["driver_id", "customer_id"],
        features=features,
        max_age=Duration(seconds=3600),
        batch_source=batch_source,
        stream_source=stream_source,
        labels={"key1": "val1", "key2": "val2"},
    )
@pytest.fixture
def bq_dataset():
    """Small in-memory DataFrame matching the basic feature-table schema.

    Returns 100 rows with a constant UTC timestamp, a float feature, and a
    string feature.  ``np.float`` (a deprecated alias for the builtin) was
    removed in NumPy 1.24, so the builtin ``float`` is used instead; the
    produced values are identical.
    """
    N_ROWS = 100
    time_offset = datetime.utcnow().replace(tzinfo=pytz.utc)
    return pd.DataFrame(
        {
            "datetime": [time_offset] * N_ROWS,
            "dev_feature_float": [float(row) for row in range(N_ROWS)],
            "dev_feature_string": ["feat_" + str(row) for row in range(N_ROWS)],
        }
    )
@pytest.fixture
def bq_featuretable(bq_table_id):
    """FeatureTable backed only by a BigQuery batch source."""
    features = [
        Feature(name="dev_feature_float", dtype=ValueType.FLOAT),
        Feature(name="dev_feature_string", dtype=ValueType.STRING),
    ]
    return FeatureTable(
        name="basic_featuretable",
        entities=["driver_id", "customer_id"],
        features=features,
        max_age=Duration(seconds=3600),
        batch_source=BigQuerySource(table_ref=bq_table_id, timestamp_column="datetime"),
    )
@pytest.fixture
def alltypes_entity():
    """Entity keyed on alltypes_id, labelled for the label-filter checks."""
    entity_labels = {"cat": "alltypes"}
    return Entity(
        name="alltypes_id",
        description="Driver entity for car rides",
        value_type=ValueType.STRING,
        labels=entity_labels,
    )
@pytest.fixture
def alltypes_featuretable():
    """FeatureTable exercising every supported feature value type."""
    batch_source = FileSource(
        file_format="parquet",
        file_url="file://feast/*",
        timestamp_column="ts_col",
        date_partition_column="date_partition_col",
    )
    # (name, dtype) pairs, one per supported scalar and list type.
    feature_specs = [
        ("float_feature", ValueType.FLOAT),
        ("int64_feature", ValueType.INT64),
        ("int32_feature", ValueType.INT32),
        ("string_feature", ValueType.STRING),
        ("bytes_feature", ValueType.BYTES),
        ("bool_feature", ValueType.BOOL),
        ("double_feature", ValueType.DOUBLE),
        ("double_list_feature", ValueType.DOUBLE_LIST),
        ("float_list_feature", ValueType.FLOAT_LIST),
        ("int64_list_feature", ValueType.INT64_LIST),
        ("int32_list_feature", ValueType.INT32_LIST),
        ("string_list_feature", ValueType.STRING_LIST),
        ("bytes_list_feature", ValueType.BYTES_LIST),
        ("bool_list_feature", ValueType.BOOL_LIST),
    ]
    return FeatureTable(
        name="alltypes",
        entities=["alltypes_id"],
        features=[Feature(name=fname, dtype=ftype) for fname, ftype in feature_specs],
        max_age=Duration(seconds=3600),
        batch_source=batch_source,
        labels={"cat": "alltypes"},
    )
def test_get_list_basic(
    client: Client,
    customer_entity: Entity,
    driver_entity: Entity,
    basic_featuretable: FeatureTable,
):
    """Entities and a feature table round-trip through apply/get/list."""
    # Register both entities.
    client.apply_entity(customer_entity)
    client.apply_entity(driver_entity)
    # get_entity returns exactly what was applied.
    assert client.get_entity(name="customer_id") == customer_entity
    assert client.get_entity(name="driver_id") == driver_entity
    # list_entities honours label filters: both share common_key, one is matchmaking.
    assert len(client.list_entities(labels={"common_key": "common_val"})) == 2
    assert len(client.list_entities(labels={"team": "matchmaking"})) == 1
    # Feature table round-trips through get and list.
    client.apply_feature_table(basic_featuretable)
    assert client.get_feature_table(name="basic_featuretable") == basic_featuretable
    listed = [ft for ft in client.list_feature_tables() if ft.name == "basic_featuretable"]
    assert listed[0] == basic_featuretable
def test_get_list_alltypes(
    client: Client, alltypes_entity: Entity, alltypes_featuretable: FeatureTable
):
    """Same round-trip checks as the basic case, for the all-types table."""
    client.apply_entity(alltypes_entity)
    # Entity round-trips and is found via its label filter.
    assert client.get_entity(name="alltypes_id") == alltypes_entity
    assert len(client.list_entities(labels={"cat": "alltypes"})) == 1
    # Feature table round-trips through get and list.
    client.apply_feature_table(alltypes_featuretable)
    assert client.get_feature_table(name="alltypes") == alltypes_featuretable
    listed = [ft for ft in client.list_feature_tables() if ft.name == "alltypes"]
    assert listed[0] == alltypes_featuretable
@pytest.mark.bq
def test_ingest(
    client: Client,
    customer_entity: Entity,
    driver_entity: Entity,
    bq_featuretable: FeatureTable,
    bq_dataset: pd.DataFrame,
    bq_table_id: str,
):
    """Ingest a DataFrame through Feast into BigQuery and verify the rows.

    Requires live BigQuery access; gated behind the ``bq`` pytest marker.
    The created table is deleted at the end.
    """
    # bq_table_id arrives as "project:dataset.table"; the BQ client wants dots.
    gcp_project, _ = bq_table_id.split(":")
    bq_table_id = bq_table_id.replace(":", ".")
    # ApplyEntity
    client.apply_entity(customer_entity)
    client.apply_entity(driver_entity)
    # ApplyFeatureTable
    client.apply_feature_table(bq_featuretable)
    client.ingest(bq_featuretable, bq_dataset, timeout=120)
    from google.api_core.exceptions import NotFound
    from google.cloud import bigquery
    bq_client = bigquery.Client(project=gcp_project)
    # Poll BQ for table until the table has been created
    def try_get_table():
        # Returns (table, exists); NotFound means ingestion hasn't landed yet.
        table_exist = False
        table_resp = None
        try:
            table_resp = bq_client.get_table(bq_table_id)
            if table_resp and table_resp.table_id == bq_table_id.split(".")[-1]:
                table_exist = True
        except NotFound:
            pass
        return table_resp, table_exist
    wait_retry_backoff(
        retry_fn=try_get_table,
        timeout_secs=30,
        timeout_msg="Timed out trying to get bigquery table",
    )
    query_string = f"SELECT * FROM `{bq_table_id}`"
    job = bq_client.query(query_string)
    query_df = job.to_dataframe()
    # Ingested rows must match the source DataFrame exactly.
    assert_frame_equal(query_df, bq_dataset)
    bq_client.delete_table(bq_table_id, not_found_ok=True)
| [
"google.protobuf.duration_pb2.Duration",
"numpy.float",
"datetime.datetime.utcnow",
"feast.feature.Feature",
"feast.data_source.BigQuerySource",
"feast.client.Client",
"feast.entity.Entity",
"feast.data_source.FileSource",
"feast.wait.wait_retry_backoff",
"os.path.realpath",
"feast.data_source.K... | [((670, 700), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (684, 700), False, 'import pytest\n'), ((537, 563), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (553, 563), False, 'import os\n'), ((847, 897), 'feast.client.Client', 'Client', ([], {'core_url': 'core_url', 'serving_url': 'serving_url'}), '(core_url=core_url, serving_url=serving_url)\n', (853, 897), False, 'from feast.client import Client\n'), ((1091, 1260), 'feast.entity.Entity', 'Entity', ([], {'name': '"""customer_id"""', 'description': '"""Customer entity for rides"""', 'value_type': 'ValueType.STRING', 'labels': "{'team': 'customer_service', 'common_key': 'common_val'}"}), "(name='customer_id', description='Customer entity for rides',\n value_type=ValueType.STRING, labels={'team': 'customer_service',\n 'common_key': 'common_val'})\n", (1097, 1260), False, 'from feast.entity import Entity\n'), ((1342, 1506), 'feast.entity.Entity', 'Entity', ([], {'name': '"""driver_id"""', 'description': '"""Driver entity for car rides"""', 'value_type': 'ValueType.STRING', 'labels': "{'team': 'matchmaking', 'common_key': 'common_val'}"}), "(name='driver_id', description='Driver entity for car rides',\n value_type=ValueType.STRING, labels={'team': 'matchmaking',\n 'common_key': 'common_val'})\n", (1348, 1506), False, 'from feast.entity import Entity\n'), ((1601, 1899), 'feast.data_source.FileSource', 'FileSource', ([], {'field_mapping': "{'dev_entity': 'dev_entity_field', 'dev_feature_float':\n 'dev_feature_float_field', 'dev_feature_string': 'dev_feature_string_field'\n }", 'file_format': '"""PARQUET"""', 'file_url': '"""gs://example/feast/*"""', 'timestamp_column': '"""datetime_col"""', 'date_partition_column': '"""datetime"""'}), "(field_mapping={'dev_entity': 'dev_entity_field',\n 'dev_feature_float': 'dev_feature_float_field', 'dev_feature_string':\n 'dev_feature_string_field'}, file_format='PARQUET', file_url=\n 
'gs://example/feast/*', timestamp_column='datetime_col',\n date_partition_column='datetime')\n", (1611, 1899), False, 'from feast.data_source import BigQuerySource, FileSource, KafkaSource\n'), ((1997, 2297), 'feast.data_source.KafkaSource', 'KafkaSource', ([], {'field_mapping': "{'dev_entity': 'dev_entity_field', 'dev_feature_float':\n 'dev_feature_float_field', 'dev_feature_string': 'dev_feature_string_field'\n }", 'bootstrap_servers': '"""localhost:9094"""', 'class_path': '"""random/path/to/class"""', 'topic': '"""test_topic"""', 'timestamp_column': '"""datetime_col"""'}), "(field_mapping={'dev_entity': 'dev_entity_field',\n 'dev_feature_float': 'dev_feature_float_field', 'dev_feature_string':\n 'dev_feature_string_field'}, bootstrap_servers='localhost:9094',\n class_path='random/path/to/class', topic='test_topic', timestamp_column\n ='datetime_col')\n", (2008, 2297), False, 'from feast.data_source import BigQuerySource, FileSource, KafkaSource\n'), ((3261, 3327), 'feast.data_source.BigQuerySource', 'BigQuerySource', ([], {'table_ref': 'bq_table_id', 'timestamp_column': '"""datetime"""'}), "(table_ref=bq_table_id, timestamp_column='datetime')\n", (3275, 3327), False, 'from feast.data_source import BigQuerySource, FileSource, KafkaSource\n'), ((3741, 3871), 'feast.entity.Entity', 'Entity', ([], {'name': '"""alltypes_id"""', 'description': '"""Driver entity for car rides"""', 'value_type': 'ValueType.STRING', 'labels': "{'cat': 'alltypes'}"}), "(name='alltypes_id', description='Driver entity for car rides',\n value_type=ValueType.STRING, labels={'cat': 'alltypes'})\n", (3747, 3871), False, 'from feast.entity import Entity\n'), ((3973, 4108), 'feast.data_source.FileSource', 'FileSource', ([], {'file_format': '"""parquet"""', 'file_url': '"""file://feast/*"""', 'timestamp_column': '"""ts_col"""', 'date_partition_column': '"""date_partition_col"""'}), "(file_format='parquet', file_url='file://feast/*',\n timestamp_column='ts_col', 
date_partition_column='date_partition_col')\n", (3983, 4108), False, 'from feast.data_source import BigQuerySource, FileSource, KafkaSource\n'), ((8221, 8257), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {'project': 'gcp_project'}), '(project=gcp_project)\n', (8236, 8257), False, 'from google.cloud import bigquery\n'), ((8670, 8788), 'feast.wait.wait_retry_backoff', 'wait_retry_backoff', ([], {'retry_fn': 'try_get_table', 'timeout_secs': '(30)', 'timeout_msg': '"""Timed out trying to get bigquery table"""'}), "(retry_fn=try_get_table, timeout_secs=30, timeout_msg=\n 'Timed out trying to get bigquery table')\n", (8688, 8788), False, 'from feast.wait import wait_retry_backoff\n'), ((8948, 8988), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['query_df', 'bq_dataset'], {}), '(query_df, bq_dataset)\n', (8966, 8988), False, 'from pandas.testing import assert_frame_equal\n'), ((2670, 2692), 'google.protobuf.duration_pb2.Duration', 'Duration', ([], {'seconds': '(3600)'}), '(seconds=3600)\n', (2678, 2692), False, 'from google.protobuf.duration_pb2 import Duration\n'), ((2892, 2909), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2907, 2909), False, 'from datetime import datetime\n'), ((3624, 3646), 'google.protobuf.duration_pb2.Duration', 'Duration', ([], {'seconds': '(3600)'}), '(seconds=3600)\n', (3632, 3646), False, 'from google.protobuf.duration_pb2 import Duration\n'), ((5272, 5294), 'google.protobuf.duration_pb2.Duration', 'Duration', ([], {'seconds': '(3600)'}), '(seconds=3600)\n', (5280, 5294), False, 'from google.protobuf.duration_pb2 import Duration\n'), ((638, 652), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (650, 652), False, 'from datetime import datetime\n'), ((2513, 2569), 'feast.feature.Feature', 'Feature', ([], {'name': '"""dev_feature_float"""', 'dtype': 'ValueType.FLOAT'}), "(name='dev_feature_float', dtype=ValueType.FLOAT)\n", (2520, 2569), False, 'from feast.feature import Feature\n'), 
((2583, 2641), 'feast.feature.Feature', 'Feature', ([], {'name': '"""dev_feature_string"""', 'dtype': 'ValueType.STRING'}), "(name='dev_feature_string', dtype=ValueType.STRING)\n", (2590, 2641), False, 'from feast.feature import Feature\n'), ((3052, 3065), 'numpy.float', 'np.float', (['row'], {}), '(row)\n', (3060, 3065), True, 'import numpy as np\n'), ((3467, 3523), 'feast.feature.Feature', 'Feature', ([], {'name': '"""dev_feature_float"""', 'dtype': 'ValueType.FLOAT'}), "(name='dev_feature_float', dtype=ValueType.FLOAT)\n", (3474, 3523), False, 'from feast.feature import Feature\n'), ((3537, 3595), 'feast.feature.Feature', 'Feature', ([], {'name': '"""dev_feature_string"""', 'dtype': 'ValueType.STRING'}), "(name='dev_feature_string', dtype=ValueType.STRING)\n", (3544, 3595), False, 'from feast.feature import Feature\n'), ((4259, 4311), 'feast.feature.Feature', 'Feature', ([], {'name': '"""float_feature"""', 'dtype': 'ValueType.FLOAT'}), "(name='float_feature', dtype=ValueType.FLOAT)\n", (4266, 4311), False, 'from feast.feature import Feature\n'), ((4325, 4377), 'feast.feature.Feature', 'Feature', ([], {'name': '"""int64_feature"""', 'dtype': 'ValueType.INT64'}), "(name='int64_feature', dtype=ValueType.INT64)\n", (4332, 4377), False, 'from feast.feature import Feature\n'), ((4391, 4443), 'feast.feature.Feature', 'Feature', ([], {'name': '"""int32_feature"""', 'dtype': 'ValueType.INT32'}), "(name='int32_feature', dtype=ValueType.INT32)\n", (4398, 4443), False, 'from feast.feature import Feature\n'), ((4457, 4511), 'feast.feature.Feature', 'Feature', ([], {'name': '"""string_feature"""', 'dtype': 'ValueType.STRING'}), "(name='string_feature', dtype=ValueType.STRING)\n", (4464, 4511), False, 'from feast.feature import Feature\n'), ((4525, 4577), 'feast.feature.Feature', 'Feature', ([], {'name': '"""bytes_feature"""', 'dtype': 'ValueType.BYTES'}), "(name='bytes_feature', dtype=ValueType.BYTES)\n", (4532, 4577), False, 'from feast.feature import Feature\n'), ((4591, 
4641), 'feast.feature.Feature', 'Feature', ([], {'name': '"""bool_feature"""', 'dtype': 'ValueType.BOOL'}), "(name='bool_feature', dtype=ValueType.BOOL)\n", (4598, 4641), False, 'from feast.feature import Feature\n'), ((4655, 4709), 'feast.feature.Feature', 'Feature', ([], {'name': '"""double_feature"""', 'dtype': 'ValueType.DOUBLE'}), "(name='double_feature', dtype=ValueType.DOUBLE)\n", (4662, 4709), False, 'from feast.feature import Feature\n'), ((4723, 4787), 'feast.feature.Feature', 'Feature', ([], {'name': '"""double_list_feature"""', 'dtype': 'ValueType.DOUBLE_LIST'}), "(name='double_list_feature', dtype=ValueType.DOUBLE_LIST)\n", (4730, 4787), False, 'from feast.feature import Feature\n'), ((4801, 4863), 'feast.feature.Feature', 'Feature', ([], {'name': '"""float_list_feature"""', 'dtype': 'ValueType.FLOAT_LIST'}), "(name='float_list_feature', dtype=ValueType.FLOAT_LIST)\n", (4808, 4863), False, 'from feast.feature import Feature\n'), ((4877, 4939), 'feast.feature.Feature', 'Feature', ([], {'name': '"""int64_list_feature"""', 'dtype': 'ValueType.INT64_LIST'}), "(name='int64_list_feature', dtype=ValueType.INT64_LIST)\n", (4884, 4939), False, 'from feast.feature import Feature\n'), ((4953, 5015), 'feast.feature.Feature', 'Feature', ([], {'name': '"""int32_list_feature"""', 'dtype': 'ValueType.INT32_LIST'}), "(name='int32_list_feature', dtype=ValueType.INT32_LIST)\n", (4960, 5015), False, 'from feast.feature import Feature\n'), ((5029, 5093), 'feast.feature.Feature', 'Feature', ([], {'name': '"""string_list_feature"""', 'dtype': 'ValueType.STRING_LIST'}), "(name='string_list_feature', dtype=ValueType.STRING_LIST)\n", (5036, 5093), False, 'from feast.feature import Feature\n'), ((5107, 5169), 'feast.feature.Feature', 'Feature', ([], {'name': '"""bytes_list_feature"""', 'dtype': 'ValueType.BYTES_LIST'}), "(name='bytes_list_feature', dtype=ValueType.BYTES_LIST)\n", (5114, 5169), False, 'from feast.feature import Feature\n'), ((5183, 5243), 'feast.feature.Feature', 
'Feature', ([], {'name': '"""bool_list_feature"""', 'dtype': 'ValueType.BOOL_LIST'}), "(name='bool_list_feature', dtype=ValueType.BOOL_LIST)\n", (5190, 5243), False, 'from feast.feature import Feature\n'), ((591, 603), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (601, 603), False, 'import uuid\n')] |
import numpy as np
from bunch import Bunch
from analysis import bunch_coord_array
from analysis import BunchStats
from analysis import DanilovEnvelopeBunch
from orbit.teapot import DriftTEAPOT
class AnalysisNode(DriftTEAPOT):
    """Zero-length drift node that records per-turn data in the lattice.

    Subclasses implement ``track`` and append one entry to ``self.data`` on
    each stored turn; ``skip`` selects every (skip + 1)-th turn.
    """
    def __init__(self, name, skip=0):
        DriftTEAPOT.__init__(self, name)
        self.setLength(0.0)
        self.position = None    # longitudinal position in the lattice
        self.skip = skip        # turns skipped between stores
        self.active = True      # when False, subclasses record nothing
        self.turn = 0           # turns tracked so far
        self.data = []          # one entry per stored turn
        self.turns_stored = []  # turn indices at which data was stored
    def set_position(self, position):
        """Record the node's position in the lattice."""
        self.position = position
    def get_data(self, turn='all'):
        """Return all stored entries, or the entry at one stored-turn index."""
        return self.data if turn == 'all' else self.data[turn]
    def clear_data(self):
        """Drop everything stored so far."""
        self.data = []
    def should_store(self):
        """True on turns selected by the skip setting."""
        return self.turn % (self.skip + 1) == 0
    def set_active(self, active):
        """Enable or disable data collection."""
        self.active = active
class BunchMonitorNode(AnalysisNode):
    """Stores the full particle coordinate array on selected turns."""
    def __init__(self, name='bunch_monitor', mm_mrad=False, transverse_only=True, skip=0):
        AnalysisNode.__init__(self, name, skip)
        self.mm_mrad = mm_mrad
        self.transverse_only = transverse_only
    def track(self, params_dict):
        """Capture the bunch coordinates, then advance the turn counter."""
        if self.active and self.should_store():
            coords = bunch_coord_array(params_dict['bunch'], self.mm_mrad, self.transverse_only)
            self.data.append(coords)
            self.turns_stored.append(self.turn)
        self.turn += 1
class BunchStatsNode(AnalysisNode):
    """Stores second-moment (covariance) statistics of the bunch on selected turns."""
    def __init__(self, name='bunch_stats', mm_mrad=False, skip=0):
        AnalysisNode.__init__(self, name, skip)
        self.mm_mrad = mm_mrad
    def track(self, params_dict):
        """Compute the transverse covariance matrix and store its statistics."""
        if self.active and self.should_store():
            coords = bunch_coord_array(params_dict['bunch'], self.mm_mrad, transverse_only=True)
            covariance = np.cov(coords.T)
            self.data.append(BunchStats(covariance))
            self.turns_stored.append(self.turn)
        self.turn += 1
class DanilovEnvelopeBunchMonitorNode(AnalysisNode):
    """Stores a DanilovEnvelopeBunch built from the coordinates on selected turns."""
    def __init__(self, name='envelope_monitor', mm_mrad=False, skip=0):
        AnalysisNode.__init__(self, name, skip)
        self.mm_mrad = mm_mrad
    def track(self, params_dict):
        """Capture an envelope bunch for the current turn."""
        if self.should_store() and self.active:
            coords = bunch_coord_array(params_dict['bunch'], self.mm_mrad, transverse_only=True)
            self.data.append(DanilovEnvelopeBunch(coords))
            self.turns_stored.append(self.turn)
self.turn += 1 | [
"orbit.teapot.DriftTEAPOT.__init__",
"analysis.bunch_coord_array",
"analysis.DanilovEnvelopeBunch",
"analysis.BunchStats",
"numpy.cov"
] | [((280, 312), 'orbit.teapot.DriftTEAPOT.__init__', 'DriftTEAPOT.__init__', (['self', 'name'], {}), '(self, name)\n', (300, 312), False, 'from orbit.teapot import DriftTEAPOT\n'), ((1358, 1418), 'analysis.bunch_coord_array', 'bunch_coord_array', (['bunch', 'self.mm_mrad', 'self.transverse_only'], {}), '(bunch, self.mm_mrad, self.transverse_only)\n', (1375, 1418), False, 'from analysis import bunch_coord_array\n'), ((1871, 1931), 'analysis.bunch_coord_array', 'bunch_coord_array', (['bunch', 'self.mm_mrad'], {'transverse_only': '(True)'}), '(bunch, self.mm_mrad, transverse_only=True)\n', (1888, 1931), False, 'from analysis import bunch_coord_array\n'), ((1952, 1963), 'numpy.cov', 'np.cov', (['X.T'], {}), '(X.T)\n', (1958, 1963), True, 'import numpy as np\n'), ((1990, 2007), 'analysis.BunchStats', 'BunchStats', (['Sigma'], {}), '(Sigma)\n', (2000, 2007), False, 'from analysis import BunchStats\n'), ((2492, 2552), 'analysis.bunch_coord_array', 'bunch_coord_array', (['bunch', 'self.mm_mrad'], {'transverse_only': '(True)'}), '(bunch, self.mm_mrad, transverse_only=True)\n', (2509, 2552), False, 'from analysis import bunch_coord_array\n'), ((2577, 2600), 'analysis.DanilovEnvelopeBunch', 'DanilovEnvelopeBunch', (['X'], {}), '(X)\n', (2597, 2600), False, 'from analysis import DanilovEnvelopeBunch\n')] |
import numpy as np
import matplotlib.pyplot as plt
GENERATE_POINTS_DIST = './data/generatePoints_distance.txt'
GENERATE_POINTS = './data/generatePoints.txt'
r = np.random.RandomState(24)
o = r.randn(400, 2)
o[:, 0] += 2
o[:, 1] += 6
u = r.randn(400, 2)
u[:, 0] += 4
u[:, 1] -= 0.5
v = r.randn(400, 2)
v[:, 0] += 7
v[:, 1] -= 0.5
p = r.randn(400, 2)
q = r.randn(400, 2) + 3
# q[:, 0] += 3
# q[:, 1] += 9
s = r.randn(400, 2) + 6
t = np.concatenate((o, p, q, s, u, v), axis=0)
with open(GENERATE_POINTS, 'w', encoding='utf-8') as f:
for pos in range(len(t)):
cor = t[pos]
f.write(str(pos) + ' ' + str(cor[0]) + ' ' + str(cor[1]) + '\n')
d = lambda x, y: np.sqrt(np.power((x[0] - y[0]), 2) + np.power((x[1] - y[1]), 2))
with open(GENERATE_POINTS_DIST, 'w', encoding='utf-8') as f:
for i in range(len(t)):
for j in range(i + 1, len(t)):
distance = d(t[i], t[j])
f.write(str(i) + ' ' + str(j) + ' ' + str(distance) + '\n')
# Without labels
x, y = t[:, 0], t[:, 1]
plt.plot(x, y, 'ok', markersize=1, alpha=0.5)
# plt.axis([-3, 10, -3, 9])
plt.xlabel('x')
plt.ylabel('y')
plt.title('Generated Points Plot')
plt.savefig('./images/generatedPoints.png')
plt.close()
color = {0: 'c', 1: 'r', 2: 'g', 3: 'b', 4: 'm', 5: 'y'}
cluster = [o, p, q, s, u, v]
for i in range(len(cluster)):
cur = cluster[i]
x, y = cur[:, 0], cur[:, 1]
plt.scatter(x, y, s=1, c=color[i], alpha=0.7, label=i + 1)
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.title('Generated Points with Lable')
plt.savefig('./images/generatedColoredPoints.png')
plt.show() | [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.concatenate",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.random.RandomState",
"matplot... | [((163, 188), 'numpy.random.RandomState', 'np.random.RandomState', (['(24)'], {}), '(24)\n', (184, 188), True, 'import numpy as np\n'), ((434, 476), 'numpy.concatenate', 'np.concatenate', (['(o, p, q, s, u, v)'], {'axis': '(0)'}), '((o, p, q, s, u, v), axis=0)\n', (448, 476), True, 'import numpy as np\n'), ((1021, 1066), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ok"""'], {'markersize': '(1)', 'alpha': '(0.5)'}), "(x, y, 'ok', markersize=1, alpha=0.5)\n", (1029, 1066), True, 'import matplotlib.pyplot as plt\n'), ((1095, 1110), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1105, 1110), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1126), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1121, 1126), True, 'import matplotlib.pyplot as plt\n'), ((1127, 1161), 'matplotlib.pyplot.title', 'plt.title', (['"""Generated Points Plot"""'], {}), "('Generated Points Plot')\n", (1136, 1161), True, 'import matplotlib.pyplot as plt\n'), ((1162, 1205), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/generatedPoints.png"""'], {}), "('./images/generatedPoints.png')\n", (1173, 1205), True, 'import matplotlib.pyplot as plt\n'), ((1206, 1217), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1215, 1217), True, 'import matplotlib.pyplot as plt\n'), ((1451, 1463), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1461, 1463), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1479), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1474, 1479), True, 'import matplotlib.pyplot as plt\n'), ((1480, 1495), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1490, 1495), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1536), 'matplotlib.pyplot.title', 'plt.title', (['"""Generated Points with Lable"""'], {}), "('Generated Points with Lable')\n", (1505, 1536), True, 'import matplotlib.pyplot as plt\n'), ((1537, 1587), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/generatedColoredPoints.png"""'], {}), "('./images/generatedColoredPoints.png')\n", (1548, 1587), True, 'import matplotlib.pyplot as plt\n'), ((1588, 1598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1596, 1598), True, 'import matplotlib.pyplot as plt\n'), ((1392, 1450), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': '(1)', 'c': 'color[i]', 'alpha': '(0.7)', 'label': '(i + 1)'}), '(x, y, s=1, c=color[i], alpha=0.7, label=i + 1)\n', (1403, 1450), True, 'import matplotlib.pyplot as plt\n'), ((684, 708), 'numpy.power', 'np.power', (['(x[0] - y[0])', '(2)'], {}), '(x[0] - y[0], 2)\n', (692, 708), True, 'import numpy as np\n'), ((713, 737), 'numpy.power', 'np.power', (['(x[1] - y[1])', '(2)'], {}), '(x[1] - y[1], 2)\n', (721, 737), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
from PIL import Image, ImageDraw
import torch.utils.data as data
import numpy as np
import random
import sys; sys.path.append('../')
from utils.augmentations import preprocess
class WIDERDetection(data.Dataset):
"""docstring for WIDERDetection"""
def __init__(self, list_file, mode='train', mono_mode=False):
super(WIDERDetection, self).__init__()
self.mode = mode
self.mono_mode = mono_mode
self.fnames = []
self.boxes = []
self.labels = []
with open(list_file) as f:
lines = f.readlines()
for line in lines:
line = line.strip().split()
num_faces = int(line[1])
box = []
label = []
for i in range(num_faces):
x = float(line[2 + 5 * i])
y = float(line[3 + 5 * i])
w = float(line[4 + 5 * i])
h = float(line[5 + 5 * i])
c = int(line[6 + 5 * i])
if w <= 0 or h <= 0:
continue
box.append([x, y, x + w, y + h])
label.append(c)
if len(box) > 0:
self.fnames.append(line[0])
self.boxes.append(box)
self.labels.append(label)
self.num_samples = len(self.boxes)
def __len__(self):
return self.num_samples
def __getitem__(self, index):
img, target, h, w = self.pull_item(index)
return img, target
def pull_item(self, index):
while True:
image_path = self.fnames[index]
img = Image.open(image_path)
if img.mode == 'L':
img = img.convert('RGB')
im_width, im_height = img.size
boxes = self.annotransform(
np.array(self.boxes[index]), im_width, im_height)
label = np.array(self.labels[index])
bbox_labels = np.hstack((label[:, np.newaxis], boxes)).tolist()
img, sample_labels = preprocess(
img, bbox_labels, self.mode, image_path)
sample_labels = np.array(sample_labels)
if len(sample_labels) > 0:
target = np.hstack(
(sample_labels[:, 1:], sample_labels[:, 0][:, np.newaxis]))
assert (target[:, 2] > target[:, 0]).any()
assert (target[:, 3] > target[:, 1]).any()
break
else:
index = random.randrange(0, self.num_samples)
if self.mono_mode==True:
im = 0.299 * img[0] + 0.587 * img[1] + 0.114 * img[2]
return torch.from_numpy(np.expand_dims(im,axis=0)), target, im_height, im_width
return torch.from_numpy(img), target, im_height, im_width
def annotransform(self, boxes, im_width, im_height):
boxes[:, 0] /= im_width
boxes[:, 1] /= im_height
boxes[:, 2] /= im_width
boxes[:, 3] /= im_height
return boxes
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on
0 dim
"""
targets = []
imgs = []
for sample in batch:
imgs.append(sample[0])
targets.append(torch.FloatTensor(sample[1]))
return torch.stack(imgs, 0), targets
| [
"PIL.Image.open",
"numpy.hstack",
"random.randrange",
"torch.stack",
"torch.from_numpy",
"numpy.array",
"utils.augmentations.preprocess",
"numpy.expand_dims",
"sys.path.append",
"torch.FloatTensor"
] | [((218, 240), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (233, 240), False, 'import sys\n'), ((3733, 3753), 'torch.stack', 'torch.stack', (['imgs', '(0)'], {}), '(imgs, 0)\n', (3744, 3753), False, 'import torch\n'), ((1708, 1730), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1718, 1730), False, 'from PIL import Image, ImageDraw\n'), ((1974, 2002), 'numpy.array', 'np.array', (['self.labels[index]'], {}), '(self.labels[index])\n', (1982, 2002), True, 'import numpy as np\n'), ((2112, 2163), 'utils.augmentations.preprocess', 'preprocess', (['img', 'bbox_labels', 'self.mode', 'image_path'], {}), '(img, bbox_labels, self.mode, image_path)\n', (2122, 2163), False, 'from utils.augmentations import preprocess\n'), ((2209, 2232), 'numpy.array', 'np.array', (['sample_labels'], {}), '(sample_labels)\n', (2217, 2232), True, 'import numpy as np\n'), ((2819, 2840), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (2835, 2840), False, 'import torch\n'), ((3692, 3720), 'torch.FloatTensor', 'torch.FloatTensor', (['sample[1]'], {}), '(sample[1])\n', (3709, 3720), False, 'import torch\n'), ((1904, 1931), 'numpy.array', 'np.array', (['self.boxes[index]'], {}), '(self.boxes[index])\n', (1912, 1931), True, 'import numpy as np\n'), ((2297, 2366), 'numpy.hstack', 'np.hstack', (['(sample_labels[:, 1:], sample_labels[:, 0][:, np.newaxis])'], {}), '((sample_labels[:, 1:], sample_labels[:, 0][:, np.newaxis]))\n', (2306, 2366), True, 'import numpy as np\n'), ((2572, 2609), 'random.randrange', 'random.randrange', (['(0)', 'self.num_samples'], {}), '(0, self.num_samples)\n', (2588, 2609), False, 'import random\n'), ((2029, 2069), 'numpy.hstack', 'np.hstack', (['(label[:, np.newaxis], boxes)'], {}), '((label[:, np.newaxis], boxes))\n', (2038, 2069), True, 'import numpy as np\n'), ((2747, 2773), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (2761, 2773), True, 'import numpy as np\n')] 
|
from inference.gp_tools import GpOptimiser
import matplotlib.pyplot as plt
import matplotlib as mpl
from numpy import sin, cos, linspace, array, meshgrid
mpl.rcParams['axes.autolimit_mode'] = 'round_numbers'
mpl.rcParams['axes.xmargin'] = 0
mpl.rcParams['axes.ymargin'] = 0
def example_plot_1d():
mu, sig = GP(x_gp)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, gridspec_kw={'height_ratios': [1, 3, 1]}, figsize = (10,8))
ax1.plot(evaluations, max_values, marker = 'o', ls = 'solid', c = 'orange', label = 'highest observed value', zorder = 5)
ax1.plot([2,12], [max(y_func), max(y_func)], ls = 'dashed', label = 'actual max', c = 'black')
ax1.set_xlabel('function evaluations')
ax1.set_xlim([2,12])
ax1.set_ylim([max(y)-0.3, max(y_func)+0.3])
ax1.xaxis.set_label_position('top')
ax1.yaxis.set_label_position('right')
ax1.xaxis.tick_top()
ax1.set_yticks([])
ax1.legend(loc=4)
ax2.plot(GP.x, GP.y, 'o', c = 'red', label = 'observations', zorder = 5)
ax2.plot(x_gp, y_func, lw = 1.5, c = 'red', ls = 'dashed', label = 'actual function')
ax2.plot(x_gp, mu, lw = 2, c = 'blue', label = 'GP prediction')
ax2.fill_between(x_gp, (mu-2*sig), y2=(mu+2*sig), color = 'blue', alpha = 0.15, label = '95% confidence interval')
ax2.set_ylim([-1.5,4])
ax2.set_ylabel('y')
ax2.set_xticks([])
aq = array([abs(GP.acquisition(array([k]))) for k in x_gp]).squeeze()
proposal = x_gp[aq.argmax()]
ax3.fill_between(x_gp, 0.9*aq/aq.max(), color = 'green', alpha = 0.15)
ax3.plot(x_gp, 0.9*aq/aq.max(), color = 'green', label = 'acquisition function')
ax3.plot([proposal]*2, [0.,1.], c = 'green', ls = 'dashed', label = 'acquisition maximum')
ax2.plot([proposal]*2, [-1.5,search_function(proposal)], c = 'green', ls = 'dashed')
ax2.plot(proposal, search_function(proposal), 'o', c = 'green', label = 'proposed observation')
ax3.set_ylim([0,1])
ax3.set_yticks([])
ax3.set_xlabel('x')
ax3.legend(loc=1)
ax2.legend(loc=2)
plt.tight_layout()
plt.subplots_adjust(hspace=0)
plt.show()
def example_plot_2d():
fig, (ax1, ax2) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [1, 3]}, figsize=(10, 8))
plt.subplots_adjust(hspace=0)
ax1.plot(evaluations, max_values, marker='o', ls='solid', c='orange', label='optimum value', zorder=5)
ax1.plot([5, 30], [z_func.max(), z_func.max()], ls='dashed', label='actual max', c='black')
ax1.set_xlabel('function evaluations')
ax1.set_xlim([5, 30])
ax1.set_ylim([max(y) - 0.3, z_func.max() + 0.3])
ax1.xaxis.set_label_position('top')
ax1.yaxis.set_label_position('right')
ax1.xaxis.tick_top()
ax1.set_yticks([])
ax1.legend(loc=4)
ax2.contour(*mesh, z_func, 40)
ax2.plot([i[0] for i in GP.x], [i[1] for i in GP.x], 'D', c='red', markeredgecolor='black')
plt.show()
"""
GpOptimiser extends the functionality of GpRegressor to perform 'Bayesian optimisation'.
Bayesian optimisation is suited to problems for which a single evaluation of the function
being explored is expensive, such that the total number of function evaluations must be
made as small as possible.
"""
# define the function whose maximum we will search for
def search_function(x):
return sin(0.5*x) + 3 / (1 + (x-1)**2)
# define bounds for the optimisation
bounds = [(-8,8)]
# create some initialisation data
x = array([-8,8])
y = search_function(x)
# create an instance of GpOptimiser
GP = GpOptimiser(x,y,bounds=bounds)
# here we evaluate the search function for plotting purposes
M = 500
x_gp = linspace(*bounds[0],M)
y_func = search_function(x_gp)
max_values = [max(GP.y)]
evaluations = [len(GP.y)]
for i in range(11):
# plot the current state of the optimisation
example_plot_1d()
# request the proposed evaluation
new_x = GP.propose_evaluation()
# evaluate the new point
new_y = search_function(new_x)
# update the gaussian process with the new information
GP.add_evaluation(new_x, new_y)
# track the optimum value for plotting
max_values.append(max(GP.y))
evaluations.append(len(GP.y))
"""
2D example
"""
from mpl_toolkits.mplot3d import Axes3D
# define a new 2D search function
def search_function(v):
x,y = v
z = ((x-1)/2)**2 + ((y+3)/1.5)**2
return sin(0.5*x) + cos(0.4*y) + 5/(1 + z)
# set bounds
bounds = [(-8,8), (-8,8)]
# evaluate function for plotting
N = 80
x = linspace(*bounds[0], N)
y = linspace(*bounds[1], N)
mesh = meshgrid(x, y)
z_func = search_function(mesh)
# create some initialisation data
# we've picked a point at each corner and one in the middle
x = [(-8,-8), (8,-8), (-8,8), (8,8), (0,0)]
y = [search_function(k) for k in x]
# initiate the optimiser
GP = GpOptimiser(x,y,bounds=bounds)
max_values = [max(GP.y)]
evaluations = [len(GP.y)]
for i in range(25):
new_x = GP.propose_evaluation()
new_y = search_function(new_x)
GP.add_evaluation(new_x, new_y)
# track the optimum value for plotting
max_values.append(max(GP.y))
evaluations.append(len(GP.y))
# plot the results
example_plot_2d() | [
"inference.gp_tools.GpOptimiser",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.tight_layout",
"numpy.sin",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((3398, 3412), 'numpy.array', 'array', (['[-8, 8]'], {}), '([-8, 8])\n', (3403, 3412), False, 'from numpy import sin, cos, linspace, array, meshgrid\n'), ((3477, 3509), 'inference.gp_tools.GpOptimiser', 'GpOptimiser', (['x', 'y'], {'bounds': 'bounds'}), '(x, y, bounds=bounds)\n', (3488, 3509), False, 'from inference.gp_tools import GpOptimiser\n'), ((3586, 3609), 'numpy.linspace', 'linspace', (['*bounds[0]', 'M'], {}), '(*bounds[0], M)\n', (3594, 3609), False, 'from numpy import sin, cos, linspace, array, meshgrid\n'), ((4441, 4464), 'numpy.linspace', 'linspace', (['*bounds[0]', 'N'], {}), '(*bounds[0], N)\n', (4449, 4464), False, 'from numpy import sin, cos, linspace, array, meshgrid\n'), ((4469, 4492), 'numpy.linspace', 'linspace', (['*bounds[1]', 'N'], {}), '(*bounds[1], N)\n', (4477, 4492), False, 'from numpy import sin, cos, linspace, array, meshgrid\n'), ((4500, 4514), 'numpy.meshgrid', 'meshgrid', (['x', 'y'], {}), '(x, y)\n', (4508, 4514), False, 'from numpy import sin, cos, linspace, array, meshgrid\n'), ((4754, 4786), 'inference.gp_tools.GpOptimiser', 'GpOptimiser', (['x', 'y'], {'bounds': 'bounds'}), '(x, y, bounds=bounds)\n', (4765, 4786), False, 'from inference.gp_tools import GpOptimiser\n'), ((350, 427), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'gridspec_kw': "{'height_ratios': [1, 3, 1]}", 'figsize': '(10, 8)'}), "(3, 1, gridspec_kw={'height_ratios': [1, 3, 1]}, figsize=(10, 8))\n", (362, 427), True, 'import matplotlib.pyplot as plt\n'), ((2024, 2042), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2040, 2042), True, 'import matplotlib.pyplot as plt\n'), ((2047, 2076), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (2066, 2076), True, 'import matplotlib.pyplot as plt\n'), ((2081, 2091), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2089, 2091), True, 'import matplotlib.pyplot as plt\n'), ((2138, 2212), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(2)', '(1)'], {'gridspec_kw': "{'height_ratios': [1, 3]}", 'figsize': '(10, 8)'}), "(2, 1, gridspec_kw={'height_ratios': [1, 3]}, figsize=(10, 8))\n", (2150, 2212), True, 'import matplotlib.pyplot as plt\n'), ((2217, 2246), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (2236, 2246), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2871), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2869, 2871), True, 'import matplotlib.pyplot as plt\n'), ((3271, 3283), 'numpy.sin', 'sin', (['(0.5 * x)'], {}), '(0.5 * x)\n', (3274, 3283), False, 'from numpy import sin, cos, linspace, array, meshgrid\n'), ((4320, 4332), 'numpy.sin', 'sin', (['(0.5 * x)'], {}), '(0.5 * x)\n', (4323, 4332), False, 'from numpy import sin, cos, linspace, array, meshgrid\n'), ((4333, 4345), 'numpy.cos', 'cos', (['(0.4 * y)'], {}), '(0.4 * y)\n', (4336, 4345), False, 'from numpy import sin, cos, linspace, array, meshgrid\n'), ((1388, 1398), 'numpy.array', 'array', (['[k]'], {}), '([k])\n', (1393, 1398), False, 'from numpy import sin, cos, linspace, array, meshgrid\n')] |
import numpy as np
def alignChannels(red, green, blue):
"""Given 3 images corresponding to different channels of a color image,
compute the best aligned result with minimum abberations
Args:
red, green, blue - each is a HxW matrix corresponding to an HxW image
Returns:
rgb_output - HxWx3 color image output, aligned as desired"""
bestGreenAlignment = None
minSSD = 1e9
for horizontalShift in range(-30, 31 , 1):
#shiftedGreen = np.roll(green, horizontalShift, axis=0)
for virticalShift in range(-30, 31 , 1):
shiftedGreen = np.roll(np.roll(green, horizontalShift, axis=0),
virticalShift, axis=1)
shiftedSSD = np.sqrt(np.sum(np.square(shiftedGreen-red)))
if shiftedSSD < minSSD:
minSSD = shiftedSSD
bestGreenAlignment = shiftedGreen
bestBlueAlignment = None
minSSD = 1e9
for horizontalShift in range(-30, 31 , 1):
for virticalShift in range(-30, 31 , 1):
shiftedBlue = np.roll(np.roll(blue,horizontalShift, axis=0),
virticalShift, axis=1)
shiftedSSD = np.sqrt(np.sum(np.square(shiftedBlue-red)))
if shiftedSSD < minSSD:
minSSD = shiftedSSD
bestBlueAlignment = shiftedBlue
alignedImage = np.zeros([blue.shape[0], blue.shape[1], 3])
alignedImage[:,:,0] = red
alignedImage[:,:,1] = bestGreenAlignment
alignedImage[:,:,2] = bestBlueAlignment
return alignedImage
| [
"numpy.zeros",
"numpy.roll",
"numpy.square"
] | [((1191, 1234), 'numpy.zeros', 'np.zeros', (['[blue.shape[0], blue.shape[1], 3]'], {}), '([blue.shape[0], blue.shape[1], 3])\n', (1199, 1234), True, 'import numpy as np\n'), ((558, 597), 'numpy.roll', 'np.roll', (['green', 'horizontalShift'], {'axis': '(0)'}), '(green, horizontalShift, axis=0)\n', (565, 597), True, 'import numpy as np\n'), ((949, 987), 'numpy.roll', 'np.roll', (['blue', 'horizontalShift'], {'axis': '(0)'}), '(blue, horizontalShift, axis=0)\n', (956, 987), True, 'import numpy as np\n'), ((664, 693), 'numpy.square', 'np.square', (['(shiftedGreen - red)'], {}), '(shiftedGreen - red)\n', (673, 693), True, 'import numpy as np\n'), ((1053, 1081), 'numpy.square', 'np.square', (['(shiftedBlue - red)'], {}), '(shiftedBlue - red)\n', (1062, 1081), True, 'import numpy as np\n')] |
"""
This code using models on cluster, which needs large computing power.
Now we deployed an Object Detection model on AWS Lambda,
and accompied with Amazon API gateway, see obj_detector.py
"""
from models.yolo.models import *
from models.yolo.utils import *
import cv2
import os, sys, time, datetime, random
import torch
import numpy as np
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
from models.yolo.sort import *
class YoloDetectron():
def __init__(self):
# Basical setting related to the YOLO mdoel
self.config_path = '/home/ubuntu/workspace/CarsMemory/models/yolo/config/yolov3.cfg'
self.weights_path = '/home/ubuntu/workspace/CarsMemory/models/yolo/config/yolov3.weights'
self.class_path = '/home/ubuntu/workspace/CarsMemory/models/yolo/config/coco.names'
self.img_size = 416
self.conf_thres = 0.8
self.nms_thres = 0.4
# Load model and weights
self.model = Darknet(self.config_path, img_size=self.img_size)
self.model.load_weights(self.weights_path)
self.model.eval()
self.classes = utils.load_classes(self.class_path)
self.Tensor = torch.FloatTensor
# About image color
self.cmap = plt.get_cmap('tab20b')
self.colors = [self.cmap(i)[:3] for i in np.linspace(0, 1, 20)]
self.mot_tracker = Sort()
def _detect_image(self, img):
# scale and pad image
ratio = min(self.img_size/img.size[0], self.img_size/img.size[1])
imw = round(img.size[0] * ratio)
imh = round(img.size[1] * ratio)
img_transforms = transforms.Compose([transforms.Resize((imh, imw)),
transforms.Pad((max(int((imh-imw)/2),0), max(int((imw-imh)/2),0), max(int((imh-imw)/2),0), max(int((imw-imh)/2),0)),
(128, 128, 128)),
transforms.ToTensor(),
])
# convert image to Tensor
image_tensor = img_transforms(img).float()
image_tensor = image_tensor.unsqueeze_(0)
input_img = Variable(image_tensor.type(self.Tensor))
# run inference on the model and get detections
with torch.no_grad():
detections = self.model(input_img)
detections = utils.non_max_suppression(detections, 80)
return detections[0]
def processing(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
pilimg = Image.fromarray(frame)
detections = self._detect_image(pilimg)
img = np.array(pilimg)
pad_x = max(img.shape[0] - img.shape[1], 0) * (self.img_size / max(img.shape))
pad_y = max(img.shape[1] - img.shape[0], 0) * (self.img_size / max(img.shape))
unpad_h = self.img_size - pad_y
unpad_w = self.img_size - pad_x
if detections is not None:
tracked_objects = self.mot_tracker.update(detections.cpu())
# unique_labels = detections[:, -1].cpu().unique()
# n_cls_preds = len(unique_labels)
for x1, y1, x2, y2, obj_id, cls_pred in tracked_objects:
box_h = int(((y2 - y1) / unpad_h) * img.shape[0])
box_w = int(((x2 - x1) / unpad_w) * img.shape[1])
y1 = int(((y1 - pad_y // 2) / unpad_h) * img.shape[0])
x1 = int(((x1 - pad_x // 2) / unpad_w) * img.shape[1])
color = self.colors[int(obj_id) % len(self.colors)]
color = [i * 255 for i in color]
cls = self.classes[int(cls_pred)]
cv2.rectangle(frame, (x1, y1), (x1+box_w, y1+box_h), color, 4)
cv2.rectangle(frame, (x1, y1-35), (x1+len(cls)*19+60, y1), color, -1)
cv2.putText(frame, cls + "-" + str(int(obj_id)), (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 3)
return frame
| [
"cv2.rectangle",
"PIL.Image.fromarray",
"numpy.array",
"numpy.linspace",
"cv2.cvtColor",
"torchvision.transforms.Resize",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"matplotlib.pyplot.get_cmap"
] | [((1390, 1412), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20b"""'], {}), "('tab20b')\n", (1402, 1412), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2557), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2531, 2557), False, 'import cv2\n'), ((2575, 2597), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (2590, 2597), False, 'from PIL import Image\n'), ((2661, 2677), 'numpy.array', 'np.array', (['pilimg'], {}), '(pilimg)\n', (2669, 2677), True, 'import numpy as np\n'), ((2309, 2324), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2322, 2324), False, 'import torch\n'), ((1462, 1483), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (1473, 1483), True, 'import numpy as np\n'), ((1785, 1814), 'torchvision.transforms.Resize', 'transforms.Resize', (['(imh, imw)'], {}), '((imh, imw))\n', (1802, 1814), False, 'from torchvision import datasets, transforms\n'), ((2005, 2026), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2024, 2026), False, 'from torchvision import datasets, transforms\n'), ((3677, 3743), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x1, y1)', '(x1 + box_w, y1 + box_h)', 'color', '(4)'], {}), '(frame, (x1, y1), (x1 + box_w, y1 + box_h), color, 4)\n', (3690, 3743), False, 'import cv2\n')] |
#!/usr/bin/env python
"""
Video Animation Functions
=========================
This module contains functions for displaying sequences of 2D data as
animations.
- animate Animate a 2D video sequence.
- animate2 Animate two 2D video sequences simultaneously.
- animate_compare Animate two 2D video sequences and their difference.
- frame_compare Display frames from two sequences and their difference.
"""
# Copyright (c) 2009-2015, <NAME>
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
__all__ = ['animate', 'animate2', 'animate_compare', 'frame_compare']
import time
import numpy as np
import pylab as p
# Fix for animation problems with Qt4Agg backend:
if p.get_backend() == 'Qt4Agg':
from PyQt4.QtGui import QApplication
animate_fix = QApplication.processEvents
else:
def animate_fix():
pass
def animate(data, step=1, delay=0):
"""
Animate sequence of frames.
Animate a sequence of `Ny x Nx` bitmap frames stored in a `M x Ny x Nx` data
array.
Parameters
----------
data : numpy.ndarray
Sequence of `M` 2D bitmaps stored as an array with shape
`(M, Ny, Nx)`.
step : int
Skip `step` frames between each displayed frames.
delay : float
Wait `delay` seconds between each frame refresh.
"""
# Get maximum value in data to scale the luminance appropriately:
mx = np.max(np.abs(data))
img = p.imshow(data[0, :, :], vmin=-mx, vmax=mx)
for k in np.arange(0, data.shape[0], step):
time.sleep(delay)
img.set_data(data[k, :, :])
p.draw()
animate_fix()
def animate2(data_1, data_2, step=1, delay=0):
"""
Animate two sequence of frames simultaneously.
Animate two sequences of `Ny x Nx` bitmap frames stored in two `M x Ny x Nx` data
arrays.
Parameters
----------
data_1 : numpy.ndarray
Sequence of `M` 2D bitmaps stored as an array with shape
`(M, Ny, Nx)`.
data_2 : numpy.ndarray
Sequence of `M` 2D bitmaps stored as an array with shape
`(M, Ny, Nx)`.
step : int
Skip `step` frames between each displayed frames.
delay : float
Wait `delay` seconds between each frame refresh.
"""
if data_1.shape != data_2.shape:
raise ValueError('cannot animate two video sequences with '
'different dimensions')
# Get maximum value in data to scale the luminance appropriately:
mx_1 = np.max(np.abs(data_1))
mx_2 = np.max(np.abs(data_2))
p.subplot(121)
img_1 = p.imshow(data_1[0, :, :], vmin=-mx_1, vmax=mx_1)
p.subplot(122)
img_2 = p.imshow(data_2[0, :, :], vmin=-mx_2, vmax=mx_2)
for k in np.arange(0, data_1.shape[0], step):
time.sleep(delay)
img_1.set_data(data_1[k, :, :])
img_2.set_data(data_2[k, :, :])
p.draw()
animate_fix()
def animate_compare(data_1, data_2, step=1, delay=0):
"""
Animate two sequence of frames and their difference simultaneously.
Animate two sequences of `Ny x Nx` bitmap frames stored in two `M x Ny x Nx` data
arrays simultaneously with their difference.
Parameters
----------
data_1 : numpy.ndarray
Sequence of `M` 2D bitmaps stored as an array with shape
`(M, Ny, Nx)`.
data_2 : numpy.ndarray
Sequence of `M` 2D bitmaps stored as an array with shape
`(M, Ny, Nx)`.
step : int
Skip `step` frames between each displayed frames.
delay : float
Wait `delay` seconds between each frame refresh.
"""
if data_1.shape != data_2.shape:
raise ValueError('cannot animate two video sequences with '
'different dimensions')
# Get maximum value in data to scale the luminance appropriately:
mx_1 = np.max(np.abs(data_1))
mx_2 = np.max(np.abs(data_2))
mx_err = max(mx_1, mx_2)
p.subplot(131)
img_1 = p.imshow(data_1[0, :, :], vmin=-mx_1, vmax=mx_1)
p.subplot(132)
img_2 = p.imshow(data_2[0, :, :], vmin=-mx_2, vmax=mx_2)
p.subplot(133)
img_err = p.imshow(data_1[0, :, :]-data_2[0, :, :], vmin=-mx_err, vmax=mx_err)
for k in np.arange(0, data_1.shape[0], step):
time.sleep(delay)
img_1.set_data(data_1[k, :, :])
img_2.set_data(data_2[k, :, :])
img_err.set_data(data_1[k, :, :]-data_2[k, :, :])
p.draw()
animate_fix()
def frame_compare(data_1, data_2, i=0):
"""
Compare corresponding frames in two video sequences.
Simultaneously display two corresponding frames from two video sequences of
identical length.
Parameters
----------
data_1 : numpy.ndarray
Sequence of `M` 2D bitmaps stored as an array with shape
`(M, Ny, Nx)`.
data_2 : numpy.ndarray
Sequence of `M` 2D bitmaps stored as an array with shape
`(M, Ny, Nx)`.
i : int
Index of frame to display.
"""
if data_1.shape != data_2.shape:
raise ValueError('cannot compare frames from two video sequences with '
'different dimensions')
mx_1 = np.max(np.abs(data_1))
mx_2 = np.max(np.abs(data_2))
p.subplot(121)
p.imshow(data_1[i, :, :], vmin=-mx_1, vmax=mx_1)
p.subplot(122)
p.imshow(data_2[i, :, :], vmin=-mx_2, vmax=mx_2)
p.draw()
| [
"numpy.abs",
"pylab.draw",
"pylab.subplot",
"time.sleep",
"pylab.get_backend",
"numpy.arange",
"pylab.imshow"
] | [((763, 778), 'pylab.get_backend', 'p.get_backend', ([], {}), '()\n', (776, 778), True, 'import pylab as p\n'), ((1501, 1543), 'pylab.imshow', 'p.imshow', (['data[0, :, :]'], {'vmin': '(-mx)', 'vmax': 'mx'}), '(data[0, :, :], vmin=-mx, vmax=mx)\n', (1509, 1543), True, 'import pylab as p\n'), ((1557, 1590), 'numpy.arange', 'np.arange', (['(0)', 'data.shape[0]', 'step'], {}), '(0, data.shape[0], step)\n', (1566, 1590), True, 'import numpy as np\n'), ((2614, 2628), 'pylab.subplot', 'p.subplot', (['(121)'], {}), '(121)\n', (2623, 2628), True, 'import pylab as p\n'), ((2641, 2689), 'pylab.imshow', 'p.imshow', (['data_1[0, :, :]'], {'vmin': '(-mx_1)', 'vmax': 'mx_1'}), '(data_1[0, :, :], vmin=-mx_1, vmax=mx_1)\n', (2649, 2689), True, 'import pylab as p\n'), ((2694, 2708), 'pylab.subplot', 'p.subplot', (['(122)'], {}), '(122)\n', (2703, 2708), True, 'import pylab as p\n'), ((2721, 2769), 'pylab.imshow', 'p.imshow', (['data_2[0, :, :]'], {'vmin': '(-mx_2)', 'vmax': 'mx_2'}), '(data_2[0, :, :], vmin=-mx_2, vmax=mx_2)\n', (2729, 2769), True, 'import pylab as p\n'), ((2783, 2818), 'numpy.arange', 'np.arange', (['(0)', 'data_1.shape[0]', 'step'], {}), '(0, data_1.shape[0], step)\n', (2792, 2818), True, 'import numpy as np\n'), ((3980, 3994), 'pylab.subplot', 'p.subplot', (['(131)'], {}), '(131)\n', (3989, 3994), True, 'import pylab as p\n'), ((4007, 4055), 'pylab.imshow', 'p.imshow', (['data_1[0, :, :]'], {'vmin': '(-mx_1)', 'vmax': 'mx_1'}), '(data_1[0, :, :], vmin=-mx_1, vmax=mx_1)\n', (4015, 4055), True, 'import pylab as p\n'), ((4060, 4074), 'pylab.subplot', 'p.subplot', (['(132)'], {}), '(132)\n', (4069, 4074), True, 'import pylab as p\n'), ((4087, 4135), 'pylab.imshow', 'p.imshow', (['data_2[0, :, :]'], {'vmin': '(-mx_2)', 'vmax': 'mx_2'}), '(data_2[0, :, :], vmin=-mx_2, vmax=mx_2)\n', (4095, 4135), True, 'import pylab as p\n'), ((4140, 4154), 'pylab.subplot', 'p.subplot', (['(133)'], {}), '(133)\n', (4149, 4154), True, 'import pylab as p\n'), ((4169, 4239), 
'pylab.imshow', 'p.imshow', (['(data_1[0, :, :] - data_2[0, :, :])'], {'vmin': '(-mx_err)', 'vmax': 'mx_err'}), '(data_1[0, :, :] - data_2[0, :, :], vmin=-mx_err, vmax=mx_err)\n', (4177, 4239), True, 'import pylab as p\n'), ((4251, 4286), 'numpy.arange', 'np.arange', (['(0)', 'data_1.shape[0]', 'step'], {}), '(0, data_1.shape[0], step)\n', (4260, 4286), True, 'import numpy as np\n'), ((5256, 5270), 'pylab.subplot', 'p.subplot', (['(121)'], {}), '(121)\n', (5265, 5270), True, 'import pylab as p\n'), ((5275, 5323), 'pylab.imshow', 'p.imshow', (['data_1[i, :, :]'], {'vmin': '(-mx_1)', 'vmax': 'mx_1'}), '(data_1[i, :, :], vmin=-mx_1, vmax=mx_1)\n', (5283, 5323), True, 'import pylab as p\n'), ((5328, 5342), 'pylab.subplot', 'p.subplot', (['(122)'], {}), '(122)\n', (5337, 5342), True, 'import pylab as p\n'), ((5347, 5395), 'pylab.imshow', 'p.imshow', (['data_2[i, :, :]'], {'vmin': '(-mx_2)', 'vmax': 'mx_2'}), '(data_2[i, :, :], vmin=-mx_2, vmax=mx_2)\n', (5355, 5395), True, 'import pylab as p\n'), ((5400, 5408), 'pylab.draw', 'p.draw', ([], {}), '()\n', (5406, 5408), True, 'import pylab as p\n'), ((1477, 1489), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (1483, 1489), True, 'import numpy as np\n'), ((1600, 1617), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (1610, 1617), False, 'import time\n'), ((1662, 1670), 'pylab.draw', 'p.draw', ([], {}), '()\n', (1668, 1670), True, 'import pylab as p\n'), ((2560, 2574), 'numpy.abs', 'np.abs', (['data_1'], {}), '(data_1)\n', (2566, 2574), True, 'import numpy as np\n'), ((2594, 2608), 'numpy.abs', 'np.abs', (['data_2'], {}), '(data_2)\n', (2600, 2608), True, 'import numpy as np\n'), ((2828, 2845), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (2838, 2845), False, 'import time\n'), ((2934, 2942), 'pylab.draw', 'p.draw', ([], {}), '()\n', (2940, 2942), True, 'import pylab as p\n'), ((3897, 3911), 'numpy.abs', 'np.abs', (['data_1'], {}), '(data_1)\n', (3903, 3911), True, 'import numpy as np\n'), ((3931, 
3945), 'numpy.abs', 'np.abs', (['data_2'], {}), '(data_2)\n', (3937, 3945), True, 'import numpy as np\n'), ((4296, 4313), 'time.sleep', 'time.sleep', (['delay'], {}), '(delay)\n', (4306, 4313), False, 'import time\n'), ((4460, 4468), 'pylab.draw', 'p.draw', ([], {}), '()\n', (4466, 4468), True, 'import pylab as p\n'), ((5202, 5216), 'numpy.abs', 'np.abs', (['data_1'], {}), '(data_1)\n', (5208, 5216), True, 'import numpy as np\n'), ((5236, 5250), 'numpy.abs', 'np.abs', (['data_2'], {}), '(data_2)\n', (5242, 5250), True, 'import numpy as np\n')] |
import numpy as np
import os
import sys
# Make the shared utility modules (util_files, util_cloud) importable.
# Requires the PROCESSING_DIR environment variable to be set; os.getenv
# returns None otherwise and os.path.abspath would fail — TODO confirm
# the execution environment always provides it.
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
    sys.path.append(utils_path)
import util_files
import util_cloud
import urllib
import ee
from google.cloud import storage
import logging
import gzip
import shutil
import rasterio
# set up logging
# get the top-level logger object and detach any pre-existing handlers
# so messages are not duplicated on re-runs
logger = logging.getLogger()
for handler in logger.handlers: logger.removeHandler(handler)
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of asset on GEE where you want to upload data
# this should be an asset name that is not currently in use
dataset_name = 'req_019_facebook_total_precipitation' #check
logger.info('Executing script for dataset: ' + dataset_name)
# first, set the directory that you are working in with the path variable
# (working directory is <BLOG_DIR>/<dataset_name>; BLOG_DIR must be set)
path = os.path.abspath(os.path.join(os.getenv('BLOG_DIR'),dataset_name))
# move to this directory
os.chdir(path)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = 'data'
if not os.path.exists(data_dir):
    os.mkdir(data_dir)
'''
Download data and save to your data directory
'''
logger.info('Downloading raw data')
# list the urls used to download the data from the source website
# (GPCC Full Data Monthly v2020, 0.25 degree grid, one gzipped netCDF per decade)
url_list = ['https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1891_1900_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1901_1910_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1911_1920_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1921_1930_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1931_1940_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1941_1950_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1951_1960_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1961_1970_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1971_1980_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1981_1990_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_1991_2000_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_2001_2010_025.nc.gz',
            'https://opendata.dwd.de/climate_environment/GPCC/full_data_monthly_v2020/025/full_data_monthly_v2020_2011_2019_025.nc.gz']
# download the data from the source
# NOTE(review): only `import urllib` appears at the top of this script;
# urllib.request works here only if another imported library has already
# loaded the submodule - consider `import urllib.request` explicitly.
raw_data_file = [os.path.join(data_dir,os.path.basename(url)) for url in url_list]
for url, file in zip(url_list, raw_data_file):
    urllib.request.urlretrieve(url, file)
# unzip source data (strip the trailing ".gz" for the output names)
raw_data_file_unzipped = [file[:-3] for file in raw_data_file]
for file,file_unzipped in zip(raw_data_file,raw_data_file_unzipped):
    with gzip.open(file, 'rb') as f_in:
        with open(file_unzipped, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
# convert the netcdf files to tif files (one band per monthly 'precip' layer)
for raw_file in raw_data_file_unzipped:
    util_files.convert_netcdf(raw_file, ['precip'])
processed_data_file = [os.path.join(raw_file[:-3]+'_precip.tif') for raw_file in raw_data_file_unzipped]
# one output raster per year, 1891-2019 inclusive (129 files)
processed_data_annual=[os.path.join(data_dir,'full_data_annual_v2020_'+str(year)+'_025_precip.tif') for year in range(1891,2020)]
# number of whole years (12-month groups) in each decadal file
# NOTE(review): the rasterio datasets opened in this list comprehension are
# never closed (resource leak); consider a loop with a context manager.
n_layers=[int(rasterio.open(file).meta['count']/12) for file in processed_data_file]
# calculate annual total precipitation
# NOTE(review): the loop variable `id` shadows the `id` builtin.
for id, file in enumerate(processed_data_file,start=0):
    with rasterio.open(file) as src0:
        # update metadata for annual aggregation: single output band, and
        # nodata scaled by 12 since 12 monthly nodata values are summed
        meta = src0.meta
        meta.update(count = 1)
        meta.update(nodata = meta['nodata']*12)
        # sum and export annual total precipitation as tif file
        # (bands i*12+1 .. i*12+12 are the 12 months of year i in this file)
        for i in range(int(src0.meta['count']/12)):
            with rasterio.open(processed_data_annual[sum(n_layers[:id])+i], 'w', **meta) as dst:
                dst.write_band(1,np.sum(src0.read(range((i*12+1), (i*12+13))), axis = 0))
'''
Upload processed data to Google Earth Engine
'''
logger.info('Uploading processed data to Google Cloud Storage.')
# set up Google Cloud Storage project and bucket objects
# (both environment variables must be set; os.environ.get returns None otherwise)
gcsClient = storage.Client(os.environ.get("CLOUDSDK_CORE_PROJECT"))
gcsBucket = gcsClient.bucket(os.environ.get("GEE_STAGING_BUCKET"))
# upload files to Google Cloud Storage; returns one gs:// URI per annual raster
gcs_uris= util_cloud.gcs_upload(processed_data_annual, dataset_name, gcs_bucket=gcsBucket)
logger.info('Uploading processed data to Google Earth Engine.')
# initialize ee and eeUtil modules for uploading to Google Earth Engine
auth = ee.ServiceAccountCredentials(os.getenv('GEE_SERVICE_ACCOUNT'), os.getenv('GOOGLE_APPLICATION_CREDENTIALS'))
ee.Initialize(auth)
# set pyramiding policy for GEE upload
pyramiding_policy = 'MEAN' #check
# create an image collection where we will put the processed data files in GEE
image_collection = f'projects/resource-watch-gee/Facebook/PrecipitationAnalysis/GPCC_annual'
ee.data.createAsset({'type': 'ImageCollection'}, image_collection)
# set image collection's privacy to public
acl = {"all_users_can_read": True}
ee.data.setAssetAcl(image_collection, acl)
print('Privacy set to public.')
# list the bands in each image
band_ids = ['b1']
# collects the GEE ingestion task handles (not polled in this script)
task_id = []
# upload processed data files to GEE
for uri in gcs_uris:
    # generate an asset name for the current file by using the filename (minus the file type extension)
    asset_name = f'projects/resource-watch-gee/Facebook/PrecipitationAnalysis/GPCC_annual/{os.path.basename(uri)[:-4]}'
    # create the band manifest for this asset
    mf_bands = [{'id': band_id, 'tileset_band_index': band_ids.index(band_id), 'tileset_id': os.path.basename(uri)[:-4],
                 'pyramidingPolicy': pyramiding_policy} for band_id in band_ids]
    # create complete manifest for asset upload
    manifest = util_cloud.gee_manifest_complete(asset_name, uri, mf_bands)
    # upload the file from GCS to GEE
    task = util_cloud.gee_ingest(manifest)
    print(asset_name + ' uploaded to GEE')
    task_id.append(task)
# remove files from Google Cloud Storage
# NOTE(review): files are deleted without waiting for the ingestion tasks in
# task_id to finish - confirm gee_ingest blocks until ingestion completes.
util_cloud.gcs_remove(gcs_uris, gcs_bucket=gcsBucket)
logger.info('Files deleted from Google Cloud Storage.')
# load image collection
GPCC_annual = ee.ImageCollection("projects/resource-watch-gee/Facebook/PrecipitationAnalysis/GPCC_annual")
# save projection (crs, crsTransform, scale)
projection = GPCC_annual.first().projection().getInfo()
projection_gee = GPCC_annual.first().projection()
crs = projection.get('crs')
crsTransform = projection.get('transform')
scale = GPCC_annual.first().projection().nominalScale().getInfo()
print('crs: ', crs)
print('crsTransform: ', crsTransform)
print('scale: ', scale)
# convert ImageCollection to Image
band_names = ee.List([str(i) for i in np.arange(1891,2020)])
GPCC_annual_img = GPCC_annual.toBands()
GPCC_annual_img = GPCC_annual_img.rename(band_names)
# define countries to calculate statistics for
country_list = ["USA","GBR","FRA","DEU","CAN", "SWE", "BRA", "MEX", "BEL", "IRL", "NLD", "NGA", "SAU", "ZAF", "ESP", "IND", "IDN", "TWN"]
# load country and state shapefiles
countries = ee.FeatureCollection("projects/resource-watch-gee/gadm36_0_simplified")
states = ee.FeatureCollection("projects/resource-watch-gee/gadm36_1_simplified")
for country_ios in country_list:
# national-level analysis
# load country data, filter to desired ISO Codes
country = countries.filterMetadata('GID_0', 'equals', ee.String(country_ios))
# export to Google Drive
output_name='GPCC_annual_country_'+country_ios
output_folder='Facebook'
export_results_task = ee.batch.Export.table.toDrive(
collection = GPCC_annual_img.reduceRegions(country, ee.Reducer.mean(), scale = scale, tileScale = 16).select(ee.List(['GID_0','NAME_0']).cat(band_names), retainGeometry = False),
description = output_name,
fileNamePrefix = output_name,
folder = output_folder)
# start the task
export_results_task.start()
# state-level analysis
# load state data, filter to desired ISO Codes
state = states.filterMetadata('GID_0', 'equals', ee.String(country_ios))
# export to Google Drive
output_name='GPCC_annual_state_'+country_ios
output_folder='Facebook'
export_results_task = ee.batch.Export.table.toDrive(
collection = GPCC_annual_img.reduceRegions(state, ee.Reducer.mean(), scale = scale, tileScale = 16).select(ee.List(['CC_1','ENGTYPE_1','GID_0','GID_1','HASC_1','NAME_0','NAME_1','NL_NAME_1','TYPE_1','VARNAME_1']).cat(band_names), retainGeometry = False),
description = output_name,
fileNamePrefix = output_name,
folder = output_folder)
# start the task
export_results_task.start() | [
"logging.getLogger",
"logging.StreamHandler",
"gzip.open",
"util_cloud.gcs_remove",
"ee.ImageCollection",
"ee.String",
"sys.path.append",
"numpy.arange",
"os.path.exists",
"ee.Authenticate",
"ee.Reducer.mean",
"urllib.request.urlretrieve",
"util_cloud.gee_ingest",
"os.mkdir",
"ee.data.se... | [((395, 414), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (412, 414), False, 'import logging\n'), ((549, 572), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (570, 572), False, 'import logging\n'), ((600, 687), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (619, 687), False, 'import logging\n'), ((1092, 1106), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (1100, 1106), False, 'import os\n'), ((5148, 5233), 'util_cloud.gcs_upload', 'util_cloud.gcs_upload', (['processed_data_annual', 'dataset_name'], {'gcs_bucket': 'gcsBucket'}), '(processed_data_annual, dataset_name, gcs_bucket=gcsBucket\n )\n', (5169, 5233), False, 'import util_cloud\n'), ((5481, 5500), 'ee.Initialize', 'ee.Initialize', (['auth'], {}), '(auth)\n', (5494, 5500), False, 'import ee\n'), ((5748, 5814), 'ee.data.createAsset', 'ee.data.createAsset', (["{'type': 'ImageCollection'}", 'image_collection'], {}), "({'type': 'ImageCollection'}, image_collection)\n", (5767, 5814), False, 'import ee\n'), ((5894, 5936), 'ee.data.setAssetAcl', 'ee.data.setAssetAcl', (['image_collection', 'acl'], {}), '(image_collection, acl)\n', (5913, 5936), False, 'import ee\n'), ((6873, 6926), 'util_cloud.gcs_remove', 'util_cloud.gcs_remove', (['gcs_uris'], {'gcs_bucket': 'gcsBucket'}), '(gcs_uris, gcs_bucket=gcsBucket)\n', (6894, 6926), False, 'import util_cloud\n'), ((7191, 7288), 'ee.ImageCollection', 'ee.ImageCollection', (['"""projects/resource-watch-gee/Facebook/PrecipitationAnalysis/GPCC_annual"""'], {}), "(\n 'projects/resource-watch-gee/Facebook/PrecipitationAnalysis/GPCC_annual')\n", (7209, 7288), False, 'import ee\n'), ((8079, 8150), 'ee.FeatureCollection', 'ee.FeatureCollection', (['"""projects/resource-watch-gee/gadm36_0_simplified"""'], {}), "('projects/resource-watch-gee/gadm36_0_simplified')\n", (8099, 
8150), False, 'import ee\n'), ((8160, 8231), 'ee.FeatureCollection', 'ee.FeatureCollection', (['"""projects/resource-watch-gee/gadm36_1_simplified"""'], {}), "('projects/resource-watch-gee/gadm36_1_simplified')\n", (8180, 8231), False, 'import ee\n'), ((155, 182), 'sys.path.append', 'sys.path.append', (['utils_path'], {}), '(utils_path)\n', (170, 182), False, 'import sys\n'), ((1271, 1295), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (1285, 1295), False, 'import os\n'), ((1301, 1319), 'os.mkdir', 'os.mkdir', (['data_dir'], {}), '(data_dir)\n', (1309, 1319), False, 'import os\n'), ((3418, 3455), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'file'], {}), '(url, file)\n', (3444, 3455), False, 'import urllib\n'), ((3827, 3874), 'util_files.convert_netcdf', 'util_files.convert_netcdf', (['raw_file', "['precip']"], {}), "(raw_file, ['precip'])\n", (3852, 3874), False, 'import util_files\n'), ((3898, 3941), 'os.path.join', 'os.path.join', (["(raw_file[:-3] + '_precip.tif')"], {}), "(raw_file[:-3] + '_precip.tif')\n", (3910, 3941), False, 'import os\n'), ((4990, 5029), 'os.environ.get', 'os.environ.get', (['"""CLOUDSDK_CORE_PROJECT"""'], {}), "('CLOUDSDK_CORE_PROJECT')\n", (5004, 5029), False, 'import os\n'), ((5060, 5096), 'os.environ.get', 'os.environ.get', (['"""GEE_STAGING_BUCKET"""'], {}), "('GEE_STAGING_BUCKET')\n", (5074, 5096), False, 'import os\n'), ((5402, 5434), 'os.getenv', 'os.getenv', (['"""GEE_SERVICE_ACCOUNT"""'], {}), "('GEE_SERVICE_ACCOUNT')\n", (5411, 5434), False, 'import os\n'), ((5436, 5479), 'os.getenv', 'os.getenv', (['"""GOOGLE_APPLICATION_CREDENTIALS"""'], {}), "('GOOGLE_APPLICATION_CREDENTIALS')\n", (5445, 5479), False, 'import os\n'), ((6622, 6681), 'util_cloud.gee_manifest_complete', 'util_cloud.gee_manifest_complete', (['asset_name', 'uri', 'mf_bands'], {}), '(asset_name, uri, mf_bands)\n', (6654, 6681), False, 'import util_cloud\n'), ((6731, 6762), 'util_cloud.gee_ingest', 
'util_cloud.gee_ingest', (['manifest'], {}), '(manifest)\n', (6752, 6762), False, 'import util_cloud\n'), ((7067, 7082), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (7080, 7082), False, 'import ee\n'), ((82, 109), 'os.getenv', 'os.getenv', (['"""PROCESSING_DIR"""'], {}), "('PROCESSING_DIR')\n", (91, 109), False, 'import os\n'), ((1030, 1051), 'os.getenv', 'os.getenv', (['"""BLOG_DIR"""'], {}), "('BLOG_DIR')\n", (1039, 1051), False, 'import os\n'), ((3322, 3343), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (3338, 3343), False, 'import os\n'), ((3618, 3639), 'gzip.open', 'gzip.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (3627, 3639), False, 'import gzip\n'), ((4301, 4320), 'rasterio.open', 'rasterio.open', (['file'], {}), '(file)\n', (4314, 4320), False, 'import rasterio\n'), ((7110, 7127), 'ee.Authenticate', 'ee.Authenticate', ([], {}), '()\n', (7125, 7127), False, 'import ee\n'), ((7132, 7147), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (7145, 7147), False, 'import ee\n'), ((8407, 8429), 'ee.String', 'ee.String', (['country_ios'], {}), '(country_ios)\n', (8416, 8429), False, 'import ee\n'), ((9078, 9100), 'ee.String', 'ee.String', (['country_ios'], {}), '(country_ios)\n', (9087, 9100), False, 'import ee\n'), ((3710, 3741), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (3728, 3741), False, 'import shutil\n'), ((7729, 7750), 'numpy.arange', 'np.arange', (['(1891)', '(2020)'], {}), '(1891, 2020)\n', (7738, 7750), True, 'import numpy as np\n'), ((6286, 6307), 'os.path.basename', 'os.path.basename', (['uri'], {}), '(uri)\n', (6302, 6307), False, 'import os\n'), ((6454, 6475), 'os.path.basename', 'os.path.basename', (['uri'], {}), '(uri)\n', (6470, 6475), False, 'import os\n'), ((4125, 4144), 'rasterio.open', 'rasterio.open', (['file'], {}), '(file)\n', (4138, 4144), False, 'import rasterio\n'), ((8658, 8675), 'ee.Reducer.mean', 'ee.Reducer.mean', ([], {}), '()\n', (8673, 8675), 
False, 'import ee\n'), ((8715, 8743), 'ee.List', 'ee.List', (["['GID_0', 'NAME_0']"], {}), "(['GID_0', 'NAME_0'])\n", (8722, 8743), False, 'import ee\n'), ((9325, 9342), 'ee.Reducer.mean', 'ee.Reducer.mean', ([], {}), '()\n', (9340, 9342), False, 'import ee\n'), ((9382, 9500), 'ee.List', 'ee.List', (["['CC_1', 'ENGTYPE_1', 'GID_0', 'GID_1', 'HASC_1', 'NAME_0', 'NAME_1',\n 'NL_NAME_1', 'TYPE_1', 'VARNAME_1']"], {}), "(['CC_1', 'ENGTYPE_1', 'GID_0', 'GID_1', 'HASC_1', 'NAME_0',\n 'NAME_1', 'NL_NAME_1', 'TYPE_1', 'VARNAME_1'])\n", (9389, 9500), False, 'import ee\n')] |
'''
Batch deep-dream processing: runs every .jpg in the 'input' directory
through recursive_optimize and saves the result to 'output'.

Some info on various layers, so you know what to expect
depending on which layer you choose:
layer 1: wavy
layer 2: lines
layer 3: boxes
layer 4: circles?
layer 6: dogs, bears, cute animals.
layer 7: faces, buildings
layer 8: fish begin to appear, frogs/reptilian eyes.
layer 10: Monkeys, lizards, snakes, duck
Choosing various parameters like num_iterations, rescale,
and num_repeats really varies on which layer you're doing.
We could probably come up with some sort of formula. The
deeper the layer is, the more iterations and
repeats you will want.
Layer 3: 20 iterations, 0.5 rescale, and 8 repeats is decent start
Layer 10: 40 iterations and 25 repeats is good.
'''
from deepdreamer import model, load_image, recursive_optimize
import numpy as np
import PIL.Image
import os
layer_tensor = model.layer_tensors[2] # Number from 1 to 10 for different layers as mentioned above
# clear the console - NOTE(review): 'cls' only works on Windows ('clear' on Unix)
os.system('cls')
# assumes 'input' and 'output' directories already exist in the working dir
for file_name in os.listdir('input'):
    if file_name.endswith(".jpg"):
        print('Starting ', file_name)
        img_result = load_image(filename='{}'.format('input/{}'.format(file_name)))
        img_result = recursive_optimize(layer_tensor=layer_tensor, image=img_result,
                                         # how clear is the dream vs original image
                                         num_iterations=20, step_size=1.0, rescale_factor=0.5,
                                         # How many "passes" over the data. More passes, the more granular the gradients will be.
                                         num_repeats=8, blend=0.2)
        # clamp to the valid 8-bit range before converting for image export
        img_result = np.clip(img_result, 0.0, 255.0)
        img_result = img_result.astype(np.uint8)
        result = PIL.Image.fromarray(img_result, mode='RGB')
        result.save('output/{}'.format(file_name))
        print('Finished processing ', file_name)
print('Done')
| [
"deepdreamer.recursive_optimize",
"os.system",
"os.listdir",
"numpy.clip"
] | [((924, 940), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (933, 940), False, 'import os\n'), ((959, 978), 'os.listdir', 'os.listdir', (['"""input"""'], {}), "('input')\n", (969, 978), False, 'import os\n'), ((1164, 1315), 'deepdreamer.recursive_optimize', 'recursive_optimize', ([], {'layer_tensor': 'layer_tensor', 'image': 'img_result', 'num_iterations': '(20)', 'step_size': '(1.0)', 'rescale_factor': '(0.5)', 'num_repeats': '(8)', 'blend': '(0.2)'}), '(layer_tensor=layer_tensor, image=img_result,\n num_iterations=20, step_size=1.0, rescale_factor=0.5, num_repeats=8,\n blend=0.2)\n', (1182, 1315), False, 'from deepdreamer import model, load_image, recursive_optimize\n'), ((1572, 1603), 'numpy.clip', 'np.clip', (['img_result', '(0.0)', '(255.0)'], {}), '(img_result, 0.0, 255.0)\n', (1579, 1603), True, 'import numpy as np\n')] |
import numpy
class Fickett:
    '''
    Calculate the Fickett TESTCODE score for a full nucleotide sequence
    (NAR 10(17) 5303-531).

    The score combines, for each base (A, C, G, T), its overall content
    fraction and its positional asymmetry across the three codon positions,
    each converted to a weighted probability via look-up tables.

    Modified from source code of CPAT 1.2.1 downloaded from
    https://sourceforge.net/projects/rna-cpat/files/?source=navbar

    Fixes vs. the original: removed the no-op `dna=dna` statement and the
    first, immediately-overwritten computation of the base content fractions
    (dead code); results are unchanged.
    '''
    def __init__(self):
        '''new compiled Fickett look-up table'''
        # descending thresholds for positional asymmetry values
        self.position_parameter = [1.9,1.8,1.7,1.6,1.5,1.4,1.3,1.2,1.1,0.0]
        # descending thresholds for base content fractions
        self.content_parameter = [0.33,0.31,0.29,0.27,0.25,0.23,0.21,0.19,0.17,0]
        # probabilities indexed by the first matching threshold position
        self.position_probability = {
            "A":[0.51,0.55,0.57,0.52,0.48,0.58,0.57,0.54,0.50,0.36],
            "C":[0.29,0.44,0.55,0.49,0.52,0.60,0.60,0.56,0.51,0.38],
            "G":[0.62,0.67,0.74,0.65,0.61,0.62,0.52,0.41,0.31,0.17],
            "T":[0.51,0.60,0.69,0.64,0.62,0.67,0.58,0.48,0.39,0.24],
            }
        # per-base weights applied to the looked-up probabilities
        self.position_weight = {"A":0.062,"C":0.093,"G":0.205,"T":0.154}
        self.content_probability = {
            "A":[0.40,0.55,0.58,0.58,0.52,0.48,0.45,0.45,0.38,0.19],
            "C":[0.50,0.63,0.59,0.50,0.46,0.45,0.47,0.56,0.59,0.33],
            "G":[0.21,0.40,0.47,0.50,0.52,0.56,0.57,0.52,0.44,0.23],
            "T":[0.30,0.49,0.56,0.53,0.48,0.48,0.52,0.57,0.60,0.51]
            }
        self.content_weight = {"A":0.084,"C":0.076,"G":0.081,"T":0.055}
    def look_up_position_probability(self,value, base):
        '''
        Look up the weighted positional probability by base and value.
        Returns None for negative values (no threshold matches).
        '''
        if float(value) < 0:
            return None
        for idx,val in enumerate (self.position_parameter):
            if (float(value) >= val):
                return float(self.position_probability[base][idx]) * float(self.position_weight[base])
    def look_up_content_probability(self,value, base):
        '''
        Look up the weighted content probability by base and value.
        Returns None for negative values (no threshold matches).
        '''
        if float(value) < 0:
            return None
        for idx,val in enumerate (self.content_parameter):
            if (float(value) >= val):
                return float(self.content_probability[base][idx]) * float(self.content_weight[base])
    def fickett_value(self,dna):
        '''
        Calculate the Fickett value from a full RNA transcript sequence
        (expected as uppercase A/C/G/T). Returns 0 for sequences shorter
        than 2 bases.
        '''
        if len(dna) < 2:
            return 0
        total_base = len(dna)
        # split the sequence into the three codon positions
        phases = [dna[0::3], dna[1::3], dna[2::3]]
        contents = {}
        positions = {}
        for base in "ACGT":
            counts = [phase.count(base) for phase in phases]
            # overall fraction of this base in the full sequence
            contents[base] = float(sum(counts)) / total_base
            # positional asymmetry: max over min (+1 avoids division by zero)
            positions[base] = max(counts) / (min(counts) + 1.0)
        # sum the weighted probabilities (content terms first, then position
        # terms, preserving the original accumulation order)
        fickett_score = 0
        for base in "ACGT":
            fickett_score += self.look_up_content_probability(contents[base], base)
        for base in "ACGT":
            fickett_score += self.look_up_position_probability(positions[base], base)
        return fickett_score
| [
"numpy.min",
"numpy.max"
] | [((3374, 3418), 'numpy.max', 'numpy.max', (['[phase_0_A, phase_1_A, phase_2_A]'], {}), '([phase_0_A, phase_1_A, phase_2_A])\n', (3383, 3418), False, 'import numpy\n'), ((3487, 3531), 'numpy.max', 'numpy.max', (['[phase_0_C, phase_1_C, phase_2_C]'], {}), '([phase_0_C, phase_1_C, phase_2_C])\n', (3496, 3531), False, 'import numpy\n'), ((3600, 3644), 'numpy.max', 'numpy.max', (['[phase_0_G, phase_1_G, phase_2_G]'], {}), '([phase_0_G, phase_1_G, phase_2_G])\n', (3609, 3644), False, 'import numpy\n'), ((3713, 3757), 'numpy.max', 'numpy.max', (['[phase_0_T, phase_1_T, phase_2_T]'], {}), '([phase_0_T, phase_1_T, phase_2_T])\n', (3722, 3757), False, 'import numpy\n'), ((3418, 3462), 'numpy.min', 'numpy.min', (['[phase_0_A, phase_1_A, phase_2_A]'], {}), '([phase_0_A, phase_1_A, phase_2_A])\n', (3427, 3462), False, 'import numpy\n'), ((3531, 3575), 'numpy.min', 'numpy.min', (['[phase_0_C, phase_1_C, phase_2_C]'], {}), '([phase_0_C, phase_1_C, phase_2_C])\n', (3540, 3575), False, 'import numpy\n'), ((3644, 3688), 'numpy.min', 'numpy.min', (['[phase_0_G, phase_1_G, phase_2_G]'], {}), '([phase_0_G, phase_1_G, phase_2_G])\n', (3653, 3688), False, 'import numpy\n'), ((3757, 3801), 'numpy.min', 'numpy.min', (['[phase_0_T, phase_1_T, phase_2_T]'], {}), '([phase_0_T, phase_1_T, phase_2_T])\n', (3766, 3801), False, 'import numpy\n')] |
"""
logreg.py
This module contains functions to run and analyse logistic regressions
to predict stimulus information from ROI activity for data generated by the
Allen Institute OpenScope experiments for the Credit Assignment Project.
Authors: <NAME>
Date: October, 2018
Note: this code uses python 3.7.
"""
import os
import copy
import logging
import warnings
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import torch
from analysis import quint_analys
from util import data_util, file_util, gen_util, logger_util, logreg_util, \
math_util, plot_util
from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util
from plot_fcts import logreg_plots
from util import gen_util
logger = logging.getLogger(__name__)
TAB = " "
#### ALWAYS SET TO FALSE - CHANGE ONLY FOR TESTING PURPOSES
TEST_BRICKS_VARIATIONS = False
#############################################
def get_comps(stimtype="gabors", q1v4=False, regvsurp=False):
    """
    get_comps()

    Returns the list of comparisons compatible with the stimulus type and
    analysis settings.

    Optional args:
        - stimtype (str) : stimulus type ("gabors" or "bricks")
                           default: "gabors"
        - q1v4 (bool)    : if True, analysis is trained on first and tested
                           on last quintiles (removes half comparisons for
                           bricks)
                           default: False
        - regvsurp (bool): if True, analysis is trained on regular and
                           tested on surprise sequences (bricks only)
                           default: False

    Returns:
        - comps (list): list of comparisons that fit the criteria
    """

    gabor_comps = ["surp", "AvB", "AvC", "BvC", "DvU", "Aori", "Bori",
        "Cori", "Dori", "Uori", "DoriU", "DoriA", "BCDoriA", "BCDoriU",
        "ABCoriD", "ABCoriU"]
    brick_comps = ["surp", "dir_all", "dir_surp", "dir_reg", "half_right",
        "half_left", "half_diff"]

    if stimtype == "gabors":
        if regvsurp:
            raise ValueError("regvsurp can only be used with bricks.")
        comps = gabor_comps
    elif stimtype == "bricks":
        comps = brick_comps
        if regvsurp:
            comps = gen_util.remove_if(comps,
                ["surp", "dir_surp", "dir_all", "half_right", "half_left",
                "half_diff"])
        if q1v4:
            comps = gen_util.remove_if(comps,
                ["half_left", "half_right", "half_diff"])
    else:
        gen_util.accepted_values_error(
            "stimtype", stimtype, ["gabors", "bricks"])

    return comps
#############################################
def get_class_pars(comp="surp", stimtype="gabors"):
    """
    get_class_pars()

    Returns the name of the class determining variable and the surprise
    value(s) to use for the classes.

    Optional args:
        - comp (str)    : type of comparison
                          default: "surp"
        - stimtype (str): stimulus type
                          default: "gabors"

    Returns:
        - class_var (str)    : variable separating classes (e.g., "surps",
                               "gab_ori", "bri_dir")
        - surps (str or list): surprise values (per class, if a list)
    """

    if stimtype == "gabors":
        if comp in ["surp", "DvU"]:
            class_var, surps = "surps", [0, 1]
        elif "ori" in comp:
            class_var = "gab_ori"
            letter_groups = [sub.upper() for sub in comp.split("ori")
                if len(sub)]
            surps = []
            for letts in letter_groups:
                has_d, has_u = ("D" in letts), ("U" in letts)
                if has_d != has_u: # exactly one of D/U present
                    surps.append(int(has_u))
                else:
                    surps.append("any")
            if len(letter_groups) == 1:
                surps = surps[0]
        elif "dir" in comp:
            raise ValueError("dir comparison not valid for gabors.")
        else:
            class_var, surps = "gabfr", "any"
    elif stimtype == "bricks":
        class_var = "bri_dir"
        if comp in ["half_right", "half_left", "half_diff"]:
            class_var, surps = comp, "any"
        elif comp == "surp":
            class_var, surps = "surps", [0, 1]
        elif comp == "dir_all":
            surps = "any"
        elif comp == "dir_reg":
            surps = 0
        elif comp == "dir_surp":
            surps = 1
        else:
            raise ValueError("Only surp, dir_all, dir_reg, dir_surp, "
                "samehalf, diffhalf comparisons supported for Bricks.")

    return class_var, surps
#############################################
def get_stimpar(comp="surp", stimtype="gabors", bri_dir="both", bri_size=128, 
                gabfr=0, gabk=16, gab_ori="all", bri_pre=0.0):
    """
    get_stimpar()

    Returns a stimulus parameter named tuple based on the stimulus parameters 
    passed and comparison type.

    Optional args:
        - comp (str)            : type of comparison
                                  default: "surp"
        - stimtype (str)        : stimulus type
                                  default: "gabors"
        - bri_dir (str or list) : brick direction
                                  default: "both"
        - bri_size (int or list): brick size
                                  default: 128
        - gabfr (int or list)   : gabor frame of reference (may be a list 
                                  depending on "comp")
                                  default: 0
        - gabk (int or list)    : gabor kappa
                                  default: 16
        - gab_ori (str)         : gabor orientations ("all" or "shared"), 
                                  for comp values like DoriU, DoriA, etc.
                                  default: "all"
        - bri_pre (int)         : pre parameter for Bricks
                                  default: 0.0

    Returns:
        - stimpar (StimPar) : named tuple containing stimulus parameters
    """

    # NOTE(review): due to operator precedence this evaluates as
    # (stimtype == "bricks" and "half" in bri_dir) or ("dir" in bri_dir),
    # and it tests bri_dir where the "dir"/"half" values appear in `comp`
    # elsewhere in this function - confirm whether `comp` was intended here.
    if stimtype == "bricks" and "half" in bri_dir or "dir" in bri_dir:
        logger.info("Ignoring brick dir setting.")
    # specialized gab_ori settings only apply to multi-letter "ori"
    # comparisons (e.g., "DoriU"); otherwise reset to "all"
    if not (len(comp.replace("ori", "").upper()) > 1):
        gab_ori = "all"
    [bri_dir, bri_size, gabfr, 
        gabk, gab_ori] = sess_gen_util.get_params(
        stimtype, bri_dir, bri_size, gabfr, gabk, gab_ori)
    if stimtype == "gabors":
        # DO NOT ALLOW OVERLAPPING
        if comp == "surp":
            stimpar = sess_ntuple_util.init_stimpar(
                stimtype, bri_dir, bri_size, gabfr, gabk, gab_ori, 0, 1.5)
        elif comp == "DvU":
            # use the gabor frame matching the first letter of the comparison
            gabfr = sess_str_util.gabfr_nbrs(comp[0])
            stimpar = sess_ntuple_util.init_stimpar(
                stimtype, bri_dir, bri_size, gabfr, gabk, gab_ori, 0, 0.45)
        elif "ori" in comp:
            # letter groups on either side of "ori", e.g. "BCDoriA" ->
            # ["BCD", "A"]
            gab_letts = [lett.upper() for lett in comp.split("ori") 
                if len(lett) > 0]
            act_gabfr = [[sess_str_util.gabfr_nbrs(lett) for lett in letts] 
                for letts in gab_letts]
            if len(act_gabfr) == 1:
                pre, post = 0, 0.45
                if comp in ["Dori", "Uori"]:
                    pre, post = 0, 0.6
                act_gabfr = act_gabfr[0]
                if act_gabfr != gabfr:
                    logger.info(
                        f"Setting gabfr to {act_gabfr} instead of {gabfr}.")
            else:
                pre, post = -0.15, 0.45
            gab_ori = sess_gen_util.gab_oris_shared_U(gab_letts, gab_ori)
            stimpar = sess_ntuple_util.init_stimpar(
                stimtype, bri_dir, bri_size, act_gabfr, gabk, gab_ori, 
                pre, post)
        elif "dir" in comp or "half" in comp:
            raise ValueError("dir/half comparison not valid for gabors.")
        else:
            # comparisons like "AvB": use the two flanking gabor frames
            gabfrs = sess_str_util.gabfr_nbrs([comp[0], comp[2]])
            stimpar = sess_ntuple_util.init_stimpar(
                stimtype, bri_dir, bri_size, gabfrs, gabk, gab_ori, 0, 0.45)
    elif stimtype == "bricks":
        # DO NOT ALLOW OVERLAPPING
        if "right" in comp:
            bri_dir = "right"
        elif "left" in comp:
            bri_dir = "left"
        stimpar = sess_ntuple_util.init_stimpar(
            stimtype, bri_dir, bri_size, gabfr, gabk, gab_ori, bri_pre, 1.0)
        # for brick logreg test analyses (module-level flag, normally False)
        if TEST_BRICKS_VARIATIONS:
            logger.warning("Setting bricks pre/post to 2 for testing purposes.")
            stimpar = sess_ntuple_util.init_stimpar(
                stimtype, bri_dir, bri_size, gabfr, gabk, gab_ori, 2, 2)

    return stimpar
#############################################
def get_rundir(run_val, uniqueid=None, alg="sklearn"):
    """
    get_rundir(run_val)

    Returns the name of the specific subdirectory in which an analysis is
    saved, based on the run value and unique ID.

    Required args:
        - run_val (int): run number ("pytorch" alg) or
                         number of runs ("sklearn" alg)

    Optional args:
        - uniqueid (str or int): unique ID for the analysis (if provided,
                                 the algorithm setting is ignored)
                                 default: None
        - alg (str)            : algorithm used to run logistic regression
                                 ("sklearn" or "pytorch")
                                 default: "sklearn"

    Returns:
        - rundir (str): name of the subdirectory in which to save the
                        analysis
    """

    if uniqueid is not None:
        rundir = f"{uniqueid}_{run_val}"
    elif alg == "sklearn":
        rundir = f"{run_val}_runs"
    elif alg == "pytorch":
        rundir = f"run_{run_val}"
    else:
        gen_util.accepted_values_error("alg", alg, ["sklearn", "pytorch"])

    return rundir
#############################################
def get_compdir_dict(rundir, no_lists=False):
    """
    get_compdir_dict(rundir)

    Returns a dictionary of analysis parameters parsed from the full
    analysis path.

    Required args:
        - rundir (str): path of the subdirectory in which the analysis is
                        saved, structured as
                        ".../m_s_plane_stim_fluor_scaled_comp_shuffled/
                        uniqueid_run"

    Optional args:
        - no_lists (bool): if True, list parameters are replaced with a
                           string, e.g. "both"
                           default: False

    Returns:
        - compdir_dict (dict): parameter dictionary parsed from the
                               parameter directory name (see
                               sess_gen_util.get_params_from_str), updated
                               with:
            ["uniqueid"] (str): unique ID (None if the final path component
                                is formatted as "run_n")
            ["run_n"] (int)   : run number
    """

    path_parts = rundir.split(os.sep)
    param_str = path_parts[-2]
    run_str = path_parts[-1]

    compdir_dict = sess_gen_util.get_params_from_str(param_str, no_lists)

    run_tokens = run_str.split("_")
    if "run" in run_str:
        # formatted as "run_n" (no unique ID)
        compdir_dict["uniqueid"] = None
        compdir_dict["run_n"] = int(run_tokens[1])
    else:
        # formatted as "uniqueid_n" (the unique ID may itself contain "_")
        compdir_dict["uniqueid"] = "_".join(str(tok) for tok in run_tokens[:-1])
        compdir_dict["run_n"] = int(run_tokens[-1])

    return compdir_dict
#############################################
def get_df_name(task="analyse", stimtype="gabors", comp="surp", ctrl=False,
                alg="sklearn"):
    """
    get_df_name()

    Returns the name of the scores dataframe for the requested task.

    Optional args:
        - task (str)    : type of task for which to get the dataframe
                          ("analyse" or "collate")
                          default: "analyse"
        - stimtype (str): type of stimulus
                          default: "gabors"
        - comp (str)    : type of comparison
                          default: "surp"
        - ctrl (bool)   : if True, control comparisons are analysed
                          default: False
        - alg (str)     : algorithm used to run logistic regression
                          ("sklearn" or "pytorch")
                          default: "sklearn"

    Returns:
        - df_name (str): name of the dataframe
    """

    alg_str = ""
    if alg == "pytorch":
        alg_str = "_pt"
    elif alg != "sklearn":
        gen_util.accepted_values_error("alg", alg, ["pytorch", "sklearn"])

    ctrl_str = sess_str_util.ctrl_par_str(ctrl)
    # e.g. "gab_surp" or "bri_surp", plus control/algorithm markers
    sub_str = f"{stimtype[0:3]}_{comp}{ctrl_str}{alg_str}"

    if task == "collate":
        df_name = f"{sub_str}_all_scores_df.csv"
    elif task == "analyse":
        df_name = f"{sub_str}_score_stats_df.csv"
    else:
        # previously an unknown task fell through and `return df_name`
        # raised an uninformative UnboundLocalError; fail explicitly instead
        gen_util.accepted_values_error("task", task, ["analyse", "collate"])

    return df_name
#############################################
def info_dict(analyspar=None, sesspar=None, stimpar=None, extrapar=None,
              comp="surp", alg="sklearn", n_rois=None, epoch_n=None):
    """
    info_dict()

    Returns an analysis info dictionary assembled from the parameter
    namedtuples and dictionaries. Includes "run_n" only for the pytorch
    algorithm, and "epoch_n" only if one is passed.

    If any of analyspar, sesspar, stimpar or extrapar is None, returns the
    ordered list of dictionary keys instead.

    Optional args:
        - analyspar (AnalysPar): named tuple containing analysis parameters
                                 default: None
        - sesspar (SessPar)    : named tuple containing session parameters
                                 default: None
        - stimpar (StimPar)    : named tuple containing stimulus parameters
                                 default: None
        - extrapar (dict)      : dictionary with extra parameters
                                 ["run_n"] (int)   : run number
                                 ["shuffle"] (bool): whether data is shuffled
                                 ["uniqueid"] (str): uniqueid
                                 default: None
        - comp (str)           : comparison type
                                 default: "surp"
        - alg (str)            : algorithm used to run logistic regression
                                 ("sklearn" or "pytorch")
                                 default: "sklearn"
        - n_rois (int)         : number of ROIs
                                 default: None
        - epoch_n (int)        : epoch number
                                 default: None

    Returns:
        if all namedtuples and dictionaries are passed:
        - info (dict): analysis dictionary
        else if any are None:
        - info (list): list of dictionary keys
    """

    if any(par is None for par in [analyspar, sesspar, stimpar, extrapar]):
        # no (complete) parameters passed: just return the key order
        return ["mouse_n", "sess_n", "plane", "line", "fluor", "scale",
                "shuffle", "stimtype", "bri_dir", "comp", "uniqueid", "run_n",
                "runtype", "n_rois", "epoch_n"]

    # collapse Bricks directions to a single value ("both" if 2 directions)
    bri_dir = stimpar.bri_dir
    if stimpar.stimtype == "bricks":
        bri_dir = gen_util.list_if_not(stimpar.bri_dir)
        bri_dir = "both" if len(bri_dir) == 2 else bri_dir[0]

    info = {
        "mouse_n" : sesspar.mouse_n,
        "sess_n"  : sesspar.sess_n,
        "plane"   : sesspar.plane,
        "line"    : sesspar.line,
        "fluor"   : analyspar.fluor,
        "scale"   : analyspar.scale,
        "shuffle" : extrapar["shuffle"],
        "stimtype": stimpar.stimtype,
        "bri_dir" : bri_dir,
        "comp"    : comp,
        "uniqueid": extrapar["uniqueid"],
        "runtype" : sesspar.runtype,
        "n_rois"  : n_rois,
    }

    if alg == "pytorch":
        # run number is only tracked per run for pytorch analyses
        info["run_n"] = extrapar["run_n"]
    if epoch_n is not None:
        info["epoch_n"] = epoch_n

    return info
#############################################
def save_hyperpar(analyspar, logregpar, sesspar, stimpar, extrapar):
    """
    save_hyperpar(analyspar, logregpar, sesspar, stimpar, extrapar)

    Saves the hyperparameters for an analysis to "hyperparameters.json" in
    the directory given by extrapar["dirname"].

    Required args:
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - logregpar (LogRegPar): named tuple containing logistic regression
                                 parameters
        - sesspar (SessPar)    : named tuple containing session parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - extrapar (dict)      : dictionary with extra parameters
            ["dirname"] (str): directory in which to save hyperparameters

    Returns:
        - hyperpars (dict): hyperparameter dictionary with inputs as keys and
                            named tuples converted to dictionaries
    """

    hyperpars = dict()
    # convert each named tuple to a plain dictionary for JSON serialization
    for key, ntup in zip(
            ["analyspar", "logregpar", "sesspar", "stimpar"],
            [analyspar, logregpar, sesspar, stimpar]):
        hyperpars[key] = ntup._asdict()
    hyperpars["extrapar"] = extrapar

    file_util.saveinfo(hyperpars, "hyperparameters.json", extrapar["dirname"])

    return hyperpars
#############################################
def get_classes(comp="surp", gab_ori="shared"):
    """
    get_classes()

    Returns names for classes based on the comparison type.

    Optional args:
        - comp (str)           : type of comparison
                                 default: "surp"
        - gab_ori (str or list): Gabor orientations ("all" is expanded to
                                 [0, 45, 90, 135])
                                 default: "shared"

    Returns:
        - classes (list): list of class names
    """

    if gab_ori == "all":
        gab_ori = [0, 45, 90, 135]

    if comp == "surp":
        classes = ["Regular", "Surprise"]
    elif comp in ["AvB", "AvC", "BvC", "DvU"]:
        # class letters come from the comparison name, e.g. "AvB" -> A, B
        first, second = comp[0], comp[2]
        classes = [f"Gabor {first}", f"Gabor {second}"]
    elif "ori" in comp:
        remainder = comp.replace("ori", "")
        if remainder == "U":
            # unexpected-frame comparisons: orientations shifted by 90 deg
            deg_vals = [ori + 90 for ori in gab_ori]
        elif len(remainder) == 2:
            deg_vals = gab_ori[0]
        else:
            deg_vals = gab_ori
        classes = [f"{ori}\u00B0" for ori in deg_vals]
    elif "dir" in comp:
        classes = []
        for direc in ["right", "left"]:
            label = sess_str_util.dir_par_str(direc, str_type="print")
            label = label.replace("bricks (", "").replace(", ", " (")
            classes.append(label.capitalize())
    elif "half" in comp:
        classes = ["First half", "Second half"]
    else:
        gen_util.accepted_values_error("comp", comp,
            ["surp", "AvB", "AvC", "BvC", "DvU", "dir...", "...ori..."])

    return classes
#############################################
def get_data(stim, analyspar, stimpar, quintpar, qu_i=0, surp=[0, 1],
             n=1, remconsec_surps=False, get_2nd=False):
    """
    get_data(stim, analyspar, stimpar, quintpar)

    Returns ROI data based on specified criteria.

    Required args:
        - stim (Stim)          : stimulus object
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - quintpar (QuintPar)  : named tuple containing quintile parameters

    Optional args:
        - qu_i (int)            : quintile index
                                  default: 0
        - surp (list)           : surprise values
                                  default: [0, 1]
        - n (int)               : factor by which to multiply number of
                                  surprise values
                                  default: 1
        - remconsec_surps (bool): whether consecutive segments are removed for
                                  surprise segments
                                  default: False
        - get_2nd (bool)        : if True, every second segment is retained
                                  default: False

    Returns:
        - roi_data (3D array): ROI data, as sequences x frames x ROIs
        - surp_n (int)       : number of surprise sequences
    """

    # data for single quintile
    # Two passes: first (surp_use=1) only to count surprise sequences,
    # second (surp_use=surp) to retrieve the segments actually used.
    for t, surp_use in enumerate([1, surp]):
        # consecutive segments are only dropped on the surprise-counting pass
        remconsec = (remconsec_surps and surp_use == 1)
        segs = quint_analys.quint_segs(
            stim, stimpar, quintpar.n_quints, qu_i, surp_use,
            remconsec=remconsec)[0][0]
        # get alternating for consecutive segments
        if get_2nd and not remconsec:
            segs = gen_util.get_alternating_consec(segs, first=False)
        if t == 0:
            # surprise count, optionally multiplied (e.g. for "half" comps)
            surp_n = len(segs) * n

    # `segs` here is from the second (full) pass
    twop_fr = stim.get_twop_fr_by_seg(segs, first=True)["first_twop_fr"]

    # do not scale (scaling factors cannot be based on test data)
    roi_data = gen_util.reshape_df_data(
        stim.get_roi_data(twop_fr, stimpar.pre, stimpar.post,
            analyspar.fluor, remnans=True, scale=False), squeeze_cols=True)

    # for brick logreg test analyses
    # (TEST_BRICKS_VARIATIONS is a module-level testing switch; roi_data is
    # ROIs x sequences x frames at this point, per the final transpose below)
    if TEST_BRICKS_VARIATIONS:
        if remconsec_surps:
            # Normalize to first half
            # NOTE(review): subtracts, per ROI/sequence, the median over the
            # first half of the last axis (frames) — confirm axis semantics
            mid = roi_data.shape[-1] // 2
            div = np.median(roi_data[:, :, : mid], axis=-1)
            roi_data = roi_data - np.expand_dims(div, -1)
        # # Mean only
        if TEST_BRICKS_VARIATIONS == "mean":
            logger.warning("Using mean across ROIs, for testing purposes.")
            # 1 x seqs x frames
            roi_data = np.expand_dims(np.nanmean(roi_data, axis=0), axis=0)
        # Mean and std
        elif TEST_BRICKS_VARIATIONS == "mean_std":
            logger.warning("Using mean and standard deviation across ROIs, "
                "for testing purposes.")
            # 2 x seqs x frames (mean and std stand in for ROIs)
            roi_data = np.stack([np.nanmean(roi_data, axis=0),
                np.nanstd(roi_data, axis=0)], axis=0)

    # transpose to seqs x frames x ROIs
    roi_data = np.transpose(roi_data, [1, 2, 0])

    return roi_data, surp_n
#############################################
def get_sess_data(sess, analyspar, stimpar, quintpar, class_var="surps",
                  surps=[0, 1], regvsurp=False, split_oris=False):
    """
    get_sess_data(sess, analyspar, stimpar, quintpar)

    Logs session information and returns ROI trace segments, target classes
    and class information and number of surprise segments in the dataset.

    Required args:
        - sess (Session)       : session
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - quintpar (QuintPar)  : named tuple containing quintile parameters

    Optional args:
        - class_var (str)          : class determining variable ("surps" or
                                     stimpar attribute)
                                     default: "surps"
        - surps (list, str, int)   : surprise value(s) (list if class_var is
                                     "surps", otherwise 0, 1 or "any")
                                     default: [0, 1]
        - regvsurp (bool)          : if True, the first dataset will include
                                     regular sequences and the second will
                                     include surprise sequences
                                     default: False
        - split_oris (bool or list): List of Gabor frames for each split, or
                                     False if splitting orientation comparison
                                     is not applicable.
                                     default: False

    Returns:
        - roi_seqs (list)   : list of 3D arrays of selected ROI trace seqs
                              (1 or 2 if an additional test set is included),
                              each structured as sequences x frames x ROIs
        - seq_classes (list): list of 2D arrays of sequence classes
                              (1 or 2 if an additional test set is included),
                              each structured as class values x 1
        - n_surps (list)    : list of lists of number of surprise sequences
                              (doubled if "half" comparison),
                              structured as datasets x class
    """

    stim = sess.get_stim(stimpar.stimtype)
    split_oris = split_oris is not False # set to boolean

    # at most one of these dataset-splitting modes may be active at a time
    if (regvsurp + (len(quintpar.qu_idx) > 1) + ("half" in class_var)
            + split_oris) > 1:
        raise ValueError("Cannot combine any of the following: separating "
            "quintiles, regvsurp, half comparisons, multiple Gabor frame "
            "orientation comparisons.")
    elif len(quintpar.qu_idx) > 2:
        raise ValueError("Max of 2 quintiles expected.")
    elif split_oris and len(stimpar.gabfr) > 2:
        raise ValueError("Max of 2 Gabor frame sets expected for orientation "
            "classification.")

    # check for stimulus pre/post problems
    # (pre > 0 may create overlapping sequences, only handled for bricks)
    pre_post_err = False
    get_2nd, remconsec_surps = False, False
    if stimpar.pre > 0:
        if stimpar.stimtype == "bricks":
            if class_var == "surps":
                remconsec_surps = True
            elif stimpar.pre == 1:
                get_2nd = True
            else:
                pre_post_err = True
        else:
            pre_post_err = True
    if stimpar.post > 1.0:
        if not stimpar.stimtype == "gabors" and stimpar.post <= 1.5:
            pre_post_err = True
    if pre_post_err:
        raise NotImplementedError("Not implemented to prevent sequence overlap "
            f"for {stimpar.stimtype}: {stimpar.pre} pre/{stimpar.post} post "
            f"for {class_var} classification")

    # determine the number of classes (n_cl) and the surprise multiplier (n)
    n = 1
    if class_var == "surps":
        n_cl = len(surps)
    elif "half" in class_var:
        n_cl = 2
        # DOUBLE surp ns to compensate for shorter blocks, if using control
        n = 2
        if "diff" in class_var:
            # compare middle quartiles (requires both Bricks directions)
            quintpar = sess_ntuple_util.init_quintpar(
                4, [[1, 2]], [None], [None])
            if len(np.unique(stim.direcs)) != 2:
                raise ValueError(
                    "Segments do not fit these criteria (missing directions).")
        else:
            # compare the two halves of the session
            quintpar = sess_ntuple_util.init_quintpar(
                2, [[0, 1]], [None], [None])
    else:
        # classes come from the values of a stimpar attribute (e.g. gab_ori)
        n_cl = len(stimpar._asdict()[class_var])

    # modify surps, qu_idx, gabfr to cycle through datasets
    if len(quintpar.qu_idx) == 2:
        surps = [surps, surps]
        gabfr_idxs = ["ignore", "ignore"]
        if regvsurp:
            raise ValueError(
                "Cannot set regvsurp to True if more than 1 quintile.")
        if "part" in class_var:
            raise ValueError("Cannot do half comparisons with quintiles.")
    elif regvsurp:
        # train on one surprise value, extra-test on the complementary one
        surps = [surps, 1-surps]
        gabfr_idxs = ["ignore", "ignore"]
        quintpar = sess_ntuple_util.init_quintpar(
            1, [0, 0], [None, None], [None, None])
    elif split_oris:
        # one dataset per Gabor frame set
        surps = surps
        gabfr_idxs = [0, 1]
        quintpar = sess_ntuple_util.init_quintpar(
            1, [0, 0], [None, None], [None, None])
    else:
        surps = [surps]
        gabfr_idxs = ["ignore"]
    # NOTE(review): this unconditional reassignment overrides the branch
    # values above; harmless, as the zip() below truncates to qu_idx length,
    # but appears to shadow the per-branch gabfr_idxs — confirm intent
    gabfr_idxs = [0, 1] if split_oris else ["ignore", "ignore"]

    # cycle through classes
    roi_seqs = [[] for _ in range(len(quintpar.qu_idx))]
    seq_classes = [[] for _ in range(len(quintpar.qu_idx))]
    surp_ns = [[] for _ in range(len(quintpar.qu_idx))]
    # cycle through data groups (quint or regvsurp or gabfr for oris)
    for d, (qu_i, subsurps, gabfr_idx) in enumerate(
            zip(quintpar.qu_idx, surps, gabfr_idxs)):
        for cl in range(n_cl):
            use_qu_i = [qu_i]
            surp = subsurps
            stimpar_sp = stimpar
            if class_var == "surps":
                # one surprise value per class
                surp = subsurps[cl]
            elif "half" in class_var:
                # one quintile (half) per class
                use_qu_i = [qu_i[cl]]
            else:
                # one stimpar attribute value per class
                keys = class_var
                vals = stimpar._asdict()[class_var][cl]
                if split_oris:
                    keys = [keys, "gabfr", "gab_ori"]
                    vals = [vals, stimpar.gabfr[gabfr_idx],
                        stimpar.gab_ori[gabfr_idx][cl]]
                # modify stimpar
                stimpar_sp = sess_ntuple_util.get_modif_ntuple(
                    stimpar, keys, vals)
            roi_data, surp_n = get_data(
                stim, analyspar, stimpar_sp, quintpar, qu_i=use_qu_i,
                surp=surp, remconsec_surps=remconsec_surps, n=n,
                get_2nd=get_2nd)
            roi_seqs[d].append(roi_data)
            seq_classes[d].append(np.full(len(roi_data), cl))
            surp_ns[d].append(surp_n)
        # concatenate data split by class along trial seqs axis
        roi_seqs[d] = np.concatenate(roi_seqs[d], axis=0)
        seq_classes[d] = np.concatenate(seq_classes[d], axis=0)

    # get logistic variance across datasets
    log_var = np.log(np.var(np.concatenate(roi_seqs, axis=0)))
    n_fr, nrois = roi_seqs[0].shape[1:] # in training set

    # assemble stimulus-specific info for logging
    if stimpar.stimtype == "gabors":
        surp_use = surps[0]
        if surp_use == [0, 1] and not isinstance(stimpar.gabfr, list):
            surp_use = "any"
        if split_oris:
            gabfr_lett = [sess_str_util.gabfr_letters(
                gabfr, surp=surp_use) for gabfr in stimpar.gabfr]
            gabfr_lett = " -> ".join([str(lett) for lett in gabfr_lett])
        else:
            gabfr_lett = sess_str_util.gabfr_letters(
                stimpar.gabfr, surp=surp_use)
        stim_info = f"\nGab fr: {gabfr_lett}\nGab K: {stimpar.gabk}"
    elif stimpar.stimtype == "bricks":
        stim_info = (f"\nBri dir: {stimpar.bri_dir}\n"
            f"Bri size: {stimpar.bri_size}")

    logger.info(f"Runtype: {sess.runtype}\nMouse: {sess.mouse_n}\n"
        f"Sess: {sess.sess_n}\nPlane: {sess.plane}\nLine: {sess.line}\n"
        f"Fluor: {analyspar.fluor}\nROIs: {nrois}{stim_info}\n"
        f"Frames per seg: {n_fr}\nLogvar: {log_var:.2f}",
        extra={"spacing": "\n"})

    return roi_seqs, seq_classes, surp_ns
#############################################
def sample_seqs(roi_seqs, seq_classes, n_surp):
    """
    sample_seqs(roi_seqs, seq_classes, n_surp)

    Samples sequences to correspond to the ratio of surprise to regular
    sequences: keeps half of the sequences overall, of which n_surp are
    surprise (class 1) and the rest regular (class 0).

    Required args:
        - roi_seqs (3D array)   : array of all ROI trace sequences, structured
                                  as: sequences x frames x ROIs
        - seq_classes (2D array): array of all sequence classes (0, 1),
                                  structured as class values x 1
        - n_surp (int)          : number of surprise sequences

    Returns:
        - roi_seqs (3D array)   : array of selected ROI trace sequences,
                                  structured as sequences x frames x ROIs
        - seq_classes (2D array): array of sequence classes, structured as
                                  class values x 1
    """

    if np.unique(seq_classes).tolist() != [0, 1]:
        raise ValueError("Function expects classes 0 and 1 only.")

    reg_idx_all = np.where(seq_classes == 0)[0]
    surp_idx_all = np.where(seq_classes == 1)[0]

    # target: half of all sequences, with exactly n_surp surprise ones
    n_reg = (len(reg_idx_all) + len(surp_idx_all)) // 2 - n_surp

    keep_reg = np.random.choice(reg_idx_all, n_reg, replace=False)
    keep_surp = np.random.choice(surp_idx_all, n_surp, replace=False)

    roi_seqs = np.concatenate(
        [roi_seqs[keep_reg], roi_seqs[keep_surp]], axis=0)
    seq_classes = np.concatenate(
        [seq_classes[keep_reg], seq_classes[keep_surp]], axis=0)

    return roi_seqs, seq_classes
#############################################
def save_tr_stats(plot_data, plot_targ, data_names, analyspar, stimpar, n_rois,
                  alg="sklearn", mod=None, dirname="."):
    """
    save_tr_stats(plot_data, plot_targ, data_names, analyspar, stimpar, n_rois)

    Extracts, saves and returns trace statistics in a json in the specified
    directory.

    Required args:
        - plot_data (list)     : list of 3D arrays of selected ROI trace seqs
                                 to be plotted, each structured as
                                 sequences x frames x ROIs
        - plot_targ (list)     : list of 2D arrays of sequence classes to be
                                 plotted, each structured as class values x 1
        - data_names (list)    : names for each plot_data array
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - stimpar (SessPar)    : named tuple containing stimulus parameters
        - n_rois (int)         : number of ROIs in data

    Optional args:
        - alg (str)             : algorithm used to run logistic regression
                                  ("sklearn" or "pytorch")
                                  default: "sklearn"
        - mod (sklearn pipeline): sklearn pipeline (model). Required if alg is
                                  "sklearn"
                                  default: None
        - dirname (str)         : directory in which to save the traces
                                  default: "."

    Returns:
        - tr_stats (dict): dictionary of trace stats data, with
                           "n_rois", "xran", and per dataset name
                           "{name}_class_stats" (class x stats x frames)
                           and "{name}_ns" (number of segments per class)
    """

    if len(data_names) != len(plot_data):
        raise ValueError("Not as many 'plot_data' items as 'data_names'.")

    tr_stats = {"n_rois": n_rois}
    # class values are taken from the first (training) target set
    classes = np.unique(plot_targ[0])

    for dataset, targets, set_name in zip(plot_data, plot_targ, data_names):
        if dataset is None:
            continue
        if alg == "sklearn":
            # scales, flattens and optionally shuffles data
            # (last positional arg flags whether this is the training set)
            dataset = logreg_util.get_transf_data_sk(
                mod, dataset, False, set_name == "train")
        elif alg == "pytorch":
            # torch tensors: convert to numpy arrays before computing stats
            dataset = dataset.numpy()
            targets = targets.numpy()
        else:
            gen_util.accepted_values_error("alg", alg, ["sklearn", "pytorch"])
        xran, class_stats, ns = logreg_util.get_stats(
            dataset, targets, stimpar.pre, stimpar.post, classes,
            analyspar.stats, analyspar.error)
        tr_stats["xran"] = xran.tolist()
        tr_stats[f"{set_name}_class_stats"] = class_stats.tolist()
        tr_stats[f"{set_name}_ns"] = ns

    file_util.saveinfo(tr_stats, "tr_stats.json", dirname)

    return tr_stats
#############################################
@logreg_util.catch_set_problem
def init_logreg_model_pt(roi_seqs, seq_classes, logregpar, extrapar,
                         scale=True, device="cpu", thresh_cl=2):
    """
    init_logreg_model_pt(roi_seqs, seq_classes, logregpar, extrapar)

    Initializes and returns the pytorch logreg model and dataloaders.

    Required args:
        - roi_seqs (list)      : list of 3D arrays of selected ROI trace seqs
                                 (1 or 2 if an additional test set is
                                 included), each structured as
                                 sequences x frames x ROIs
        - seq_classes (list)   : list of 2D arrays of sequence classes (0 or 1)
                                 (1 or 2 if an additional test set is
                                 included), each structured as
                                 class values x 1
        - logregpar (LogRegPar): named tuple containing logistic regression
                                 parameters
        - extrapar (dict)      : dictionary with extra parameters
            ["shuffle"] (bool): if True, data is shuffled

    Optional args:
        - scale (bool)   : whether data is scaled by ROI
                           default: True
        - device (str)   : device to use
                           default: "cpu"
        - thresh_cl (int): size threshold for classes in each non empty
                           set beneath which the indices are reselected
                           (only if targets are passed). Not checked if
                           thresh_cl is 0.
                           default: 2

    Returns:
        - model (torch.nn.Module)       : Neural network module with optimizer
                                          and loss function as attributes
        - dls (list of torch DataLoaders): list of torch DataLoaders for
                                           each set. If a set is empty, the
                                           corresponding dls value is None.
        - extrapar (dict)               : dictionary with extra parameters
            ["cl_wei"] (list)     : list of weights for each class
            ["loss_name"] (str)   : name of the loss function used
            ["sc_facts"] (list)   : min/max value(s) with which to scale
            ["shuffle"] (bool)    : if True, data is shuffled
            ["shuff_reidx"] (list): list of indices with which targets were
                                    shuffled
    """

    # scale along the last (ROI) axis, or not at all
    sc_dim = "last" if scale else "none"

    if np.unique(seq_classes[0]).tolist() != [0, 1]:
        raise NotImplementedError("This Pytorch logreg function is "
            "implemented only for classes 0 and 1.")
    if len(roi_seqs) > 2:
        raise ValueError("Must pass no more than 2 sets of data, but "
            f"found {len(roi_seqs)}.")

    # build train/val/test dataloaders from the first dataset
    dl_info = data_util.create_dls(
        roi_seqs[0], seq_classes[0], train_p=logregpar.train_p, sc_dim=sc_dim,
        sc_type="stand_rob", extrem="perc", shuffle=extrapar["shuffle"],
        batchsize=logregpar.batchsize, thresh_cl=thresh_cl)
    dls = dl_info[0]

    # copy so the caller's extrapar is not modified
    extrapar = copy.deepcopy(extrapar)
    if scale:
        # scaling factors derived from the training set
        extrapar["sc_facts"] = dl_info[-1]

    # optional second dataset: appended as an extra dataloader
    if len(roi_seqs) == 2:
        class_vals = seq_classes[1]
        if extrapar["shuffle"]:
            # NOTE(review): shuffles seq_classes[1] in place, mutating the
            # caller's array — confirm this side effect is intended
            np.random.shuffle(class_vals)
        if scale:
            # apply the TRAINING set scaling factors to the extra set
            roi_seqs[1] = data_util.scale_datasets(
                torch.Tensor(roi_seqs[1]), sc_facts=extrapar["sc_facts"])[0]
        dls.append(data_util.init_dl(
            roi_seqs[1], class_vals, logregpar.batchsize))

    if extrapar["shuffle"]:
        extrapar["shuff_reidx"] = dl_info[1]

    # from train targets
    extrapar["cl_wei"] = logreg_util.class_weights(dls[0].dataset.targets)

    # build the model, optimizer and class-weighted BCE loss
    n_fr, n_rois = roi_seqs[0].shape[1:]
    model = logreg_util.LogReg(n_rois, n_fr).to(device)
    model.opt = torch.optim.Adam(
        model.parameters(), lr=logregpar.lr, weight_decay=logregpar.wd)
    model.loss_fn = logreg_util.weighted_BCE(extrapar["cl_wei"])
    extrapar["loss_name"] = model.loss_fn.name

    return model, dls, extrapar
#############################################
def save_scores(info, scores, key_order=None, dirname=""):
    """
    save_scores(info, scores)

    Saves run information and scores per epoch as a dataframe
    ("scores_df.csv" in dirname).

    Required args:
        - info (dict)          : dictionary of parameters (see info_dict())
        - scores (pd DataFrame): dataframe of recorded scores

    Optional args:
        - key_order (list): ordered list of keys
                            default: None
        - dirname (str)   : directory in which to save scores
                            default: ""

    Returns:
        - summ_df (pd DataFrame): dataframe with scores and recorded parameters
    """

    summ_df = copy.deepcopy(scores)
    if key_order is None:
        key_order = list(info.keys())

    # insert in reverse so the final column order follows key_order
    for col in reversed(key_order):
        if col not in info.keys():
            continue
        summ_df.insert(0, col, info[col])

    file_util.saveinfo(summ_df, "scores_df.csv", dirname)

    return summ_df
#############################################
def setup_run(quintpar, extrapar, techpar, sess_data, comp="surp",
              gab_ori="all"):
    """
    setup_run(quintpar, extrapar, techpar, sess_data)

    Sets up run(s) by setting the seed, getting class names and the number of
    ROIs, and deep-copying the session data.

    Required args:
        - quintpar (QuintPar): named tuple containing quintile parameters
        - extrapar (dict)    : dictionary with extra parameters
            ["seed"] (int)    : seed to use
            ["shuffle"] (bool): if analysis is on shuffled data
            ["uniqueid"] (str): unique ID for the analysis
        - techpar (dict)     : dictionary with technical parameters
            ["alg"] (str)      : algorithm used ("sklearn" or "pytorch")
            ["compdir"] (str)  : specific output comparison directory
            ["device"] (str)   : device to use (e.g., "cpu" or "cuda")
            ["fontdir"] (str)  : directory in which additional fonts are
                                 located
            ["output"] (str)   : main output directory
            ["plt_bkend"] (str): plt backend to use (e.g., "agg" or None)
            ["reseed"] (bool)  : if True, run is reseeded
        - sess_data (list)   : list of session data:
            - roi_seqs (list)   : list of 3D arrays of selected ROI trace seqs
                                  (1 or 2 if an additional test set is
                                  included), each structured as
                                  sequences x frames x ROIs
            - seq_classes (list): list of 2D arrays of sequence classes
                                  (1 or 2 if an additional test set is
                                  included), each structured as
                                  class values x 1
            - n_surps (list)    : list of lists of number of surprise sequences
                                  (doubled if "half" comparison),
                                  structured as datasets x class

    Optional args:
        - comp (str)           : comparison
                                 default: "surp"
        - gab_ori (str or list): Gabor orientations,
                                 for comp values like DoriU, DoriA, etc.
                                 default: "all"

    Returns:
        - extrapar (dict)   : dictionary with extra parameters, updated with
                              "classes" (list of class names), "n_rois" (int)
                              and the seed used
        - roi_seqs (list)   : deep copy of the input ROI trace seqs
        - seq_classes (list): deep copy of the input sequence classes
        - n_surps (list)    : deep copy of the input surprise sequence counts
    """

    # copy so the caller's extrapar is not modified
    extrapar = copy.deepcopy(extrapar)

    # reseeding: wipe the recorded seed so a new one is drawn
    if techpar["reseed"]:
        extrapar["seed"] = None

    # seed torch as well, if the pytorch algorithm is used
    seed_torch = (techpar["alg"] == "pytorch")
    extrapar["seed"] = gen_util.seed_all(
        extrapar["seed"], techpar["device"], seed_torch=seed_torch)

    extrapar["classes"] = get_classes(comp, gab_ori)

    # deep copy, as downstream code may subsample/mutate the data
    roi_seqs, seq_classes, n_surps = copy.deepcopy(sess_data)
    extrapar["n_rois"] = roi_seqs[0].shape[-1]

    return extrapar, roi_seqs, seq_classes, n_surps
#############################################
def all_runs_sk(n_runs, analyspar, logregpar, quintpar, sesspar, stimpar,
                extrapar, techpar, sess_data):
    """
    all_runs_sk(n_runs, analyspar, logregpar, quintpar, sesspar, stimpar,
                extrapar, techpar, sess_data)

    Does all runs of a logistic regression on the specified comparison
    and session data using sklearn. Records hyperparameters, all models,
    tr_stats dictionary. Plots scores and training statistics.

    Required args:
        - n_runs (int)         : number of runs to do
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - logregpar (LogRegPar): named tuple containing logistic regression
                                 parameters
        - quintpar (QuintPar)  : named tuple containing quintile parameters
        - sesspar (SessPar)    : named tuple containing session parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - extrapar (dict)      : dictionary with extra parameters
            ["seed"] (int)    : seed to use
            ["shuffle"] (bool): if analysis is on shuffled data
            ["uniqueid"] (str): unique ID for the analysis
        - techpar (dict)       : dictionary with technical parameters
            ["compdir"] (str)  : specific output comparison directory
            ["device"] (str)   : device to use (e.g., "cpu" or "cuda")
            ["fontdir"] (str)  : directory in which additional fonts are
                                 located
            ["output"] (str)   : main output directory
            ["plt_bkend"] (str): plt backend to use (e.g., "agg" or None)
            ["reseed"] (bool)  : if True, run is reseeded
        - sess_data (list)     : list of session data:
            - roi_seqs (list)   : list of 3D arrays of selected ROI trace seqs
                                  (1 or 2 if an additional test set is
                                  included), each structured as
                                  sequences x frames x ROIs
            - seq_classes (list): list of 2D arrays of sequence classes
                                  (1 or 2 if an additional test set is
                                  included), each structured as
                                  class values x 1
            - n_surps (list)    : list of lists of number of surprise sequences
                                  (doubled if "half" comparison),
                                  structured as datasets x class
    """

    # pytorch-only hyperparameters are irrelevant for sklearn: blank them out
    logregpar = sess_ntuple_util.get_modif_ntuple(
        logregpar, ["batchsize", "lr", "wd"], [None, None, None])

    if techpar["device"] == "cuda":
        warnings.warn("sklearn method not implemented with GPU.")

    # seed, get classes/n_rois, deep-copy session data
    [extrapar, roi_seqs, seq_classes, n_surps] = setup_run(
        quintpar, extrapar, techpar, sess_data, logregpar.comp,
        gab_ori=stimpar.gab_ori)

    main_data = [roi_seqs[0], seq_classes[0]]

    # per-dataset subsampling targets (False: no subsampling)
    samples = [False for _ in n_surps]
    if logregpar.ctrl:
        # get n_surp for last class
        samples = [n_surp[-1] for n_surp in n_surps]
        keep_all = [val in logregpar.comp for val in ["ori", "dir_reg", "half"]]
        if sum(keep_all) > 0: # get n_surp for all classes
            samples = n_surps

    plot_util.manage_mpl(techpar["plt_bkend"], fontdir=techpar["fontdir"])

    # copy again, as n_runs and dirname are added here
    extrapar = copy.deepcopy(extrapar)
    extrapar["n_runs"] = n_runs
    # sklearn runs share one "{n_runs}_runs" (or "{uniqueid}_{n}") directory
    rundir = get_rundir(extrapar["n_runs"], extrapar["uniqueid"], logregpar.alg)
    extrapar["dirname"] = file_util.createdir([techpar["output"],
        techpar["compdir"], rundir])

    scale_str = sess_str_util.scale_par_str(analyspar.scale, "print")
    shuff_str = sess_str_util.shuff_par_str(extrapar["shuffle"], "labels")
    logger.info(f"Runs ({n_runs}): {scale_str}{shuff_str}",
        extra={"spacing": "\n"})

    split_test = False
    # returns None if a set problem was caught (catch_set_prob=True)
    returns = logreg_util.run_logreg_cv_sk(
        main_data[0], main_data[1], logregpar._asdict(), extrapar,
        analyspar.scale, samples[0], split_test, extrapar["seed"],
        techpar["parallel"], catch_set_prob=True)
    if returns is None:
        return
    else:
        mod_cvs, cv, extrapar = returns

    hyperpars = save_hyperpar(analyspar, logregpar, sesspar, stimpar, extrapar)

    # Additional data for scoring
    set_names = ["train", "test"]
    extra_data, extra_cv = None, None
    extra_name = sess_str_util.ext_test_str(
        logregpar.q1v4, logregpar.regvsurp, logregpar.comp)
    if cv._split_test:
        set_names.append("test_out")
    if len(roi_seqs) == 2:
        # second dataset serves as an extra, named test set
        extra_data = [roi_seqs[1], seq_classes[1]]
        extra_cv = logreg_util.StratifiedShuffleSplitMod(
            n_splits=cv.n_splits, train_p=0.5, sample=samples[1],
            bal=logregpar.bal) # since train_p cannot be 1.0
        if extra_name is None:
            raise ValueError("Extra test dataset not labelled.")
        set_names.append(extra_name)

    # returns None if a set problem was caught (catch_set_prob=True)
    mod_cvs = logreg_util.test_logreg_cv_sk(
        mod_cvs, cv, extrapar["scoring"], main_data=main_data,
        extra_data=extra_data, extra_name=extra_name, extra_cv=extra_cv,
        catch_set_prob=True)
    if mod_cvs is None:
        return

    # Get and save best model (highest neg log loss on the last scored set)
    best_mod_idx = np.argmax(mod_cvs[f"{set_names[-1]}_neg_log_loss"])
    best_mod = mod_cvs["estimator"][best_mod_idx]

    # Save data used for best model
    plot_data, plot_targ, data_names = [], [], []
    for name, subcv, data in zip(
            ["train", extra_name], [cv, extra_cv], [main_data, extra_data]):
        if data is None:
            continue
        if name == "train":
            # training indices of the best model's split
            idx = subcv._set_idx[best_mod_idx][0]
        else:
            # flatten all set indices for the extra dataset
            idx = [i for sub in subcv._set_idx[best_mod_idx] for i in sub]
        plot_data.append(data[0][idx])
        plot_targ.append(data[1][idx])
        data_names.append(name)

    tr_stats = save_tr_stats(
        plot_data, plot_targ, data_names, analyspar, stimpar,
        extrapar["n_rois"], logregpar.alg, best_mod,
        dirname=extrapar["dirname"])

    # Get scores dataframe
    scores = logreg_util.create_score_df_sk(
        mod_cvs, best_mod_idx, set_names, extrapar["scoring"])
    info = info_dict(
        analyspar, sesspar, stimpar, extrapar, logregpar.comp, logregpar.alg,
        n_rois=extrapar["n_rois"])
    full_scores = save_scores(
        info, scores, key_order=info_dict(), dirname=extrapar["dirname"])

    # Plot traces and scores (weights of the best model)
    logreg_plots.plot_traces_scores(
        hyperpars, tr_stats, full_scores, plot_wei=best_mod_idx)

    plt.close("all")
#############################################
def single_run_pt(run_n, analyspar, logregpar, quintpar, sesspar, stimpar,
                  extrapar, techpar, sess_data):
    """
    single_run_pt(run_n, analyspar, logregpar, quintpar, sesspar, stimpar,
                  extrapar, techpar, sess_data)

    Does a single run of a logistic regression using PyTorch on the specified
    comparison and session data. Records hyperparameters, best model, last
    model, tr_stats dictionary. Plots scores and training statistics.

    Required args:
        - run_n (int)          : run number
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - logregpar (LogRegPar): named tuple containing logistic regression
                                 parameters
        - quintpar (QuintPar)  : named tuple containing quintile parameters
        - sesspar (SessPar)    : named tuple containing session parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - extrapar (dict)      : dictionary with extra parameters
            ["seed"] (int)     : seed to use
            ["shuffle"] (bool) : if analysis is on shuffled data
            ["uniqueid"] (str) : unique ID for the analysis
        - techpar (dict)       : dictionary with technical parameters
            ["compdir"] (str)  : specific output comparison directory
            ["device"] (str)   : device to use (e.g., "cpu" or "cuda")
            ["fontdir"] (str)  : directory in which additional fonts are
                                 located
            ["output"] (str)   : main output directory
            ["plt_bkend"] (str): plt backend to use (e.g., "agg" or None)
            ["reseed"] (bool)  : if True, run is reseeded
        - sess_data (list): list of session data:
            - roi_seqs (list)   : list of 3D arrays of selected ROI trace seqs
                                  (1 or 2 if an additional test set is
                                  included), each structured as
                                  sequences x frames x ROIs
            - seq_classes (list): list of 2D arrays of sequence classes
                                  (1 or 2 if an additional test set is
                                  included), each structured as
                                  class values x 1
            - n_surps (list)    : number of surprise sequences, listed by
                                  quintile, (doubled if "half" comparison)
    """
    # work on a copy so the caller's extrapar dict is not modified
    extrapar = copy.deepcopy(extrapar)
    extrapar["seed"] *= run_n + 1 # ensure different seed for each run
    [extrapar, roi_seqs, seq_classes, n_surps] = setup_run(
        quintpar, extrapar, techpar, sess_data, logregpar.comp,
        gab_ori=stimpar.gab_ori)
    extrapar["run_n"] = run_n
    scale_str = sess_str_util.scale_par_str(analyspar.scale, "print")
    shuff_str = sess_str_util.shuff_par_str(extrapar["shuffle"], "labels")
    run_n = extrapar["run_n"]
    logger.info(f"Run: {run_n}{scale_str}{shuff_str}",
        extra={"spacing": "\n"})
    # optionally subsample and/or balance each dataset (train, and extra test
    # set if present)
    for i in range(len(roi_seqs)):
        if logregpar.ctrl: # select a random subsample
            roi_seqs[i], seq_classes[i] = sample_seqs(
                roi_seqs[i], seq_classes[i], n_surps[i][-1])
        if logregpar.bal: # balance classes
            roi_seqs[i], seq_classes[i] = data_util.bal_classes(
                roi_seqs[i], seq_classes[i])
    plot_util.manage_mpl(techpar["plt_bkend"], fontdir=techpar["fontdir"])
    # pilot runs use a lower class threshold
    thresh_cl = 2
    if sesspar.runtype == "pilot":
        thresh_cl = 1
    rundir = get_rundir(extrapar["run_n"], extrapar["uniqueid"], logregpar.alg)
    extrapar["dirname"] = file_util.createdir(
        [techpar["output"], techpar["compdir"], rundir])
    # returns None if the class threshold check fails (run is skipped)
    returns = init_logreg_model_pt(
        roi_seqs, seq_classes, logregpar, extrapar, analyspar.scale,
        techpar["device"], thresh_cl=thresh_cl, catch_set_prob=True)
    if returns is None:
        return
    else:
        mod, dls, extrapar = returns
    hyperpars = save_hyperpar(analyspar, logregpar, sesspar, stimpar, extrapar)
    data_names = ["train"]
    extra_name = sess_str_util.ext_test_str(
        logregpar.q1v4, logregpar.regvsurp, logregpar.comp)
    # indices of the dataloaders to collect plotting data from (first, and
    # last if an extra test set is included)
    idx = [0]
    if len(roi_seqs) == 2:
        idx.append(-1)
        if extra_name == "":
            raise ValueError("Extra test dataset not labelled.")
        data_names.append(extra_name)
    plot_data = [dls[i].dataset.data for i in idx]
    plot_targ = [dls[i].dataset.targets for i in idx]
    tr_stats = save_tr_stats(
        plot_data, plot_targ, data_names, analyspar, stimpar,
        extrapar["n_rois"], alg=logregpar.alg, dirname=extrapar["dirname"])
    info = info_dict(
        analyspar, sesspar, stimpar, extrapar, logregpar.comp, logregpar.alg,
        n_rois=tr_stats["n_rois"])
    scores = logreg_util.fit_model_pt(
        info, logregpar.n_epochs, mod, dls, techpar["device"],
        extrapar["dirname"], ep_freq=techpar["ep_freq"],
        test_dl2_name=extra_name)
    logger.info("Run {}: training done.\n".format(extrapar["run_n"]))
    # save scores in dataframe
    full_scores = save_scores(
        info, scores, key_order=info_dict(), dirname=extrapar["dirname"])
    # plot traces and scores (only for first 5 runs)
    # no plotting for the rest to reduce number of files generated
    if run_n <= 5:
        logreg_plots.plot_traces_scores(
            hyperpars, tr_stats, full_scores, plot_wei=True)
    plt.close("all")
#############################################
def run_regr(sess, analyspar, stimpar, logregpar, quintpar, extrapar, techpar):
    """
    run_regr(sess, analyspar, stimpar, logregpar, quintpar, extrapar, techpar)

    Does runs of a logistic regressions on the specified comparison on a
    session.

    Required args:
        - sess (Session)       : Session on which to run logistic regression
        - analyspar (AnalysPar): named tuple containing analysis parameters
        - stimpar (StimPar)    : named tuple containing stimulus parameters
        - logregpar (LogRegPar): named tuple containing logistic regression
                                 analysis parameters
        - quintpar (QuintPar)  : named tuple containing quintile analysis
                                 parameters
        - extrapar (dict)      : dictionary containing additional analysis
                                 parameters
            ["uniqueid"] (str or int): unique ID for analysis
            ["seed"] (int)           : seed to seed random processes with
        - techpar (dict)       : dictionary containing technical analysis
                                 parameters
            ["device"] (str)   : device name (i.e., "cuda" or "cpu")
            ["ep_freq"] (int)  : frequency at which to log loss to console
            ["fontdir"] (str)  : directory in which additional fonts are
                                 located
            ["n_reg"] (int)    : number of regular runs
            ["n_shuff"] (int)  : number of shuffled runs
            ["output"] (str)   : general directory in which to save output
            ["parallel"] (bool): if True, runs are done in parallel
            ["plt_bkend"] (str): pyplot backend to use
            ["reseed"] (bool)  : if True, each run is reseeded
    """
    techpar = copy.deepcopy(techpar)
    sesspar = sess_ntuple_util.init_sesspar(
        sess.sess_n, False, sess.plane, sess.line, runtype=sess.runtype,
        mouse_n=sess.mouse_n)
    class_var, surps = get_class_pars(logregpar.comp, stimpar.stimtype)
    split_oris = sess_str_util.get_split_oris(logregpar.comp)
    try:
        sess_data = get_sess_data(
            sess, analyspar, stimpar, quintpar, class_var, surps,
            regvsurp=logregpar.regvsurp, split_oris=split_oris)
    except ValueError as err:
        # data selection errors matching these phrases are expected for some
        # criteria combinations: warn and skip instead of failing
        catch_phr = ["fit these criteria", "No frames"]
        catch = sum(phr in str(err) for phr in catch_phr)
        if catch:
            warnings.warn(str(err))
            return
        else:
            raise err
    # regular runs first, then shuffled runs
    for n_runs, shuffle in zip(
            [techpar["n_reg"], techpar["n_shuff"]], [False, True]):
        if n_runs == 0:
            continue
        # copies so the regular/shuffled passes do not share mutations
        extrapar = copy.deepcopy(extrapar)
        extrapar["shuffle"] = shuffle
        techpar = copy.deepcopy(techpar)
        techpar["compdir"] = sess_gen_util.get_analysdir(
            sesspar.mouse_n, sesspar.sess_n, sesspar.plane, analyspar.fluor,
            analyspar.scale, stimpar.stimtype, stimpar.bri_dir,
            stimpar.bri_size, stimpar.gabk, logregpar.comp, logregpar.ctrl,
            extrapar["shuffle"])
        if logregpar.alg == "pytorch":
            techpar["compdir"] = "{}_pt".format(techpar["compdir"])
            # optionally runs in parallel
            args_list = [analyspar, logregpar, quintpar, sesspar, stimpar,
                extrapar, techpar, sess_data]
            gen_util.parallel_wrap(
                single_run_pt, range(n_runs), args_list,
                parallel=techpar["parallel"])
        elif logregpar.alg == "sklearn":
            all_runs_sk(n_runs, analyspar, logregpar, quintpar, sesspar,
                stimpar, extrapar, techpar, sess_data)
        else:
            gen_util.accepted_values_error("logregpar.alg", logregpar.alg,
                ["pytorch", "sklearn"])
#############################################
def collate_scores(direc, all_labels, alg="sklearn"):
    """
    collate_scores(direc, all_labels)

    Collects the analysis information and scores from the last epoch recorded
    for a run and returns in dataframe.

    Required args:
        - direc (str)      : path to the specific comparison run folder
        - all_labels (list): ordered list of columns to save to dataframe

    Optional args:
        - alg (str): algorithm used to run logistic regression
                     ("sklearn" or "pytorch")
                     default: "sklearn"

    Return:
        - scores (pd DataFrame): Dataframe containing run analysis information
                                 and scores from the last epoch recorded.
    """
    logger.info(direc)
    scores = pd.DataFrame(columns=all_labels)
    ep_info, hyperpars = logreg_util.get_scores(direc, alg)
    if ep_info is None:
        # no epoch scores recorded: reconstruct identifying information from
        # the directory name and saved hyperparameters instead
        run_info = get_compdir_dict(direc, no_lists=True)
        run_info["scale"] = hyperpars["analyspar"]["scale"]
        run_info["runtype"] = hyperpars["sesspar"]["runtype"]
        run_info["line"] = hyperpars["sesspar"]["line"]
        run_info["n_rois"] = hyperpars["extrapar"]["n_rois"]
        for label in all_labels: # ensures correct order
            if label in run_info.keys():
                scores.loc[0, label] = run_info[label]
    else:
        for label in all_labels:
            if label not in ep_info.columns:
                continue
            if alg == "pytorch":
                # single-row result: extract the scalar value
                scores.loc[0, label] = ep_info[label].item()
            elif alg == "sklearn":
                scores[label] = ep_info[label]
    return scores
#############################################
def remove_overlap_comp_dir(gen_dirs, stimtype="gabors", comp="surp"):
    """
    remove_overlap_comp_dir(gen_dirs)

    Returns list of directories with those corresponding to comparisons with
    overlapping names removed.

    Required args:
        - gen_dirs (list): list of directories

    Optional args:
        - stimtype (str) : stimulus type
                           default: "gabors"
        - comp (str)     : type of comparison
                           default: "surp"
    """
    # comparisons whose names contain comp as a substring (e.g. "surp" within
    # a longer comparison name) would wrongly match in directory names
    overlapping = [other for other in get_comps(stimtype)
                   if other != comp and comp in other]
    for other in overlapping:
        gen_dirs = [direc for direc in gen_dirs if other not in direc]
    return gen_dirs
#############################################
def adjust_duplicate_runs(all_scores):
    """
    adjust_duplicate_runs(all_scores)

    Adjust the run numbers for duplicate runs so that they are sequential and
    not repeating.

    NOTE: modifies `all_scores` run numbers in place (via .loc assignment),
    and also returns it.

    Required args:
        - all_scores (pd DataFrame): dataframe compiling all the scores for
                                     logistic regression runs

    Returns:
        - all_scores (pd DataFrame): dataframe compiling all the scores for
                                     logistic regression runs, with run numbers
                                     adjusted if needed to be sequential and
                                     non repeating.
    """
    # group by all identifying columns except those expected to vary per run
    grouping_keys = list(filter(
        lambda x: x not in ["uniqueid", "run_n", "epoch_n"], info_dict()))
    # runs are expected to occur sequentially without gaps in the dataframe
    # for any grouping.
    for _, group in all_scores.groupby(grouping_keys):
        # a negative diff in run_n marks the start of a repeated sequence
        breaks = group.loc[group["run_n"].diff() < 0].index
        for break_pt in breaks:
            # label-based slices (inclusive of break_pt); relies on a
            # monotonic 0-based index, as set by reset_index in run_collate
            current_max = group.loc[0 : break_pt, "run_n"].max()
            # shift the repeated sequence past the current maximum; later
            # break points see the already-shifted values, keeping run
            # numbers sequential across multiple repeats
            group.loc[break_pt :, "run_n"] += current_max + 1
        if len(breaks):
            all_scores.loc[group.index, "run_n"] = group["run_n"]
    return all_scores
#############################################
def run_collate(output, stimtype="gabors", comp="surp", ctrl=False,
                alg="sklearn", parallel=False):
    """
    run_collate(output)

    Collects the analysis information and scores from the last epochs recorded
    for all runs for a comparison type, and saves to a dataframe.

    Overwrites any existing dataframe of collated data.

    Required args:
        - output (str): general directory in which summary dataframe is saved

    Optional args:
        - stimtype (str) : stimulus type
                           default: "gabors"
        - comp (str)     : type of comparison
                           default: "surp"
        - ctrl (bool)    : if True, control comparisons are analysed
                           default: False
        - alg (str)      : algorithm used to run logistic regression
                           ("sklearn" or "pytorch")
                           default: "sklearn"
        - parallel (bool): if True, run information is collected in parallel
                           default: False

    Returns:
        - all_scores (pd DataFrame): dataframe compiling all the scores for
                                     logistic regression runs in the output
                                     folder that correspond to the stimulus
                                     type and comparison type criteria
    """
    if not os.path.isdir(output):
        logger.info(f"{output} does not exist.")
        return
    # extra test set name is inferred from the output path
    ext_test = sess_str_util.ext_test_str(
        ("q1v4" in output), ("rvs" in output), comp)
    if ext_test == "":
        ext_test = None
    ctrl_str = sess_str_util.ctrl_par_str(ctrl)
    gen_dirs = file_util.getfiles(
        output, "subdirs", [stimtype[0:3], comp, ctrl_str])
    # pytorch run directories carry a "_pt" suffix (see run_regr)
    if alg == "sklearn":
        gen_dirs = [gen_dir for gen_dir in gen_dirs if "_pt" not in gen_dir]
    elif alg == "pytorch":
        gen_dirs = [gen_dir for gen_dir in gen_dirs if "_pt" in gen_dir]
    else:
        gen_util.accepted_values_error("alg", alg, ["sklearn", "pytorch"])
    # drop directories for comparisons whose names contain comp as a substring
    gen_dirs = remove_overlap_comp_dir(gen_dirs, stimtype, comp)
    if not ctrl:
        gen_dirs = [gen_dir for gen_dir in gen_dirs
            if "ctrl" not in gen_dir]
    if len(gen_dirs) == 0:
        logger.info("No runs found.")
        return
    run_dirs = [run_dir for gen_dir in gen_dirs
        for run_dir in file_util.getfiles(gen_dir, "subdirs")]
    # identify, remove and flag empty directories
    empty_dirs = [run_dir for run_dir in run_dirs
        if len(os.listdir(run_dir)) == 0]
    if len(empty_dirs) != 0:
        logger.info("EMPTY DIRECTORIES:")
        for empty_dir in empty_dirs:
            run_dirs.remove(empty_dir)
            logger.info(f"{empty_dir}", extra={"spacing": TAB})
    all_labels = info_dict() + \
        logreg_util.get_sc_labs(True, ext_test_name=ext_test) + ["saved"]
    # one dataframe per run directory
    scores_list = gen_util.parallel_wrap(
        collate_scores, run_dirs, [all_labels, alg], parallel=parallel)
    # check for repeated run numbers
    if len(scores_list) != 0:
        all_scores = pd.concat(scores_list)
        all_scores = all_scores[all_labels] # reorder
    else:
        all_scores = pd.DataFrame(columns=all_labels)
    # check for and adjust duplicate run numbers
    all_scores = all_scores.reset_index(drop=True)
    all_scores = adjust_duplicate_runs(all_scores)
    # sort df by mouse, session, plane, line, fluor, scale, shuffle, stimtype,
    # comp, uniqueid, run_n, runtype
    sorter = info_dict()[0:13]
    all_scores = all_scores.sort_values(by=sorter).reset_index(drop=True)
    savename = get_df_name("collate", stimtype, comp, ctrl, alg)
    file_util.saveinfo(all_scores, savename, output, overwrite=True)
    return all_scores
#############################################
def calc_stats(scores_summ, curr_lines, curr_idx, CI=0.95, ext_test=None,
               stats="mean", shuffle=False):
    """
    calc_stats(scores_summ, curr_lines, curr_idx)

    Calculates statistics on scores from runs with specific analysis criteria
    and records them in the summary scores dataframe.

    Required args:
        - scores_summ (pd DataFrame): DataFrame containing scores summary
        - curr_lines (pd DataFrame) : DataFrame lines corresponding to specific
                                      analysis criteria
        - curr_idx (int)            : Current row in the scores summary
                                      DataFrame

    Optional args:
        - CI (num)      : Confidence interval around which to collect
                          percentile values
                          default: 0.95
        - ext_test (str): Name of extra test set, if any (None if none)
                          default: None
        - stats (str)   : stats to take, i.e., "mean" or "median"
                          default: "mean"
        - shuffle (bool): If True, data is for shuffled, and will be averaged
                          across runs before taking stats
                          default: False

    Returns:
        - scores_summ (pd DataFrame): Updated DataFrame containing scores, as
                                      well as epoch_n, runs_total, runs_nan
                                      summaries
    """
    scores_summ = copy.deepcopy(scores_summ)
    # work on a copy so the caller's DataFrame is not mutated by the astype
    # assignment below (also avoids pandas SettingWithCopy warnings when
    # curr_lines is a selection of a larger frame)
    curr_lines = curr_lines.copy()
    # score labels to perform statistics on
    sc_labs = ["epoch_n"] + logreg_util.get_sc_labs(
        True, ext_test_name=ext_test)
    # avoids accidental nuisance dropping by pandas
    curr_lines["epoch_n"] = curr_lines["epoch_n"].astype(float)
    if shuffle: # group runs and take mean or median across
        scores_summ.loc[curr_idx, "mouse_n"] = -1
        keep_lines = \
            [col for col in curr_lines.columns if col in sc_labs] + ["run_n"]
        grped_lines = curr_lines[keep_lines].groupby("run_n", as_index=False)
        if stats == "mean":
            curr_lines = grped_lines.mean() # automatically skips NaNs
        elif stats == "median":
            curr_lines = grped_lines.median() # automatically skips NaNs
        else:
            gen_util.accepted_values_error("stats", stats, ["mean", "median"])
    # calculate n_runs (without nans and with)
    scores_summ.loc[curr_idx, "runs_total"] = len(curr_lines)
    scores_summ.loc[curr_idx, "runs_nan"] = curr_lines["epoch_n"].isna().sum()
    # percentiles to record
    ps, p_names = math_util.get_percentiles(CI)
    for sc_lab in sc_labs:
        if sc_lab in curr_lines.keys():
            cols = []
            vals = []
            data = curr_lines[sc_lab].astype(float)
            for stat in ["mean", "median"]:
                cols.extend([stat])
                vals.extend(
                    [math_util.mean_med(data, stats=stat, nanpol="omit")])
            for error in ["std", "sem"]:
                cols.extend([error])
                vals.extend([math_util.error_stat(
                    data, stats="mean", error=error, nanpol="omit")])
            # get 25th and 75th quartiles
            cols.extend(["q25", "q75"])
            vals.extend(math_util.error_stat(
                data, stats="median", error="std", nanpol="omit"))
            # get other percentiles (for CI)
            cols.extend(p_names)
            vals.extend(math_util.error_stat(
                data, stats="median", error="std", nanpol="omit", qu=ps))
            # get MAD
            cols.extend(["mad"])
            vals.extend([math_util.error_stat(
                data, stats="median", error="sem", nanpol="omit")])
            # plug in values (column names prefixed with the score label)
            cols = [f"{sc_lab}_{name}" for name in cols]
            gen_util.set_df_vals(scores_summ, curr_idx, cols, vals)
    return scores_summ
#############################################
def run_analysis(output, stimtype="gabors", comp="surp", ctrl=False,
                 CI=0.95, alg="sklearn", parallel=False, all_scores_df=None):
    """
    run_analysis(output)

    Calculates statistics on scores from runs for each specific analysis
    criteria and saves them in the summary scores dataframe.

    Overwrites any existing dataframe of analysed data.

    Required args:
        - output (str): general directory in which summary dataframe is saved.

    Optional args:
        - stimtype (str)       : stimulus type
                                 default: "gabors"
        - comp (str)           : type of comparison
                                 default: "surp"
        - ctrl (bool)          : if True, control comparisons are analysed
                                 default: False
        - CI (num)             : CI for shuffled data
                                 default: 0.95
        - alg (str)            : algorithm used to run logistic regression
                                 ("sklearn" or "pytorch")
                                 default: "sklearn"
        - parallel (bool)      : if True, run information is collected in
                                 parallel
                                 default: False
        - all_scores_df (pd df): already collated scores dataframe
                                 default: None

    Returns:
        - scores_summ (pd DataFrame): dataframe with analysed scores
    """
    if all_scores_df is None:
        all_scores_df = run_collate(output, stimtype, comp, ctrl, alg, parallel)
    stats = "mean" # across runs for shuffle CIs
    # run_collate may also return None (e.g., no runs found)
    if all_scores_df is None:
        return
    scores_summ = pd.DataFrame()
    ext_test = sess_str_util.ext_test_str(
        ("q1v4" in output), ("rvs" in output), comp)
    if ext_test == "":
        ext_test = None
    # common labels
    comm_labs = gen_util.remove_if(info_dict(),
        ["uniqueid", "run_n", "epoch_n"])
    # get all unique comb of labels
    # first pass: per-criteria stats; second pass: shuffle stats pooled
    # across mice/ROIs
    for acr_shuff in [False, True]:
        if not acr_shuff:
            df_unique = all_scores_df[comm_labs].drop_duplicates()
        else:
            df_unique = all_scores_df[gen_util.remove_if(comm_labs,
                ["mouse_n", "n_rois"])].drop_duplicates()
        for _, df_row in df_unique.iterrows():
            if acr_shuff and not df_row["shuffle"]:
                # second pass, only shuffle
                continue
            # NOTE(review): in the second pass, df_row lacks the "mouse_n"
            # and "n_rois" columns dropped above, yet comm_labs still
            # includes them here — verify this lookup cannot KeyError
            vals = [df_row[x] for x in comm_labs]
            curr_lines = gen_util.get_df_vals(all_scores_df, comm_labs, vals)
            # assign values to current line in summary df
            curr_idx = len(scores_summ)
            gen_util.set_df_vals(scores_summ, curr_idx, comm_labs, vals)
            # calculate stats
            scores_summ = calc_stats(scores_summ, curr_lines, curr_idx, CI,
                ext_test, stats=stats, shuffle=acr_shuff)
    savename = get_df_name("analyse", stimtype, comp, ctrl, alg)
    file_util.saveinfo(scores_summ, savename, output, overwrite=True)
    return scores_summ
#############################################
def run_plot(output, stimtype="gabors", comp="surp", ctrl=False, bri_dir="both",
             fluor="dff", scale=True, CI=0.95, alg="sklearn", plt_bkend=None,
             fontdir=None, modif=False):
    """
    run_plot(output)

    Plots summary data for a specific comparison, for each datatype in a
    separate figure and saves figures.

    Required args:
        - output (str): general directory in which summary dataframe is saved

    Optional args:
        - stimtype (str) : stimulus type
                           default: "gabors"
        - comp (str)     : type of comparison
                           default: "surp"
        - ctrl (bool)    : if True, control comparisons are analysed
                           default: False
        - bri_dir (str)  : brick direction
                           default: "both"
        - fluor (str)    : fluorescence trace type
                           default: "dff"
        - scale (bool)   : whether data is scaled by ROI
                           default: True
        - CI (num)       : CI for shuffled data
                           default: 0.95
        - alg (str)      : algorithm used to run logistic regression
                           ("sklearn" or "pytorch")
                           default: "sklearn"
        - plt_bkend (str): pyplot backend to use
                           default: None
        - fontdir (str)  : directory in which additional fonts are located
                           default: None
        - modif (bool)   : if True, plots are made in a modified (simplified
                           way)
                           default: False
    """
    # half comparisons imply a specific brick direction
    half_comps = ("half_right", "half_left")
    if comp in half_comps:
        bri_dir = comp.replace("half_", "")
    df_name = get_df_name("analyse", stimtype, comp, ctrl, alg)
    logreg_plots.plot_summ(
        output, df_name, stimtype, comp, ctrl, bri_dir, fluor, scale, CI,
        plt_bkend, fontdir, modif)
| [
"logging.getLogger",
"sess_util.sess_ntuple_util.init_quintpar",
"util.gen_util.parallel_wrap",
"util.file_util.createdir",
"plot_fcts.logreg_plots.plot_traces_scores",
"util.logreg_util.fit_model_pt",
"util.logreg_util.get_transf_data_sk",
"util.gen_util.list_if_not",
"util.gen_util.get_df_vals",
... | [((736, 763), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (753, 763), False, 'import logging\n'), ((6334, 6409), 'sess_util.sess_gen_util.get_params', 'sess_gen_util.get_params', (['stimtype', 'bri_dir', 'bri_size', 'gabfr', 'gabk', 'gab_ori'], {}), '(stimtype, bri_dir, bri_size, gabfr, gabk, gab_ori)\n', (6358, 6409), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((11794, 11848), 'sess_util.sess_gen_util.get_params_from_str', 'sess_gen_util.get_params_from_str', (['param_str', 'no_lists'], {}), '(param_str, no_lists)\n', (11827, 11848), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((13297, 13329), 'sess_util.sess_str_util.ctrl_par_str', 'sess_str_util.ctrl_par_str', (['ctrl'], {}), '(ctrl)\n', (13323, 13329), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((17797, 17871), 'util.file_util.saveinfo', 'file_util.saveinfo', (['hyperpars', '"""hyperparameters.json"""', "extrapar['dirname']"], {}), "(hyperpars, 'hyperparameters.json', extrapar['dirname'])\n", (17815, 17871), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((22648, 22681), 'numpy.transpose', 'np.transpose', (['roi_data', '[1, 2, 0]'], {}), '(roi_data, [1, 2, 0])\n', (22660, 22681), True, 'import numpy as np\n'), ((32023, 32073), 'numpy.random.choice', 'np.random.choice', (['class0_all', 'n_reg'], {'replace': '(False)'}), '(class0_all, n_reg, replace=False)\n', (32039, 32073), True, 'import numpy as np\n'), ((32091, 32142), 'numpy.random.choice', 'np.random.choice', (['class1_all', 'n_surp'], {'replace': '(False)'}), '(class1_all, n_surp, replace=False)\n', (32107, 32142), True, 'import numpy as np\n'), ((32163, 32231), 'numpy.concatenate', 'np.concatenate', (['[roi_seqs[class0_idx], roi_seqs[class1_idx]]'], {'axis': '(0)'}), '([roi_seqs[class0_idx], roi_seqs[class1_idx]], axis=0)\n', (32177, 32231), 
True, 'import numpy as np\n'), ((32260, 32334), 'numpy.concatenate', 'np.concatenate', (['[seq_classes[class0_idx], seq_classes[class1_idx]]'], {'axis': '(0)'}), '([seq_classes[class0_idx], seq_classes[class1_idx]], axis=0)\n', (32274, 32334), True, 'import numpy as np\n'), ((35005, 35028), 'numpy.unique', 'np.unique', (['plot_targ[0]'], {}), '(plot_targ[0])\n', (35014, 35028), True, 'import numpy as np\n'), ((35839, 35893), 'util.file_util.saveinfo', 'file_util.saveinfo', (['tr_stats', '"""tr_stats.json"""', 'dirname'], {}), "(tr_stats, 'tr_stats.json', dirname)\n", (35857, 35893), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((38920, 39137), 'util.data_util.create_dls', 'data_util.create_dls', (['roi_seqs[0]', 'seq_classes[0]'], {'train_p': 'logregpar.train_p', 'sc_dim': 'sc_dim', 'sc_type': '"""stand_rob"""', 'extrem': '"""perc"""', 'shuffle': "extrapar['shuffle']", 'batchsize': 'logregpar.batchsize', 'thresh_cl': 'thresh_cl'}), "(roi_seqs[0], seq_classes[0], train_p=logregpar.train_p,\n sc_dim=sc_dim, sc_type='stand_rob', extrem='perc', shuffle=extrapar[\n 'shuffle'], batchsize=logregpar.batchsize, thresh_cl=thresh_cl)\n", (38940, 39137), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((39193, 39216), 'copy.deepcopy', 'copy.deepcopy', (['extrapar'], {}), '(extrapar)\n', (39206, 39216), False, 'import copy\n'), ((39786, 39835), 'util.logreg_util.class_weights', 'logreg_util.class_weights', (['dls[0].dataset.targets'], {}), '(dls[0].dataset.targets)\n', (39811, 39835), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((40074, 40118), 'util.logreg_util.weighted_BCE', 'logreg_util.weighted_BCE', (["extrapar['cl_wei']"], {}), "(extrapar['cl_wei'])\n", (40098, 40118), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((40923, 
40944), 'copy.deepcopy', 'copy.deepcopy', (['scores'], {}), '(scores)\n', (40936, 40944), False, 'import copy\n'), ((41123, 41176), 'util.file_util.saveinfo', 'file_util.saveinfo', (['summ_df', '"""scores_df.csv"""', 'dirname'], {}), "(summ_df, 'scores_df.csv', dirname)\n", (41141, 41176), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((44507, 44530), 'copy.deepcopy', 'copy.deepcopy', (['extrapar'], {}), '(extrapar)\n', (44520, 44530), False, 'import copy\n'), ((44634, 44733), 'util.gen_util.seed_all', 'gen_util.seed_all', (["extrapar['seed']", "techpar['device']"], {'seed_torch': "(techpar['alg'] == 'pytorch')"}), "(extrapar['seed'], techpar['device'], seed_torch=techpar[\n 'alg'] == 'pytorch')\n", (44651, 44733), False, 'from util import gen_util\n'), ((44870, 44894), 'copy.deepcopy', 'copy.deepcopy', (['sess_data'], {}), '(sess_data)\n', (44883, 44894), False, 'import copy\n'), ((47587, 47683), 'sess_util.sess_ntuple_util.get_modif_ntuple', 'sess_ntuple_util.get_modif_ntuple', (['logregpar', "['batchsize', 'lr', 'wd']", '[None, None, None]'], {}), "(logregpar, ['batchsize', 'lr', 'wd'], [\n None, None, None])\n", (47620, 47683), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((48324, 48394), 'util.plot_util.manage_mpl', 'plot_util.manage_mpl', (["techpar['plt_bkend']"], {'fontdir': "techpar['fontdir']"}), "(techpar['plt_bkend'], fontdir=techpar['fontdir'])\n", (48344, 48394), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((48411, 48434), 'copy.deepcopy', 'copy.deepcopy', (['extrapar'], {}), '(extrapar)\n', (48424, 48434), False, 'import copy\n'), ((48574, 48642), 'util.file_util.createdir', 'file_util.createdir', (["[techpar['output'], techpar['compdir'], rundir]"], {}), "([techpar['output'], techpar['compdir'], rundir])\n", (48593, 48642), False, 'from util import data_util, file_util, gen_util, 
logger_util, logreg_util, math_util, plot_util\n'), ((48673, 48726), 'sess_util.sess_str_util.scale_par_str', 'sess_str_util.scale_par_str', (['analyspar.scale', '"""print"""'], {}), "(analyspar.scale, 'print')\n", (48700, 48726), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((48743, 48801), 'sess_util.sess_str_util.shuff_par_str', 'sess_str_util.shuff_par_str', (["extrapar['shuffle']", '"""labels"""'], {}), "(extrapar['shuffle'], 'labels')\n", (48770, 48801), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((49447, 49525), 'sess_util.sess_str_util.ext_test_str', 'sess_str_util.ext_test_str', (['logregpar.q1v4', 'logregpar.regvsurp', 'logregpar.comp'], {}), '(logregpar.q1v4, logregpar.regvsurp, logregpar.comp)\n', (49473, 49525), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((50007, 50187), 'util.logreg_util.test_logreg_cv_sk', 'logreg_util.test_logreg_cv_sk', (['mod_cvs', 'cv', "extrapar['scoring']"], {'main_data': 'main_data', 'extra_data': 'extra_data', 'extra_name': 'extra_name', 'extra_cv': 'extra_cv', 'catch_set_prob': '(True)'}), "(mod_cvs, cv, extrapar['scoring'], main_data=\n main_data, extra_data=extra_data, extra_name=extra_name, extra_cv=\n extra_cv, catch_set_prob=True)\n", (50036, 50187), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((50294, 50345), 'numpy.argmax', 'np.argmax', (["mod_cvs[f'{set_names[-1]}_neg_log_loss']"], {}), "(mod_cvs[f'{set_names[-1]}_neg_log_loss'])\n", (50303, 50345), True, 'import numpy as np\n'), ((51145, 51235), 'util.logreg_util.create_score_df_sk', 'logreg_util.create_score_df_sk', (['mod_cvs', 'best_mod_idx', 'set_names', "extrapar['scoring']"], {}), "(mod_cvs, best_mod_idx, set_names, extrapar[\n 'scoring'])\n", (51175, 51235), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((51508, 51601), 
'plot_fcts.logreg_plots.plot_traces_scores', 'logreg_plots.plot_traces_scores', (['hyperpars', 'tr_stats', 'full_scores'], {'plot_wei': 'best_mod_idx'}), '(hyperpars, tr_stats, full_scores, plot_wei=\n best_mod_idx)\n', (51539, 51601), False, 'from plot_fcts import logreg_plots\n'), ((51611, 51627), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (51620, 51627), True, 'from matplotlib import pyplot as plt\n'), ((54169, 54192), 'copy.deepcopy', 'copy.deepcopy', (['extrapar'], {}), '(extrapar)\n', (54182, 54192), False, 'import copy\n'), ((54470, 54523), 'sess_util.sess_str_util.scale_par_str', 'sess_str_util.scale_par_str', (['analyspar.scale', '"""print"""'], {}), "(analyspar.scale, 'print')\n", (54497, 54523), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((54540, 54598), 'sess_util.sess_str_util.shuff_par_str', 'sess_str_util.shuff_par_str', (["extrapar['shuffle']", '"""labels"""'], {}), "(extrapar['shuffle'], 'labels')\n", (54567, 54598), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((55086, 55156), 'util.plot_util.manage_mpl', 'plot_util.manage_mpl', (["techpar['plt_bkend']"], {'fontdir': "techpar['fontdir']"}), "(techpar['plt_bkend'], fontdir=techpar['fontdir'])\n", (55106, 55156), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((55344, 55412), 'util.file_util.createdir', 'file_util.createdir', (["[techpar['output'], techpar['compdir'], rundir]"], {}), "([techpar['output'], techpar['compdir'], rundir])\n", (55363, 55412), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((55810, 55888), 'sess_util.sess_str_util.ext_test_str', 'sess_str_util.ext_test_str', (['logregpar.q1v4', 'logregpar.regvsurp', 'logregpar.comp'], {}), '(logregpar.q1v4, logregpar.regvsurp, logregpar.comp)\n', (55836, 55888), False, 'from sess_util import sess_gen_util, 
sess_ntuple_util, sess_str_util\n'), ((56519, 56682), 'util.logreg_util.fit_model_pt', 'logreg_util.fit_model_pt', (['info', 'logregpar.n_epochs', 'mod', 'dls', "techpar['device']", "extrapar['dirname']"], {'ep_freq': "techpar['ep_freq']", 'test_dl2_name': 'extra_name'}), "(info, logregpar.n_epochs, mod, dls, techpar[\n 'device'], extrapar['dirname'], ep_freq=techpar['ep_freq'],\n test_dl2_name=extra_name)\n", (56543, 56682), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((57156, 57172), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (57165, 57172), True, 'from matplotlib import pyplot as plt\n'), ((58924, 58946), 'copy.deepcopy', 'copy.deepcopy', (['techpar'], {}), '(techpar)\n', (58937, 58946), False, 'import copy\n'), ((58961, 59081), 'sess_util.sess_ntuple_util.init_sesspar', 'sess_ntuple_util.init_sesspar', (['sess.sess_n', '(False)', 'sess.plane', 'sess.line'], {'runtype': 'sess.runtype', 'mouse_n': 'sess.mouse_n'}), '(sess.sess_n, False, sess.plane, sess.line,\n runtype=sess.runtype, mouse_n=sess.mouse_n)\n', (58990, 59081), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((59185, 59229), 'sess_util.sess_str_util.get_split_oris', 'sess_str_util.get_split_oris', (['logregpar.comp'], {}), '(logregpar.comp)\n', (59213, 59229), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((61831, 61863), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'all_labels'}), '(columns=all_labels)\n', (61843, 61863), True, 'import pandas as pd\n'), ((61890, 61924), 'util.logreg_util.get_scores', 'logreg_util.get_scores', (['direc', 'alg'], {}), '(direc, alg)\n', (61912, 61924), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((66317, 66384), 'sess_util.sess_str_util.ext_test_str', 'sess_str_util.ext_test_str', (["('q1v4' in output)", "('rvs' in output)", 'comp'], 
{}), "('q1v4' in output, 'rvs' in output, comp)\n", (66343, 66384), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((66461, 66493), 'sess_util.sess_str_util.ctrl_par_str', 'sess_str_util.ctrl_par_str', (['ctrl'], {}), '(ctrl)\n', (66487, 66493), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((66509, 66579), 'util.file_util.getfiles', 'file_util.getfiles', (['output', '"""subdirs"""', '[stimtype[0:3], comp, ctrl_str]'], {}), "(output, 'subdirs', [stimtype[0:3], comp, ctrl_str])\n", (66527, 66579), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((67757, 67847), 'util.gen_util.parallel_wrap', 'gen_util.parallel_wrap', (['collate_scores', 'run_dirs', '[all_labels, alg]'], {'parallel': 'parallel'}), '(collate_scores, run_dirs, [all_labels, alg],\n parallel=parallel)\n', (67779, 67847), False, 'from util import gen_util\n'), ((68531, 68595), 'util.file_util.saveinfo', 'file_util.saveinfo', (['all_scores', 'savename', 'output'], {'overwrite': '(True)'}), '(all_scores, savename, output, overwrite=True)\n', (68549, 68595), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((70177, 70203), 'copy.deepcopy', 'copy.deepcopy', (['scores_summ'], {}), '(scores_summ)\n', (70190, 70203), False, 'import copy\n'), ((71281, 71310), 'util.math_util.get_percentiles', 'math_util.get_percentiles', (['CI'], {}), '(CI)\n', (71306, 71310), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((74414, 74428), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (74426, 74428), True, 'import pandas as pd\n'), ((74445, 74512), 'sess_util.sess_str_util.ext_test_str', 'sess_str_util.ext_test_str', (["('q1v4' in output)", "('rvs' in output)", 'comp'], {}), "('q1v4' in output, 'rvs' in output, comp)\n", (74471, 74512), False, 'from sess_util import 
sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((75698, 75763), 'util.file_util.saveinfo', 'file_util.saveinfo', (['scores_summ', 'savename', 'output'], {'overwrite': '(True)'}), '(scores_summ, savename, output, overwrite=True)\n', (75716, 75763), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((77658, 77778), 'plot_fcts.logreg_plots.plot_summ', 'logreg_plots.plot_summ', (['output', 'savename', 'stimtype', 'comp', 'ctrl', 'bri_dir', 'fluor', 'scale', 'CI', 'plt_bkend', 'fontdir', 'modif'], {}), '(output, savename, stimtype, comp, ctrl, bri_dir,\n fluor, scale, CI, plt_bkend, fontdir, modif)\n', (77680, 77778), False, 'from plot_fcts import logreg_plots\n'), ((29507, 29542), 'numpy.concatenate', 'np.concatenate', (['roi_seqs[d]'], {'axis': '(0)'}), '(roi_seqs[d], axis=0)\n', (29521, 29542), True, 'import numpy as np\n'), ((29568, 29606), 'numpy.concatenate', 'np.concatenate', (['seq_classes[d]'], {'axis': '(0)'}), '(seq_classes[d], axis=0)\n', (29582, 29606), True, 'import numpy as np\n'), ((31868, 31894), 'numpy.where', 'np.where', (['(seq_classes == 0)'], {}), '(seq_classes == 0)\n', (31876, 31894), True, 'import numpy as np\n'), ((31915, 31941), 'numpy.where', 'np.where', (['(seq_classes == 1)'], {}), '(seq_classes == 1)\n', (31923, 31941), True, 'import numpy as np\n'), ((35564, 35671), 'util.logreg_util.get_stats', 'logreg_util.get_stats', (['data', 'targ', 'stimpar.pre', 'stimpar.post', 'classes', 'analyspar.stats', 'analyspar.error'], {}), '(data, targ, stimpar.pre, stimpar.post, classes,\n analyspar.stats, analyspar.error)\n', (35585, 35671), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((47732, 47789), 'warnings.warn', 'warnings.warn', (['"""sklearn method not implemented with GPU."""'], {}), "('sklearn method not implemented with GPU.')\n", (47745, 47789), False, 'import warnings\n'), ((49692, 49806), 
'util.logreg_util.StratifiedShuffleSplitMod', 'logreg_util.StratifiedShuffleSplitMod', ([], {'n_splits': 'cv.n_splits', 'train_p': '(0.5)', 'sample': 'samples[1]', 'bal': 'logregpar.bal'}), '(n_splits=cv.n_splits, train_p=0.5,\n sample=samples[1], bal=logregpar.bal)\n', (49729, 49806), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((57057, 57142), 'plot_fcts.logreg_plots.plot_traces_scores', 'logreg_plots.plot_traces_scores', (['hyperpars', 'tr_stats', 'full_scores'], {'plot_wei': '(True)'}), '(hyperpars, tr_stats, full_scores, plot_wei=True\n )\n', (57088, 57142), False, 'from plot_fcts import logreg_plots\n'), ((59832, 59855), 'copy.deepcopy', 'copy.deepcopy', (['extrapar'], {}), '(extrapar)\n', (59845, 59855), False, 'import copy\n'), ((59912, 59934), 'copy.deepcopy', 'copy.deepcopy', (['techpar'], {}), '(techpar)\n', (59925, 59934), False, 'import copy\n'), ((59964, 60205), 'sess_util.sess_gen_util.get_analysdir', 'sess_gen_util.get_analysdir', (['sesspar.mouse_n', 'sesspar.sess_n', 'sesspar.plane', 'analyspar.fluor', 'analyspar.scale', 'stimpar.stimtype', 'stimpar.bri_dir', 'stimpar.bri_size', 'stimpar.gabk', 'logregpar.comp', 'logregpar.ctrl', "extrapar['shuffle']"], {}), "(sesspar.mouse_n, sesspar.sess_n, sesspar.plane,\n analyspar.fluor, analyspar.scale, stimpar.stimtype, stimpar.bri_dir,\n stimpar.bri_size, stimpar.gabk, logregpar.comp, logregpar.ctrl,\n extrapar['shuffle'])\n", (59991, 60205), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((66214, 66235), 'os.path.isdir', 'os.path.isdir', (['output'], {}), '(output)\n', (66227, 66235), False, 'import os\n'), ((67941, 67963), 'pandas.concat', 'pd.concat', (['scores_list'], {}), '(scores_list)\n', (67950, 67963), True, 'import pandas as pd\n'), ((68049, 68081), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'all_labels'}), '(columns=all_labels)\n', (68061, 68081), True, 'import pandas as pd\n'), ((70277, 
70330), 'util.logreg_util.get_sc_labs', 'logreg_util.get_sc_labs', (['(True)'], {'ext_test_name': 'ext_test'}), '(True, ext_test_name=ext_test)\n', (70300, 70330), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((2341, 2415), 'util.gen_util.accepted_values_error', 'gen_util.accepted_values_error', (['"""stimtype"""', 'stimtype', "['gabors', 'bricks']"], {}), "('stimtype', stimtype, ['gabors', 'bricks'])\n", (2371, 2415), False, 'from util import gen_util\n'), ((6537, 6629), 'sess_util.sess_ntuple_util.init_stimpar', 'sess_ntuple_util.init_stimpar', (['stimtype', 'bri_dir', 'bri_size', 'gabfr', 'gabk', 'gab_ori', '(0)', '(1.5)'], {}), '(stimtype, bri_dir, bri_size, gabfr, gabk,\n gab_ori, 0, 1.5)\n', (6566, 6629), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((8277, 8375), 'sess_util.sess_ntuple_util.init_stimpar', 'sess_ntuple_util.init_stimpar', (['stimtype', 'bri_dir', 'bri_size', 'gabfr', 'gabk', 'gab_ori', 'bri_pre', '(1.0)'], {}), '(stimtype, bri_dir, bri_size, gabfr, gabk,\n gab_ori, bri_pre, 1.0)\n', (8306, 8375), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((13214, 13280), 'util.gen_util.accepted_values_error', 'gen_util.accepted_values_error', (['"""alg"""', 'alg', "['pytorch', 'sklearn']"], {}), "('alg', alg, ['pytorch', 'sklearn'])\n", (13244, 13280), False, 'from util import gen_util\n'), ((15403, 15440), 'util.gen_util.list_if_not', 'gen_util.list_if_not', (['stimpar.bri_dir'], {}), '(stimpar.bri_dir)\n', (15423, 15440), False, 'from util import gen_util\n'), ((21240, 21290), 'util.gen_util.get_alternating_consec', 'gen_util.get_alternating_consec', (['segs'], {'first': '(False)'}), '(segs, first=False)\n', (21271, 21290), False, 'from util import gen_util\n'), ((21865, 21905), 'numpy.median', 'np.median', (['roi_data[:, :, :mid]'], {'axis': '(-1)'}), '(roi_data[:, :, :mid], axis=-1)\n', (21874, 21905), True, 'import 
numpy as np\n'), ((27570, 27639), 'sess_util.sess_ntuple_util.init_quintpar', 'sess_ntuple_util.init_quintpar', (['(1)', '[0, 0]', '[None, None]', '[None, None]'], {}), '(1, [0, 0], [None, None], [None, None])\n', (27600, 27639), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((29688, 29720), 'numpy.concatenate', 'np.concatenate', (['roi_seqs'], {'axis': '(0)'}), '(roi_seqs, axis=0)\n', (29702, 29720), True, 'import numpy as np\n'), ((30203, 30260), 'sess_util.sess_str_util.gabfr_letters', 'sess_str_util.gabfr_letters', (['stimpar.gabfr'], {'surp': 'surp_use'}), '(stimpar.gabfr, surp=surp_use)\n', (30230, 30260), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((35263, 35328), 'util.logreg_util.get_transf_data_sk', 'logreg_util.get_transf_data_sk', (['mod', 'data', '(False)', "(name == 'train')"], {}), "(mod, data, False, name == 'train')\n", (35293, 35328), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((39386, 39415), 'numpy.random.shuffle', 'np.random.shuffle', (['class_vals'], {}), '(class_vals)\n', (39403, 39415), True, 'import numpy as np\n'), ((39582, 39645), 'util.data_util.init_dl', 'data_util.init_dl', (['roi_seqs[1]', 'class_vals', 'logregpar.batchsize'], {}), '(roi_seqs[1], class_vals, logregpar.batchsize)\n', (39599, 39645), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((39900, 39932), 'util.logreg_util.LogReg', 'logreg_util.LogReg', (['n_rois', 'n_fr'], {}), '(n_rois, n_fr)\n', (39918, 39932), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((55013, 55063), 'util.data_util.bal_classes', 'data_util.bal_classes', (['roi_seqs[i]', 'seq_classes[i]'], {}), '(roi_seqs[i], seq_classes[i])\n', (55034, 55063), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, 
plot_util\n'), ((66835, 66901), 'util.gen_util.accepted_values_error', 'gen_util.accepted_values_error', (['"""alg"""', 'alg', "['sklearn', 'pytorch']"], {}), "('alg', alg, ['sklearn', 'pytorch'])\n", (66865, 66901), False, 'from util import gen_util\n'), ((67229, 67267), 'util.file_util.getfiles', 'file_util.getfiles', (['gen_dir', '"""subdirs"""'], {}), "(gen_dir, 'subdirs')\n", (67247, 67267), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((67672, 67725), 'util.logreg_util.get_sc_labs', 'logreg_util.get_sc_labs', (['(True)'], {'ext_test_name': 'ext_test'}), '(True, ext_test_name=ext_test)\n', (67695, 67725), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((72577, 72632), 'util.gen_util.set_df_vals', 'gen_util.set_df_vals', (['scores_summ', 'curr_idx', 'cols', 'vals'], {}), '(scores_summ, curr_idx, cols, vals)\n', (72597, 72632), False, 'from util import gen_util\n'), ((75235, 75287), 'util.gen_util.get_df_vals', 'gen_util.get_df_vals', (['all_scores_df', 'comm_labs', 'vals'], {}), '(all_scores_df, comm_labs, vals)\n', (75255, 75287), False, 'from util import gen_util\n'), ((75398, 75458), 'util.gen_util.set_df_vals', 'gen_util.set_df_vals', (['scores_summ', 'curr_idx', 'comm_labs', 'vals'], {}), '(scores_summ, curr_idx, comm_labs, vals)\n', (75418, 75458), False, 'from util import gen_util\n'), ((2068, 2170), 'util.gen_util.remove_if', 'gen_util.remove_if', (['comps', "['surp', 'dir_surp', 'dir_all', 'half_right', 'half_left', 'half_diff']"], {}), "(comps, ['surp', 'dir_surp', 'dir_all', 'half_right',\n 'half_left', 'half_diff'])\n", (2086, 2170), False, 'from util import gen_util\n'), ((2238, 2305), 'util.gen_util.remove_if', 'gen_util.remove_if', (['comps', "['half_left', 'half_right', 'half_diff']"], {}), "(comps, ['half_left', 'half_right', 'half_diff'])\n", (2256, 2305), False, 'from util import gen_util\n'), ((6694, 6727), 
'sess_util.sess_str_util.gabfr_nbrs', 'sess_str_util.gabfr_nbrs', (['comp[0]'], {}), '(comp[0])\n', (6718, 6727), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((6750, 6843), 'sess_util.sess_ntuple_util.init_stimpar', 'sess_ntuple_util.init_stimpar', (['stimtype', 'bri_dir', 'bri_size', 'gabfr', 'gabk', 'gab_ori', '(0)', '(0.45)'], {}), '(stimtype, bri_dir, bri_size, gabfr, gabk,\n gab_ori, 0, 0.45)\n', (6779, 6843), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((8565, 8655), 'sess_util.sess_ntuple_util.init_stimpar', 'sess_ntuple_util.init_stimpar', (['stimtype', 'bri_dir', 'bri_size', 'gabfr', 'gabk', 'gab_ori', '(2)', '(2)'], {}), '(stimtype, bri_dir, bri_size, gabfr, gabk,\n gab_ori, 2, 2)\n', (8594, 8655), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((9672, 9738), 'util.gen_util.accepted_values_error', 'gen_util.accepted_values_error', (['"""alg"""', 'alg', "['sklearn', 'pytorch']"], {}), "('alg', alg, ['sklearn', 'pytorch'])\n", (9702, 9738), False, 'from util import gen_util\n'), ((21004, 21102), 'analysis.quint_analys.quint_segs', 'quint_analys.quint_segs', (['stim', 'stimpar', 'quintpar.n_quints', 'qu_i', 'surp_use'], {'remconsec': 'remconsec'}), '(stim, stimpar, quintpar.n_quints, qu_i, surp_use,\n remconsec=remconsec)\n', (21027, 21102), False, 'from analysis import quint_analys\n'), ((21941, 21964), 'numpy.expand_dims', 'np.expand_dims', (['div', '(-1)'], {}), '(div, -1)\n', (21955, 21964), True, 'import numpy as np\n'), ((26646, 26705), 'sess_util.sess_ntuple_util.init_quintpar', 'sess_ntuple_util.init_quintpar', (['(4)', '[[1, 2]]', '[None]', '[None]'], {}), '(4, [[1, 2]], [None], [None])\n', (26676, 26705), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((26923, 26982), 'sess_util.sess_ntuple_util.init_quintpar', 'sess_ntuple_util.init_quintpar', (['(2)', '[[0, 1]]', '[None]', '[None]'], {}), '(2, [[0, 1]], 
[None], [None])\n', (26953, 26982), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((27743, 27812), 'sess_util.sess_ntuple_util.init_quintpar', 'sess_ntuple_util.init_quintpar', (['(1)', '[0, 0]', '[None, None]', '[None, None]'], {}), '(1, [0, 0], [None, None], [None, None])\n', (27773, 27812), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((29996, 30045), 'sess_util.sess_str_util.gabfr_letters', 'sess_str_util.gabfr_letters', (['gabfr'], {'surp': 'surp_use'}), '(gabfr, surp=surp_use)\n', (30023, 30045), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((31740, 31762), 'numpy.unique', 'np.unique', (['seq_classes'], {}), '(seq_classes)\n', (31749, 31762), True, 'import numpy as np\n'), ((35465, 35531), 'util.gen_util.accepted_values_error', 'gen_util.accepted_values_error', (['"""alg"""', 'alg', "['sklearn', 'pytorch']"], {}), "('alg', alg, ['sklearn', 'pytorch'])\n", (35495, 35531), False, 'from util import gen_util\n'), ((38600, 38625), 'numpy.unique', 'np.unique', (['seq_classes[0]'], {}), '(seq_classes[0])\n', (38609, 38625), True, 'import numpy as np\n'), ((60906, 60996), 'util.gen_util.accepted_values_error', 'gen_util.accepted_values_error', (['"""logregpar.alg"""', 'logregpar.alg', "['pytorch', 'sklearn']"], {}), "('logregpar.alg', logregpar.alg, ['pytorch',\n 'sklearn'])\n", (60936, 60996), False, 'from util import gen_util\n'), ((70977, 71043), 'util.gen_util.accepted_values_error', 'gen_util.accepted_values_error', (['"""stats"""', 'stats', "['mean', 'median']"], {}), "('stats', stats, ['mean', 'median'])\n", (71007, 71043), False, 'from util import gen_util\n'), ((71964, 72034), 'util.math_util.error_stat', 'math_util.error_stat', (['data'], {'stats': '"""median"""', 'error': '"""std"""', 'nanpol': '"""omit"""'}), "(data, stats='median', error='std', nanpol='omit')\n", (71984, 72034), False, 'from util import data_util, file_util, gen_util, 
logger_util, logreg_util, math_util, plot_util\n'), ((72199, 72276), 'util.math_util.error_stat', 'math_util.error_stat', (['data'], {'stats': '"""median"""', 'error': '"""std"""', 'nanpol': '"""omit"""', 'qu': 'ps'}), "(data, stats='median', error='std', nanpol='omit', qu=ps)\n", (72219, 72276), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((7608, 7707), 'sess_util.sess_ntuple_util.init_stimpar', 'sess_ntuple_util.init_stimpar', (['stimtype', 'bri_dir', 'bri_size', 'act_gabfr', 'gabk', 'gab_ori', 'pre', 'post'], {}), '(stimtype, bri_dir, bri_size, act_gabfr, gabk,\n gab_ori, pre, post)\n', (7637, 7707), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((22211, 22239), 'numpy.nanmean', 'np.nanmean', (['roi_data'], {'axis': '(0)'}), '(roi_data, axis=0)\n', (22221, 22239), True, 'import numpy as np\n'), ((28989, 29043), 'sess_util.sess_ntuple_util.get_modif_ntuple', 'sess_ntuple_util.get_modif_ntuple', (['stimpar', 'keys', 'vals'], {}), '(stimpar, keys, vals)\n', (29022, 29043), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((39502, 39527), 'torch.Tensor', 'torch.Tensor', (['roi_seqs[1]'], {}), '(roi_seqs[1])\n', (39514, 39527), False, 'import torch\n'), ((67391, 67410), 'os.listdir', 'os.listdir', (['run_dir'], {}), '(run_dir)\n', (67401, 67410), False, 'import os\n'), ((72388, 72458), 'util.math_util.error_stat', 'math_util.error_stat', (['data'], {'stats': '"""median"""', 'error': '"""sem"""', 'nanpol': '"""omit"""'}), "(data, stats='median', error='sem', nanpol='omit')\n", (72408, 72458), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((7534, 7585), 'sess_util.sess_gen_util.gab_oris_shared_U', 'sess_gen_util.gab_oris_shared_U', (['gab_letts', 'gab_ori'], {}), '(gab_letts, gab_ori)\n', (7565, 7585), False, 'from sess_util import sess_gen_util, sess_ntuple_util, 
sess_str_util\n'), ((7902, 7946), 'sess_util.sess_str_util.gabfr_nbrs', 'sess_str_util.gabfr_nbrs', (['[comp[0], comp[2]]'], {}), '([comp[0], comp[2]])\n', (7926, 7946), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((7969, 8063), 'sess_util.sess_ntuple_util.init_stimpar', 'sess_ntuple_util.init_stimpar', (['stimtype', 'bri_dir', 'bri_size', 'gabfrs', 'gabk', 'gab_ori', '(0)', '(0.45)'], {}), '(stimtype, bri_dir, bri_size, gabfrs, gabk,\n gab_ori, 0, 0.45)\n', (7998, 8063), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((19248, 19357), 'util.gen_util.accepted_values_error', 'gen_util.accepted_values_error', (['"""comp"""', 'comp', "['surp', 'AvB', 'AvC', 'BvC', 'DvU', 'dir...', '...ori...']"], {}), "('comp', comp, ['surp', 'AvB', 'AvC', 'BvC',\n 'DvU', 'dir...', '...ori...'])\n", (19278, 19357), False, 'from util import gen_util\n'), ((26742, 26764), 'numpy.unique', 'np.unique', (['stim.direcs'], {}), '(stim.direcs)\n', (26751, 26764), True, 'import numpy as np\n'), ((71605, 71656), 'util.math_util.mean_med', 'math_util.mean_med', (['data'], {'stats': 'stat', 'nanpol': '"""omit"""'}), "(data, stats=stat, nanpol='omit')\n", (71623, 71656), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((71766, 71834), 'util.math_util.error_stat', 'math_util.error_stat', (['data'], {'stats': '"""mean"""', 'error': 'error', 'nanpol': '"""omit"""'}), "(data, stats='mean', error=error, nanpol='omit')\n", (71786, 71834), False, 'from util import data_util, file_util, gen_util, logger_util, logreg_util, math_util, plot_util\n'), ((74903, 74955), 'util.gen_util.remove_if', 'gen_util.remove_if', (['comm_labs', "['mouse_n', 'n_rois']"], {}), "(comm_labs, ['mouse_n', 'n_rois'])\n", (74921, 74955), False, 'from util import gen_util\n'), ((7013, 7043), 'sess_util.sess_str_util.gabfr_nbrs', 'sess_str_util.gabfr_nbrs', (['lett'], {}), '(lett)\n', (7037, 7043), 
False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n'), ((22495, 22523), 'numpy.nanmean', 'np.nanmean', (['roi_data'], {'axis': '(0)'}), '(roi_data, axis=0)\n', (22505, 22523), True, 'import numpy as np\n'), ((22545, 22572), 'numpy.nanstd', 'np.nanstd', (['roi_data'], {'axis': '(0)'}), '(roi_data, axis=0)\n', (22554, 22572), True, 'import numpy as np\n'), ((18974, 19024), 'sess_util.sess_str_util.dir_par_str', 'sess_str_util.dir_par_str', (['direc'], {'str_type': '"""print"""'}), "(direc, str_type='print')\n", (18999, 19024), False, 'from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on March 10 2020
@author: <NAME>
Translated from C++ code by <NAME>
---
Camera models for pixel-to-vector transformation
"""
import os
import numpy as np
import xmltodict
import math
class OcamCalibCameraModel:
    '''
    OcamCalib (Scaramuzza) camera model for fisheye cameras.

    Maps between image pixels and viewing directions in the camera frame via
    a polynomial in the radial distance R from the distortion centre, plus a
    2x2 affine matrix M accounting for shear / axis scaling.
    '''

    def __init__(self, calib_file):
        '''
        Load intrinsic calibration parameters from an OpenCV-storage XML file.

        calib_file: path to the XML calibration file.
        '''
        param_dict = self.read_opencv_storage_from_file(calib_file)
        self.set_params(param_dict)

    def set_params(self, opencv_storage_dict):
        '''
        Extract parameters from an opencv storage dictionary object.
        '''
        d = opencv_storage_dict
        # Polynomial coefficients, highest degree first: ss4*R^4 + ... + ss0.
        self.fx = np.zeros(5)
        self.fx[0] = float(d['ss4'])
        self.fx[1] = float(d['ss3'])
        self.fx[2] = float(d['ss2'])
        self.fx[3] = float(d['ss1'])
        self.fx[4] = float(d['ss0'])
        # Affine transform from ideal to real sensor coordinates.
        self.M = np.zeros((2, 2))
        self.M[0, 0] = float(d['c'])
        self.M[0, 1] = float(d['d'])
        self.M[1, 0] = float(d['e'])
        self.M[1, 1] = 1.0
        # Distortion centre (model convention: x is height, y is width).
        self.xc = float(d['xc'])
        self.yc = float(d['yc'])
        self.width = int(d['width'])
        self.height = int(d['height'])
        self.image_circle_FOV = float(d['imageCircleFOV'])
        # Derived parameters: coefficients of the quartic's derivative.
        self.dfdx = np.zeros(4)
        self.dfdx[0] = 4 * self.fx[0]
        self.dfdx[1] = 3 * self.fx[1]
        self.dfdx[2] = 2 * self.fx[2]
        self.dfdx[3] = self.fx[3]
        self.inv_M = np.linalg.inv(self.M)
        self.initial_x = self.width/4  # starting point for polynomial solver

    def read_opencv_storage_from_file(self, calib_file):
        '''
        Parse an OpenCV-storage XML calibration file into a dictionary.
        '''
        print(calib_file)
        with open(calib_file) as fd:
            dict_ = xmltodict.parse(fd.read())
        model_dict = dict_['opencv_storage']['cam_model']
        return model_dict

    def vector_to_pixel(self, point):
        '''
        Go from vector (in camera coordinates) to pixel (image coordinates).

        input: point - x, y, z in camera frame
        returns: (x, y, R**2) integer pixel coordinates and the squared radial
                 distance, or False if the ray cannot be projected (R < 0).
        '''
        forward = np.array([0, 0, 1])
        r = vec3_normalise(point)
        # Angle between the ray and the optical axis.
        alpha = np.arccos(np.dot(r, forward))
        R = self.alpha_to_R(alpha)
        if R < 0:
            return False
        # Scale to get ideal fisheye pixel coordinates.
        mag = np.sqrt(r[0]**2 + r[1]**2)
        if mag != 0:
            mag = R / mag
        # NOTE: model (x, y) is (height, width) so we swap.
        px = r[1] * mag
        py = r[0] * mag
        # Account for non-ideal fisheye effects (shear and translation).
        y = self.M[0, 0]*px + self.M[0, 1]*py + self.xc
        x = self.M[1, 0]*px + self.M[1, 1]*py + self.yc
        x = np.round(x).astype(int)
        y = np.round(y).astype(int)
        return x, y, R**2

    def pixel_to_vector(self, x, y):
        '''
        Go from a pixel in image coordinates to a (non-normalised) direction
        vector in camera coordinates.
        '''
        # NOTE: model (x, y) is (height, width) so we swap.
        dx = y - self.xc
        dy = x - self.yc
        px = self.inv_M[0, 0]*dx + self.inv_M[0, 1]*dy
        py = self.inv_M[1, 0]*dx + self.inv_M[1, 1]*dy
        R2 = px*px + py*py
        # Use a float array: np.array([0, 0, 0]) has integer dtype and would
        # silently truncate the fractional components assigned below.
        direction = np.zeros(3)
        direction[0] = py
        direction[1] = px
        direction[2] = -eval_poly4(self.fx, np.sqrt(R2))
        return direction

    def alpha_to_R(self, alpha):
        '''
        Solves polynomial to go from alpha (angle between rays) to R
        (distance from center point on sensor).
        '''
        # The root R of the modified quartic satisfies
        # fx(R) = tan(alpha - pi/2) * R, folded in via the linear coefficient.
        newFx3 = self.fx[3] - np.tan(alpha - np.pi/2)
        fx = np.array([self.fx[0], self.fx[1], self.fx[2], newFx3, self.fx[4]])
        dfdx = np.array([self.dfdx[0], self.dfdx[1], self.dfdx[2], newFx3])
        # Newton-Raphson search for the solution, with an iteration cap to
        # guard against non-convergence.
        x = self.initial_x
        for _ in range(100):
            px = x
            x -= eval_poly4(fx, x) / eval_poly3(dfdx, x)
            # Stop once the update is small. The original test was inverted
            # (broke when the step was LARGE), terminating after one step.
            if abs(x - px) < 1e-3:
                break
        return x
class RectilinearCameraModel:
    '''
    Rectilinear (pinhole) camera model (compatible with Realsense).
    '''

    def __init__(self, calib_file):
        '''
        Load xml file with intrinsic calibration parameters.
        '''
        param_dict = self.read_opencv_storage_from_file(calib_file)
        self.set_params(param_dict)
        # Derived parameters: focal length in pixels from the vertical FOV,
        # and the squared radius of the usable image circle.
        self.focalLengthPixels = (self.height * 0.5) / math.tan(self.verticalFOV * 0.5)
        R = self.focalLengthPixels * math.tan(self.imageCircleFOV * 0.5)
        if self.imageCircleFOV <= 0:
            R = self.width + self.height  # allows everything
        self.imageCircleR2 = R * R

    def set_params(self, opencv_storage_dict):
        '''
        Extract parameters from opencv storage dictionary object.
        '''
        d = opencv_storage_dict
        self.xc = float(d['centreX'])
        self.yc = float(d['centreY'])
        self.imageCircleFOV = float(d['imageCircleFOV'])
        self.verticalFOV = float(d['verticalFOV'])
        self.width = int(d['width'])
        self.height = int(d['height'])

    def read_opencv_storage_from_file(self, calib_file):
        '''
        Parse an OpenCV-storage XML calibration file into a dictionary.
        '''
        with open(calib_file) as fd:
            dict_ = xmltodict.parse(fd.read())
        model_dict = dict_['opencv_storage']['cam_model']
        return model_dict

    def vector_to_pixel(self, point):
        '''
        Go from vector (in camera coordinates) to pixel (image coordinates).

        input: point (list) - x, y, z in camera frame
        returns: (x, y, R_squared) integer pixel coordinates and the squared
                 pixel distance from the principal point.
        '''
        # Perspective projection: scale by focal length / depth.
        s = self.focalLengthPixels / point[2]
        dx = point[0] * s
        dy = point[1] * s
        x = dx + self.xc
        y = dy + self.yc
        x = np.round(x).astype(int)
        y = np.round(y).astype(int)
        R_squared = dx**2 + dy**2
        return x, y, R_squared

    def pixel_to_vector(self, x, y):
        '''
        Go from pixel in image coordinates to a (non-normalised) direction
        vector in camera coordinates.
        '''
        # NOTE(review): unlike OcamCalibCameraModel, x/y are NOT swapped here
        # despite the original comment claiming so — confirm intended.
        dx = x - self.xc
        dy = y - self.yc
        # Use a float array: np.array([0, 0, 0]) has integer dtype and would
        # silently truncate the fractional offsets assigned below.
        direction = np.zeros(3)
        direction[0] = dx
        direction[1] = dy
        direction[2] = self.focalLengthPixels
        return direction
# Utility functions
def vec3_normalise(point):
    """Return *point* scaled to unit length (unchanged if it is the zero vector)."""
    length = np.linalg.norm(point)
    return point if length == 0 else point / length
#Evaluation of polynomials
#cubic
def eval_poly3(poly, x):
    """Evaluate the cubic poly[0]*x^3 + poly[1]*x^2 + poly[2]*x + poly[3] (Horner)."""
    acc = poly[0]
    for coeff in poly[1:4]:
        acc = acc * x + coeff
    return acc
#quartic
def eval_poly4(poly, x):
    """Evaluate the quartic poly[0]*x^4 + ... + poly[4] using Horner's scheme."""
    acc = poly[0]
    for coeff in poly[1:5]:
        acc = acc * x + coeff
    return acc
"numpy.sqrt",
"numpy.tan",
"math.tan",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.dot",
"numpy.linalg.norm",
"numpy.round"
] | [((6173, 6194), 'numpy.linalg.norm', 'np.linalg.norm', (['point'], {}), '(point)\n', (6187, 6194), True, 'import numpy as np\n'), ((663, 674), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (671, 674), True, 'import numpy as np\n'), ((877, 893), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (885, 893), True, 'import numpy as np\n'), ((1285, 1296), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1293, 1296), True, 'import numpy as np\n'), ((1466, 1487), 'numpy.linalg.inv', 'np.linalg.inv', (['self.M'], {}), '(self.M)\n', (1479, 1487), True, 'import numpy as np\n'), ((2045, 2064), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2053, 2064), True, 'import numpy as np\n'), ((2331, 2361), 'numpy.sqrt', 'np.sqrt', (['(r[0] ** 2 + r[1] ** 2)'], {}), '(r[0] ** 2 + r[1] ** 2)\n', (2338, 2361), True, 'import numpy as np\n'), ((3103, 3122), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3111, 3122), True, 'import numpy as np\n'), ((3545, 3611), 'numpy.array', 'np.array', (['[self.fx[0], self.fx[1], self.fx[2], newFx3, self.fx[4]]'], {}), '([self.fx[0], self.fx[1], self.fx[2], newFx3, self.fx[4]])\n', (3553, 3611), True, 'import numpy as np\n'), ((3627, 3687), 'numpy.array', 'np.array', (['[self.dfdx[0], self.dfdx[1], self.dfdx[2], newFx3]'], {}), '([self.dfdx[0], self.dfdx[1], self.dfdx[2], newFx3])\n', (3635, 3687), True, 'import numpy as np\n'), ((5960, 5979), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (5968, 5979), True, 'import numpy as np\n'), ((2123, 2141), 'numpy.dot', 'np.dot', (['r', 'forward'], {}), '(r, forward)\n', (2129, 2141), True, 'import numpy as np\n'), ((3507, 3532), 'numpy.tan', 'np.tan', (['(alpha - np.pi / 2)'], {}), '(alpha - np.pi / 2)\n', (3513, 3532), True, 'import numpy as np\n'), ((4319, 4351), 'math.tan', 'math.tan', (['(self.verticalFOV * 0.5)'], {}), '(self.verticalFOV * 0.5)\n', (4327, 4351), False, 'import math\n'), ((4389, 4424), 'math.tan', 'math.tan', 
(['(self.imageCircleFOV * 0.5)'], {}), '(self.imageCircleFOV * 0.5)\n', (4397, 4424), False, 'import math\n'), ((2714, 2725), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (2722, 2725), True, 'import numpy as np\n'), ((2750, 2761), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (2758, 2761), True, 'import numpy as np\n'), ((3219, 3230), 'numpy.sqrt', 'np.sqrt', (['R2'], {}), '(R2)\n', (3226, 3230), True, 'import numpy as np\n'), ((5569, 5580), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (5577, 5580), True, 'import numpy as np\n'), ((5605, 5616), 'numpy.round', 'np.round', (['y'], {}), '(y)\n', (5613, 5616), True, 'import numpy as np\n')] |
"""Train and evaluate a deep-forest classifier on the iris dataset."""
import numpy as np
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

from deepforest import CascadeForestClassifier

# Load the iris features/labels and make sure they are numpy arrays.
features, labels = load_iris(return_X_y=True)
features = np.array(features)
labels = np.array(labels)

# Hold out 20% of the samples for evaluation.
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)

# Fit the cascade forest and score it on the held-out split.
clf = CascadeForestClassifier()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
acc = accuracy_score(y_test, predictions) * 100
print("Accuracy: {:.3f} %".format(acc))
| [
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"deepforest.CascadeForestClassifier",
"numpy.array",
"sklearn.metrics.accuracy_score"
] | [((209, 235), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (218, 235), False, 'from sklearn.datasets import load_iris\n'), ((241, 252), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (249, 252), True, 'import numpy as np\n'), ((257, 268), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (265, 268), True, 'import numpy as np\n'), ((305, 342), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (321, 342), False, 'from sklearn.model_selection import train_test_split\n'), ((352, 377), 'deepforest.CascadeForestClassifier', 'CascadeForestClassifier', ([], {}), '()\n', (375, 377), False, 'from deepforest import CascadeForestClassifier\n'), ((447, 477), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (461, 477), False, 'from sklearn.metrics import accuracy_score\n')] |
import numpy as np
import cv2

from utils import FacialRecoLogin

# Time before locking/unlocking (in seconds)
time_before_lock = 3


if __name__ == '__main__':
    # Facial-recognition helper that tracks how long faces have been visible.
    reco_login = FacialRecoLogin(time_before_lock=time_before_lock)

    # Open the default webcam.
    video_capture = cv2.VideoCapture(0)

    while True:
        # Grab a single frame of video.
        ret, frame = video_capture.read()

        # Work on a quarter-size copy to speed up face recognition.
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # OpenCV delivers BGR; the recognition backend expects RGB.
        rgb_small_frame = small_frame[:, :, ::-1]

        # Names and bounding boxes of the faces found in this frame.
        face_names, face_locations = reco_login.frame_match(rgb_small_frame)

        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Undo the 1/4 downscaling so the boxes line up with the full frame.
            top, right, bottom, left = 4 * top, 4 * right, 4 * bottom, 4 * left

            font = cv2.FONT_HERSHEY_DUPLEX
            if name in reco_login.known_face_names:
                # Known user: green box plus a countdown to unlocking.
                cv2.rectangle(frame, (left, top), (right, bottom),
                              (0, 255, 0), 2)
                cv2.putText(frame, name, (left, bottom + 30),
                            font, 1.0, (0, 255, 0), 1)
                cv2.putText(frame, "Authorized... " + str(np.round(reco_login.time_recognized, 1)), (left, bottom + 60),
                            font, 1.0, (0, 255, 0), 1)
            else:
                # Unknown face: red box plus a countdown to locking.
                cv2.rectangle(frame, (left, top),
                              (right, bottom), (0, 0, 255), 2)
                cv2.putText(frame, name, (left + 60, bottom - 40),
                            font, 1.0, (0, 0, 255), 1)
                cv2.putText(frame, "No access... " + str(np.round(reco_login.time_unknown, 1)), (left + 60, bottom + 6),
                            font, 1.0, (0, 0, 255), 1)

        # Lock/unlock once the corresponding timer has run out.
        if reco_login.time_recognized <= 0:
            reco_login.unlock(video_capture, speaker=True)
        elif reco_login.time_unknown <= 0:
            reco_login.lock(video_capture, speaker=True)

        # Show webcam output; 'q' quits.
        cv2.imshow('Video', frame)
        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break
| [
"cv2.rectangle",
"utils.FacialRecoLogin",
"cv2.imshow",
"cv2.putText",
"cv2.VideoCapture",
"cv2.resize",
"cv2.waitKey",
"numpy.round"
] | [((203, 253), 'utils.FacialRecoLogin', 'FacialRecoLogin', ([], {'time_before_lock': 'time_before_lock'}), '(time_before_lock=time_before_lock)\n', (218, 253), False, 'from utils import FacialRecoLogin\n'), ((294, 313), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (310, 313), False, 'import cv2\n'), ((518, 561), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(frame, (0, 0), fx=0.25, fy=0.25)\n', (528, 561), False, 'import cv2\n'), ((2699, 2725), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (2709, 2725), False, 'import cv2\n'), ((1263, 1329), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', '(0, 255, 0)', '(2)'], {}), '(frame, (left, top), (right, bottom), (0, 255, 0), 2)\n', (1276, 1329), False, 'import cv2\n'), ((1641, 1713), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(left, bottom + 30)', 'font', '(1.0)', '(0, 255, 0)', '(1)'], {}), '(frame, name, (left, bottom + 30), font, 1.0, (0, 255, 0), 1)\n', (1652, 1713), False, 'import cv2\n'), ((1977, 2043), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', '(0, 0, 255)', '(2)'], {}), '(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n', (1990, 2043), False, 'import cv2\n'), ((2135, 2212), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(left + 60, bottom - 40)', 'font', '(1.0)', '(0, 0, 255)', '(1)'], {}), '(frame, name, (left + 60, bottom - 40), font, 1.0, (0, 0, 255), 1)\n', (2146, 2212), False, 'import cv2\n'), ((2738, 2752), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2749, 2752), False, 'import cv2\n'), ((1800, 1839), 'numpy.round', 'np.round', (['reco_login.time_recognized', '(1)'], {}), '(reco_login.time_recognized, 1)\n', (1808, 1839), True, 'import numpy as np\n'), ((2298, 2334), 'numpy.round', 'np.round', (['reco_login.time_unknown', '(1)'], {}), '(reco_login.time_unknown, 1)\n', (2306, 2334), True, 'import numpy as np\n')] |
import numpy as np
import torch.utils.data as tud
from sklearn.model_selection import train_test_split
from noge.constants import REAL_DATASETS, PLACES, DATA_DIR
from xlog.utils import load_pickle
class GraphDataset(tud.Dataset):
def __init__(self, graphs, mazes=None):
self.graphs = graphs
self.mazes = mazes
self.num_nodes_per_graph = np.array([G.number_of_nodes() for G in graphs], dtype=int)
self.num_edges_per_graph = np.array([G.number_of_edges() for G in graphs], dtype=int)
n_graphs = len(graphs)
self._pairs = [(g, s) for g in range(n_graphs) for s in graphs[g].nodes]
graph_idx, sources = zip(*self._pairs)
self.samples_graph_idx = np.array(graph_idx)
self._samples_sources = np.array(sources)
def __len__(self):
return len(self._pairs)
def __getitem__(self, item):
graph_index, source = self._pairs[item]
graph = self.graphs[graph_index]
sample = dict(graph=graph, source=source)
if self.mazes is not None:
sample.update(maze=self.mazes[graph_index])
return sample
@property
def max_nodes(self):
return max(self.num_nodes_per_graph)
@property
def max_edges(self):
return max(self.num_edges_per_graph)
@property
def num_graphs(self):
return len(self.graphs)
class SubsetSampler(tud.Sampler):
def __init__(self, dataset, seed, num_samples=50):
assert num_samples <= len(dataset)
self.dataset = dataset
self.seed = seed
self.rng = np.random.RandomState(seed=seed)
# for evaluation only choose pairs once (to be consistent across epochs)
n_graphs = len(dataset.graphs)
if n_graphs >= num_samples:
# sample one source node per graph
num_nodes_per_graph = self.dataset.num_nodes_per_graph
indices = []
offset = 0
for num_nodes in num_nodes_per_graph:
# num_nodes = num_nodes_per_graph[g]
index = self.rng.randint(num_nodes)
indices.append(offset + index)
offset += num_nodes
if len(indices) == num_samples:
break
self._indices = indices
else:
# the number of graphs is less than the required num_samples
n_total = len(dataset)
if n_total <= num_samples:
# if the total number of samples is less than or equal to required, use all samples
self._indices = list(range(n_total))
else:
# if the total number of samples is larger than required, sub-sample
self._indices = self.rng.choice(n_total, size=num_samples, replace=False).tolist()
self._indices.sort()
def __iter__(self):
return iter(self._indices)
def __len__(self):
return len(self._indices)
def get_test_loader(dataset, seed, num_samples):
sampler = SubsetSampler(dataset, seed=seed, num_samples=num_samples)
# set batch_size = None to get each sample without a batch dimension
# set collate_fn = identity to not trigger auto_collate which converts to torch types
loader = tud.DataLoader(dataset=dataset, batch_size=None,
collate_fn=lambda x: x, sampler=sampler)
return loader
class BalancedInfiniteRandomSampler:
""" Sample each graph with equal probability (in the limit) """
def __init__(self, dataset, seed, cycle_size=100_000, replace=True):
self.dataset = dataset
self.seed = seed
self.rng = np.random.RandomState(seed=seed)
# each node's weight should be proportional to 1 over the graph size of the node
inverse_graph_sizes = 1. / dataset.num_nodes_per_graph
self.p = inverse_graph_sizes[dataset.samples_graph_idx]
self.p = self.p / self.p.sum()
# self.weights = torch.as_tensor(self.p, dtype=torch.double)
self.cycle_size = cycle_size
self.replacement = replace
def __iter__(self):
while True:
# sample once every `cycle_size` (rng.choice is slow)
indices = self.rng.choice(len(self.dataset), self.cycle_size, self.replacement, p=self.p).tolist()
# items = torch.multinomial(self.weights, self.cycle_size, self.replacement).tolist()
for index in indices:
yield index
def get_train_generator(dataset, seed):
sampler = BalancedInfiniteRandomSampler(dataset, seed)
sampler_iter = iter(sampler)
while True:
index = next(sampler_iter)
sample = dataset[index]
yield sample
def _get_real_graph(dataset):
proc_dir = DATA_DIR / 'osm' / 'processed'
# get place and name
place = PLACES[dataset]
name = place['city'].replace(' ', '')
path_train = proc_dir / f"{name}_train.pkl"
path_test = proc_dir / f"{name}_test.pkl"
train_graph = load_pickle(path_train)
test_graph = load_pickle(path_test)
train_set = GraphDataset([train_graph])
test_set = GraphDataset([test_graph])
return train_set, test_set
def get_datasets(dataset, seed, test_size, val_size=0):
if dataset in REAL_DATASETS:
return _get_real_graph(dataset)
path_graphs = DATA_DIR / f"{dataset}.pkl"
graphs = load_pickle(path_graphs)
graphs_train, graphs_test = train_test_split(graphs, test_size=test_size, random_state=seed)
graphs_val = None
if val_size > 0:
graphs_train, graphs_val = train_test_split(graphs_train, test_size=val_size, random_state=seed)
mazes_train = None
mazes_test = None
mazes_val = None
if dataset in ('maze', 'hypermaze'):
graphs_train, mazes_train = zip(*graphs_train)
graphs_test, mazes_test = zip(*graphs_test)
if graphs_val is not None:
graphs_val, mazes_val = zip(*graphs_val)
train_set = GraphDataset(graphs_train, mazes_train)
test_set = GraphDataset(graphs_test, mazes_test)
if graphs_val is not None:
val_set = GraphDataset(graphs_val, mazes_val)
return train_set, val_set, test_set
return train_set, test_set
| [
"sklearn.model_selection.train_test_split",
"numpy.array",
"torch.utils.data.DataLoader",
"xlog.utils.load_pickle",
"numpy.random.RandomState"
] | [((3265, 3358), 'torch.utils.data.DataLoader', 'tud.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'None', 'collate_fn': '(lambda x: x)', 'sampler': 'sampler'}), '(dataset=dataset, batch_size=None, collate_fn=lambda x: x,\n sampler=sampler)\n', (3279, 3358), True, 'import torch.utils.data as tud\n'), ((4997, 5020), 'xlog.utils.load_pickle', 'load_pickle', (['path_train'], {}), '(path_train)\n', (5008, 5020), False, 'from xlog.utils import load_pickle\n'), ((5038, 5060), 'xlog.utils.load_pickle', 'load_pickle', (['path_test'], {}), '(path_test)\n', (5049, 5060), False, 'from xlog.utils import load_pickle\n'), ((5371, 5395), 'xlog.utils.load_pickle', 'load_pickle', (['path_graphs'], {}), '(path_graphs)\n', (5382, 5395), False, 'from xlog.utils import load_pickle\n'), ((5428, 5492), 'sklearn.model_selection.train_test_split', 'train_test_split', (['graphs'], {'test_size': 'test_size', 'random_state': 'seed'}), '(graphs, test_size=test_size, random_state=seed)\n', (5444, 5492), False, 'from sklearn.model_selection import train_test_split\n'), ((717, 736), 'numpy.array', 'np.array', (['graph_idx'], {}), '(graph_idx)\n', (725, 736), True, 'import numpy as np\n'), ((769, 786), 'numpy.array', 'np.array', (['sources'], {}), '(sources)\n', (777, 786), True, 'import numpy as np\n'), ((1587, 1619), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (1608, 1619), True, 'import numpy as np\n'), ((3657, 3689), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (3678, 3689), True, 'import numpy as np\n'), ((5572, 5641), 'sklearn.model_selection.train_test_split', 'train_test_split', (['graphs_train'], {'test_size': 'val_size', 'random_state': 'seed'}), '(graphs_train, test_size=val_size, random_state=seed)\n', (5588, 5641), False, 'from sklearn.model_selection import train_test_split\n')] |
# based on: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html
import os, sys
import numpy as np
import keras
from . import helper as hp
class OneHotEncode():
def __init__(self, max_len_model, n_chars, indices_token, token_indices, pad_char, start_char, end_char):
'Initialization'
self.max_len_model = max_len_model
self.n_chars = n_chars
self.pad_char = pad_char
self.start_char = start_char
self.end_char = end_char
self.indices_token = indices_token
self.token_indices = token_indices
def one_hot_encode(self, token_list, n_chars):
output = np.zeros((token_list.shape[0], n_chars))
for j, token in enumerate(token_list):
output[j, token] = 1
return output
def smi_to_int(self, smi):
"""
this will turn a list of smiles in string format
and turn them into a np array of int, with padding
"""
token_list = hp.smi_tokenizer(smi)
token_list = [self.start_char] + token_list + [self.end_char]
padding = [self.pad_char]*(self.max_len_model - len(token_list))
token_list.extend(padding)
int_list = [self.token_indices[x] for x in token_list]
return np.asarray(int_list)
def int_to_smile(self, array):
"""
From an array of int, return a list of
molecules in string smile format
Note: remove the padding char
"""
all_smi = []
for seq in array:
new_mol = [self.indices_token[int(x)] for x in seq]
all_smi.append(''.join(new_mol).replace(self.pad_char, ''))
return all_smi
def clean_smile(self, smi):
""" remove return line symbols """
smi = smi.replace('\n', '')
return smi
def smile_to_onehot(self, path_data):
f = open(path_data)
lines = f.readlines()
n_data = len(lines)
X = np.empty((n_data, self.max_len_model, self.n_chars), dtype=int)
for i,smi in enumerate(lines):
# remove return line symbols
smi = self.clean_smile(smi)
# tokenize
int_smi = self.smi_to_int(smi)
# one hot encode
X[i] = self.one_hot_encode(int_smi, self.n_chars)
return X
def generator_smile_to_onehot(self, smi):
smi = self.clean_smile(smi)
int_smi = self.smi_to_int(smi)
one_hot = self.one_hot_encode(int_smi, self.n_chars)
return one_hot
class DataGenerator(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self, list_IDs, batch_size, max_len_model, path_data, n_chars,
indices_token, token_indices, pad_char, start_char, end_char, shuffle=True):
'Initialization'
self.max_len_model = max_len_model
self.batch_size = batch_size
self.list_IDs = list_IDs
self.shuffle = shuffle
self.path_data = path_data
self.n_chars = n_chars
self.OneHotEncoder = OneHotEncode(max_len_model, n_chars,
indices_token, token_indices,
pad_char, start_char, end_char)
self.on_epoch_end()
f=open(self.path_data)
self.lines=f.readlines()
self.indices_token = indices_token
self.token_indices = token_indices
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.floor(len(self.list_IDs) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_IDs_temp = [self.list_IDs[k] for k in indexes]
# Generate data
X, y = self.__data_generation(list_IDs_temp)
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_IDs_temp):
'Generates batch of data containing batch_size samples'
switch = 1
y = np.empty((self.batch_size, self.max_len_model-switch, self.n_chars), dtype=int)
X = np.empty((self.batch_size, self.max_len_model-switch, self.n_chars), dtype=int)
# Generate data
for i, ID in enumerate(list_IDs_temp):
smi = self.lines[ID]
one_hot_smi = self.OneHotEncoder.generator_smile_to_onehot(smi)
X[i] = one_hot_smi[:-1]
y[i] = one_hot_smi[1:]
return X, y
| [
"numpy.zeros",
"numpy.asarray",
"numpy.empty",
"numpy.random.shuffle"
] | [((667, 707), 'numpy.zeros', 'np.zeros', (['(token_list.shape[0], n_chars)'], {}), '((token_list.shape[0], n_chars))\n', (675, 707), True, 'import numpy as np\n'), ((1285, 1305), 'numpy.asarray', 'np.asarray', (['int_list'], {}), '(int_list)\n', (1295, 1305), True, 'import numpy as np\n'), ((1998, 2061), 'numpy.empty', 'np.empty', (['(n_data, self.max_len_model, self.n_chars)'], {'dtype': 'int'}), '((n_data, self.max_len_model, self.n_chars), dtype=int)\n', (2006, 2061), True, 'import numpy as np\n'), ((4403, 4488), 'numpy.empty', 'np.empty', (['(self.batch_size, self.max_len_model - switch, self.n_chars)'], {'dtype': 'int'}), '((self.batch_size, self.max_len_model - switch, self.n_chars),\n dtype=int)\n', (4411, 4488), True, 'import numpy as np\n'), ((4495, 4580), 'numpy.empty', 'np.empty', (['(self.batch_size, self.max_len_model - switch, self.n_chars)'], {'dtype': 'int'}), '((self.batch_size, self.max_len_model - switch, self.n_chars),\n dtype=int)\n', (4503, 4580), True, 'import numpy as np\n'), ((4200, 4231), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (4217, 4231), True, 'import numpy as np\n')] |
import sys
import numpy as np
vert = np.loadtxt(sys.argv[1])
tri = np.loadtxt(sys.argv[2])
with open(sys.argv[3], 'w') as f:
f.write('OFF\n')
f.write('{} {} {}\n'.format(int(vert.shape[0]), int(tri.shape[0]), 0))
with open(sys.argv[3], 'ab') as f:
np.savetxt(f, vert, fmt='%.6f')
np.savetxt(f, np.hstack([np.ones((tri.shape[0],1))*3, tri]), fmt='%d')
| [
"numpy.loadtxt",
"numpy.ones",
"numpy.savetxt"
] | [((38, 61), 'numpy.loadtxt', 'np.loadtxt', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (48, 61), True, 'import numpy as np\n'), ((68, 91), 'numpy.loadtxt', 'np.loadtxt', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (78, 91), True, 'import numpy as np\n'), ((265, 296), 'numpy.savetxt', 'np.savetxt', (['f', 'vert'], {'fmt': '"""%.6f"""'}), "(f, vert, fmt='%.6f')\n", (275, 296), True, 'import numpy as np\n'), ((326, 352), 'numpy.ones', 'np.ones', (['(tri.shape[0], 1)'], {}), '((tri.shape[0], 1))\n', (333, 352), True, 'import numpy as np\n')] |
import pandas as pd
from scipy.stats import ttest_ind
import numpy as np
from statsmodels.stats.multitest import multipletests as multit
from warnings import warn
def count_reps(inseries):
inseries = inseries.tolist()
counts = {k:0 for k in list(set(inseries))}
out = [878 for i in range(len(inseries))]
for ind, ite in enumerate(inseries):
out[ind] = counts[ite]
counts[ite] += 1
return out
from scipy.stats import mstats_basic
def interpret(ld, condition_column, strain_column, values_column, control_condition, out_prefix, circularity=None, set_missing_na=False):
'''
Interpret experimental data report produced by pyphe-analyse.
'''
###Check if essential columns exist
print('Checking input table')
print('Checking if axis_column exists')
if condition_column not in list(ld):
raise NameError('Axis_column not found in table.')
print('....OK')
print('Checking if grouping_column exists')
if strain_column not in list(ld):
raise NameError('grouping_column not found in table.')
print('....OK')
print('Checking if values_column exists')
if values_column not in list(ld):
raise NameError('values_column not found in table.')
print('....OK')
print('Checking if control exists in axis_column')
if control_condition not in ld[condition_column].unique():
raise NameError('control not found in axis_column.')
print('....OK')
if circularity:
print('Circularity filter is set. Checking if Colony_circularity column exists')
if 'Colony_circularity' not in list(ld):
raise NameError('Input data has no column named Colony_circularity. Cannot apply circularity filter.')
###Report some simple numbers
print('Data report loaded successfully')
initial_conditions = ld[condition_column].unique()
print('Number of unique elements in axis column: %i'%len(initial_conditions))
initial_strains = ld[strain_column].unique()
print('Number of unique elements in grouping column: %i'%len(initial_strains))
print('Number of plates: %i'%len(ld['Plate'].unique()))
print('Number of non-NA data points: %i'%len(ld.loc[~pd.isnull(ld[values_column])].index))
###Simple QC filters
n_datapoints = (~ld[values_column].isnull()).sum()
if circularity:
ld.loc[ld['Colony_circularity']<circularity, values_column] = np.nan
nn_datapoints = (~ld[values_column].isnull()).sum()
print('Removed %i entries with circularity < %f'%(n_datapoints-nn_datapoints, circularity))
n_datapoints = nn_datapoints
if set_missing_na:
ld.loc[ld[values_column]==0, values_column] = np.nan
nn_datapoints = (~ld[values_column].isnull()).sum()
print('Removed %i entries with fitness 0'%(n_datapoints-nn_datapoints))
n_datapoints = nn_datapoints
###Group by replicates
ld_stats = ld.copy()
#drop any NA
ld_stats = ld_stats.loc[~ld_stats[values_column].isnull()]
#Recompute number of axis and grouping elements
conditions = ld_stats[condition_column].unique()
print('Number of unique elements in axis column after filtering: %i'%len(conditions))
strains = ld_stats[strain_column].unique()
print('Number of unique elements in grouping column: %i'%len(strains))
ld_stats['condition---strain'] = ld_stats[condition_column] + '---' + ld_stats[strain_column]
ld_stats['rep'] = count_reps(ld_stats['condition---strain'])
#Pivot this into wide format
ld_stats_piv = ld_stats.pivot_table(index=strain_column, columns=[condition_column,'rep'], values=values_column)
#assert that there are no duplicates, i.e. that count_reps() worked as expected
assert (ld_stats.pivot_table(index=strain_column, columns=[condition_column,'rep'], values=values_column, aggfunc=len).unstack().dropna()==1.0).all()
#Save this table:
ld_stats_piv.to_csv(out_prefix+'_reps.csv')
###Compute summary stats
mean_fitness = ld_stats_piv.mean(axis=1, level=0)
median_fitness = ld_stats_piv.median(axis=1, level=0)
fitness_stdev = ld_stats_piv.std(axis=1, level=0)
obs_count = ld_stats_piv.count(axis=1, level=0)
#Compute effect sizes
median_effect_size = median_fitness.div(median_fitness[control_condition], axis=0)
mean_effect_size = mean_fitness.div(mean_fitness[control_condition], axis=0)
###run Welch's t-test
print('Running t-tests')
p_Welch = {}
b = ld_stats_piv.xs(control_condition,axis=1, level=0).values
b = np.ma.masked_invalid(b)
for co in conditions:
a = ld_stats_piv.xs(co, axis=1, level=0).values
a = np.ma.masked_invalid(a)
pvals_temp = mstats_basic.ttest_ind(a, b, axis=1, equal_var=False)[1].filled(np.nan)
p_Welch[co] = pd.Series(pvals_temp, index=ld_stats_piv.index)
p_Welch = pd.concat(p_Welch, axis=1)
#multiple testing correction by BH
p_Welch_BH = p_Welch.copy()
for c in p_Welch_BH:
if p_Welch_BH[c].isnull().all():
warn('No p-values obtained for %s (probably not enaough replicates)'%c)
else:
p_Welch_BH.loc[~p_Welch_BH[c].isnull(), c] = multit(p_Welch_BH.loc[~p_Welch_BH[c].isnull(), c], method='fdr_bh')[1]
#aggregate data in table and save
#And join together in one big data frame
combined_data = pd.concat({'mean_fitness' : mean_fitness,
'mean_fitness_log2' : mean_fitness.applymap(np.log2),
'median_fitness' : median_fitness,
'median_fitness_log2' : median_fitness.applymap(np.log2),
'mean_effect_size' : mean_effect_size,
'mean_effect_size_log2' : mean_effect_size.applymap(np.log2),
'median_effect_size' : median_effect_size,
'median_effect_size_log2' : median_effect_size.applymap(np.log2),
'observation_count' : obs_count,
'stdev_fitness' : fitness_stdev,
'p_Welch' : p_Welch,
'p_Welch_BH' : p_Welch_BH,
'p_Welch_BH_-log10' : -p_Welch_BH.applymap(np.log10)}, axis=1)
combined_data = combined_data.swaplevel(axis=1).sort_index(axis=1)
combined_data.to_csv(out_prefix+'_summaryStats.csv')
print('Interpretation completed and results saved.')
return combined_data
| [
"pandas.Series",
"pandas.isnull",
"scipy.stats.mstats_basic.ttest_ind",
"numpy.ma.masked_invalid",
"warnings.warn",
"pandas.concat"
] | [((4556, 4579), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['b'], {}), '(b)\n', (4576, 4579), True, 'import numpy as np\n'), ((4876, 4902), 'pandas.concat', 'pd.concat', (['p_Welch'], {'axis': '(1)'}), '(p_Welch, axis=1)\n', (4885, 4902), True, 'import pandas as pd\n'), ((4675, 4698), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['a'], {}), '(a)\n', (4695, 4698), True, 'import numpy as np\n'), ((4814, 4861), 'pandas.Series', 'pd.Series', (['pvals_temp'], {'index': 'ld_stats_piv.index'}), '(pvals_temp, index=ld_stats_piv.index)\n', (4823, 4861), True, 'import pandas as pd\n'), ((5052, 5125), 'warnings.warn', 'warn', (["('No p-values obtained for %s (probably not enaough replicates)' % c)"], {}), "('No p-values obtained for %s (probably not enaough replicates)' % c)\n", (5056, 5125), False, 'from warnings import warn\n'), ((4720, 4773), 'scipy.stats.mstats_basic.ttest_ind', 'mstats_basic.ttest_ind', (['a', 'b'], {'axis': '(1)', 'equal_var': '(False)'}), '(a, b, axis=1, equal_var=False)\n', (4742, 4773), False, 'from scipy.stats import mstats_basic\n'), ((2218, 2246), 'pandas.isnull', 'pd.isnull', (['ld[values_column]'], {}), '(ld[values_column])\n', (2227, 2246), True, 'import pandas as pd\n')] |
#==============================================================#
# This script classifies an instance of a connection #
#==============================================================#
import sys
import math
import pickle
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn import preprocessing
from sklearn.covariance import EllipticEnvelope
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
#================================#
# Load the model #
#================================#
model = pickle.load(open("./elliptic_envelope.mlmodel", 'rb'))
n_features = 7
#===============================#
# Input connection string #
#===============================#
#conn = sys.argv[1]
#feature_values = conn.split(",")
#conn = "1198897,9,51,13,22,0,10043209,7,7,16,14,32,850,459,17,8,59,5,3,1,3,1,2,0,2,1,1,1,1,4,4,2,1,1,1,1,1"
#conn = "0,0,0,0,0,0,0,4,0,0,1043,1373,0,0,0,0,0,0,0,0,0,0,1,1,0,0,18,18,0,0,0,0,1"
#conn = "0,0,0,0,0,0,0,2,0,0,1746,14789,0,0,0,0,0,0,0,0,0,0,1,1,0,0,18,18,0,0,0,0,1"
#conn = "6,39,0,0,0,0,0,3,0,0,4,31,0,0,22,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,1,0,1"
#conn = "0,0,799,2291,0,0,1006,16,-1"
#conn = "0,1007,1235,0,18,18,1"
conn = "0,1234,4649,0,3,3,1"
#if(sys.argv[1] == 0):
if(len(sys.argv) == 1):
feature_values = conn.split(",")
if(len(feature_values) == n_features):
feature_values = [int(i) for i in feature_values]
df = pd.DataFrame(np.array(feature_values).reshape(1,n_features))
feature_values = df.values
output = model.predict(feature_values)
print(output)
else:
leg_count = 0
att_count = 0
file_to_classify = sys.argv[1]
with open(file_to_classify,'r') as fp:
for line in fp:
line = line.strip()
if("class" in line):
continue
conn = line
feature_values = conn.split(",")
if(len(feature_values) == n_features):
feature_values = [int(i) for i in feature_values]
df = pd.DataFrame(np.array(feature_values).reshape(1,n_features))
feature_values = df.values
output = model.predict(feature_values)
if(output[0] == 1):
leg_count = leg_count + 1
else:
att_count = att_count + 1
print("Classified connection " + line + " as " + str(output[0]))
print("Leg Count = " + str(leg_count))
print("Att Count = " + str(att_count))
| [
"numpy.array"
] | [((1571, 1595), 'numpy.array', 'np.array', (['feature_values'], {}), '(feature_values)\n', (1579, 1595), True, 'import numpy as np\n'), ((2178, 2202), 'numpy.array', 'np.array', (['feature_values'], {}), '(feature_values)\n', (2186, 2202), True, 'import numpy as np\n')] |
"""
TONAS Loader
.. admonition:: Dataset Info
:class: dropdown
This dataset contains a music collection of 72 sung excerpts representative of three a cappella singing styles
(Deblas, and two variants of Martinete). It has been developed within the COFLA research project context.
The distribution is as follows:
1. 16 Deblas
2. 36 Martinete 1
3. 20 Martinete 2
This collection was built in the context of a study on similarity and style classification of flamenco a cappella
singing styles (Tonas) by the flamenco expert Dr. <NAME>, Universidad de Sevilla.
We refer to (Mora et al. 2010) for a comprehensive description of the considered styles and their musical
characteristics. All 72 excerpts are monophonic, their average duration is 30 seconds and there is enough
variability for a proper evaluation of our methods, including a variety of singers, recording conditions,
presence of percussion, clapping, background voices and noise. We also provide manual melodic transcriptions,
generated by the COFLA team and <NAME>.
The annotations are represented by specifying the value (in this case, Notes and F0) at the related timestamps.
TONAS' note and F0 annotations also have "Energy" information, which refers to the average energy value through
all the frames in which a note or a F0 value is comprised.
Using this dataset:
TONAS dataset can be obtained upon request. Please refer to this link: https://zenodo.org/record/1290722 to
request access and follow the indications of the .download() method for a proper storing and organization
of the TONAS dataset.
Citing this dataset:
When TONAS is used for academic research, we would highly appreciate if scientific publications of works partly
based on the TONAS dataset quote the following publication:
- Music material: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2010). Melodic
Characterization and Similarity in A Cappella Flamenco Cantes. 11th International Society for Music Information
Retrieval Conference (ISMIR 2010).
- Transcriptions: <NAME>., <NAME>. (in Press). Towards Computer-Assisted Flamenco Transcription: An
Experimental Comparison of Automatic Transcription Algorithms As Applied to A Cappella Singing.
Computer Music Journal.
"""
import csv
import logging
import os
from typing import TextIO, Tuple, Optional
from deprecated.sphinx import deprecated
import librosa
import numpy as np
from smart_open import open
from mirdata import annotations, jams_utils, core, io
BIBTEX = """
Music material:
@inproceedings{tonas_music,
author = {<NAME> <NAME> <NAME>
and <NAME> and <NAME>},
year = {2010},
month = {01},
pages = {351-356},
title = {Characterization and Similarity in A Cappella Flamenco Cantes.}
}
Transcriptions:
@inproceedings{tonas_annotations,
author = {E. {Gómez} and J. {Bonada}},
journal = {Computer Music Journal},
title = {Towards Computer-Assisted Flamenco Transcription: An Experimental
Comparison of Automatic Transcription Algorithms as Applied to A
Cappella Singing},
year = {2013},
volume = {37},
number = {2},
pages = {73-90},
doi = {10.1162/COMJ_a_00180}}
"""
INDEXES = {
"default": "1.0",
"test": "1.0",
"1.0": core.Index(filename="tonas_index_1.0.json"),
}
DOWNLOAD_INFO = """
PLEASE READ CAREFULLY ALL THE INFORMATION SO YOU DON'T MISS ANY STEP:
Unfortunately, the TONAS dataset is not available to be shared openly. However,
you can request access to the dataset in the following link, providing a brief
explanation of what your are going to use the dataset for:
==> https://zenodo.org/record/1290722
Then, unzip the dataset, change the dataset name to: "tonas" (with lowercase),
and locate it to {}. If you unzip it into a different path, please remember to set the
right data_home when initializing the dataset.
"""
LICENSE_INFO = """
The TONAS dataset is offered free of charge for internal non-commercial use only. You can not redistribute it nor
modify it. Dataset by COFLA team. Copyright © 2012 COFLA project, Universidad de Sevilla. Distribution rights granted
to Music Technology Group, Universitat Pompeu Fabra. All Rights Reserved.
"""
class Track(core.Track):
"""TONAS track class
Args:
track_id (str): track id of the track
data_home (str): Local path where the dataset is stored.
If `None`, looks for the data in the default directory, `~/mir_datasets/TONAS`
Attributes:
f0_path (str): local path where f0 melody annotation file is stored
notes_path(str): local path where notation annotation file is stored
audio_path(str): local path where audio file is stored
track_id (str): track id
singer (str): performing singer (cantaor)
title (str): title of the track song
tuning_frequency (float): tuning frequency of the symbolic notation
Cached Properties:
f0_automatic (F0Data): automatically extracted f0
f0_corrected (F0Data): manually corrected f0 annotations
notes (NoteData): annotated notes
"""
def __init__(
self,
track_id,
data_home,
dataset_name,
index,
metadata,
):
super().__init__(
track_id,
data_home,
dataset_name,
index,
metadata,
)
self.f0_path = self.get_path("f0")
self.notes_path = self.get_path("notes")
self.audio_path = self.get_path("audio")
@property
def style(self):
return self._track_metadata.get("style")
@property
def singer(self):
return self._track_metadata.get("singer")
@property
def title(self):
return self._track_metadata.get("title")
@property
def tuning_frequency(self):
return _load_tuning_frequency(self.notes_path)
@property
def audio(self) -> Tuple[np.ndarray, float]:
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
@core.cached_property
def f0_automatic(self) -> Optional[annotations.F0Data]:
return load_f0(self.f0_path, False)
@core.cached_property
def f0_corrected(self) -> Optional[annotations.F0Data]:
return load_f0(self.f0_path, True)
@property
def f0(self):
logging.warning(
"Track.f0 is deprecated as of 0.3.4 and will be removed in a future version. Use"
" Track.f0_automatic or Track.f0_corrected"
)
return self.f0_corrected
@core.cached_property
def notes(self) -> Optional[annotations.NoteData]:
return load_notes(self.notes_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
audio_path=self.audio_path,
f0_data=[(self.f0, "pitch_contour")],
note_data=[(self.notes, "note_hz")],
metadata=self._track_metadata,
)
def load_audio(fhandle: str) -> Tuple[np.ndarray, float]:
"""Load a TONAS audio file.
Args:
fhandle (str): path to an audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
return librosa.load(fhandle, sr=44100, mono=True)
# no decorator because of https://github.com/mir-dataset-loaders/mirdata/issues/503
def load_f0(fpath: str, corrected: bool) -> Optional[annotations.F0Data]:
"""Load TONAS f0 annotations
Args:
fpath (str): path pointing to f0 annotation file
corrected (bool): if True, loads manually corrected frequency values
otherwise, loads automatically extracted frequency values
Returns:
F0Data: predominant f0 melody
"""
times = []
freqs = []
freqs_corr = []
energies = []
with open(fpath, "r") as fhandle:
reader = np.genfromtxt(fhandle)
for line in reader:
times.append(float(line[0]))
freqs.append(float(line[2]))
freqs_corr.append(float(line[3]))
energies.append(float(line[1]))
freq_array = np.array(freqs_corr if corrected else freqs, dtype="float")
energy_array = np.array(energies, dtype="float")
voicing_array = (freq_array > 0).astype("float")
return annotations.F0Data(
np.array(times, dtype="float"),
"s",
freq_array,
"hz",
voicing_array,
"binary",
energy_array,
"energy",
)
@io.coerce_to_string_io
def load_notes(fhandle: TextIO) -> Optional[annotations.NoteData]:
"""Load TONAS note data from the annotation files
Args:
fhandle (str or file-like): path or file-like object pointing to a notes annotation file
Returns:
NoteData: note annotations
"""
intervals = []
pitches = []
energy = []
reader = csv.reader(fhandle, delimiter=",")
tuning = next(reader)[0]
for line in reader:
intervals.append([line[0], float(line[0]) + float(line[1])])
# Convert midi value to frequency
note_hz = _midi_to_hz(float(line[2]), float(tuning))
pitches.append(note_hz)
energy.append(float(line[3]))
note_data = annotations.NoteData(
np.array(intervals, dtype="float"),
"s",
np.array(pitches, dtype="float"),
"hz",
np.array(energy, dtype="float"),
"energy",
)
return note_data
@io.coerce_to_string_io
def _load_tuning_frequency(fhandle: TextIO) -> float:
"""Load tuning frequency of the track with re
Args:
fhandle (str or file-like): path or file-like object pointing to a notes annotation file
Returns:
tuning_frequency (float): returns new tuning frequency considering the deviation
"""
# Compute tuning frequency
cents_deviation = float(next(csv.reader(fhandle, delimiter=","))[0])
tuning_frequency = 440 * (
2 ** (cents_deviation / 1200)
) # Frequency of A (common value is 440Hz)
return tuning_frequency
def _midi_to_hz(midi_note, tuning_deviation):
"""Function to convert MIDI to Hz with certain tuning freq
Args:
midi_note (float): note represented in midi value
tuning_deviation (float): deviation in cents with respect to 440Hz
Returns:
(float): note in Hz considering the new tuning frequency
"""
tuning_frequency = 440 * (
2 ** (tuning_deviation / 1200)
) # Frequency of A (common value is 440Hz)
return (tuning_frequency / 32) * (2 ** ((midi_note - 9) / 12))
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
    """
    The TONAS dataset
    """
    def __init__(self, data_home=None, version="default"):
        # Register the dataset with mirdata core; BIBTEX, INDEXES, etc. are
        # module-level constants defined elsewhere in this file.
        super().__init__(
            data_home,
            version,
            name="tonas",
            track_class=Track,
            bibtex=BIBTEX,
            indexes=INDEXES,
            download_info=DOWNLOAD_INFO,
            license_info=LICENSE_INFO,
        )
    @core.cached_property
    def _metadata(self):
        # Parse TONAS-Metadata.txt (tab-separated: <file>.wav, style, title,
        # singer) into a dict keyed by track id (filename without .wav).
        metadata_path = os.path.join(self.data_home, "TONAS-Metadata.txt")
        metadata = {}
        try:
            with open(metadata_path, "r", errors="ignore") as f:
                reader = csv.reader(
                    (x.replace("\0", "") for x in f), delimiter="\t"
                )  # Fix wrong byte
                for line in reader:
                    if line:  # Do not consider empty lines
                        index = line[0].replace(".wav", "")
                        metadata[index] = {
                            "style": line[1],
                            "title": line[2],
                            "singer": line[3],
                        }
        except FileNotFoundError:
            raise FileNotFoundError("Metadata not found. Did you run .download()?")
        return metadata
    @deprecated(
        reason="Use mirdata.datasets.tonas.load_audio",
        version="0.3.4",
    )
    def load_audio(self, *args, **kwargs):
        # Thin deprecated wrapper around the module-level load_audio
        return load_audio(*args, **kwargs)
    @deprecated(
        reason="Use mirdata.datasets.tonas.load_f0",
        version="0.3.4",
    )
    def load_f0(self, *args, **kwargs):
        # Thin deprecated wrapper around the module-level load_f0
        return load_f0(*args, **kwargs)
    @deprecated(
        reason="Use mirdata.datasets.tonas.load_notes",
        version="0.3.4",
    )
    def load_notes(self, *args, **kwargs):
        # Thin deprecated wrapper around the module-level load_notes
        return load_notes(*args, **kwargs)
| [
"numpy.genfromtxt",
"logging.warning",
"mirdata.core.Index",
"smart_open.open",
"os.path.join",
"mirdata.core.docstring_inherit",
"numpy.array",
"deprecated.sphinx.deprecated",
"mirdata.jams_utils.jams_converter",
"csv.reader",
"librosa.load"
] | [((10911, 10947), 'mirdata.core.docstring_inherit', 'core.docstring_inherit', (['core.Dataset'], {}), '(core.Dataset)\n', (10933, 10947), False, 'from mirdata import annotations, jams_utils, core, io\n'), ((3345, 3388), 'mirdata.core.Index', 'core.Index', ([], {'filename': '"""tonas_index_1.0.json"""'}), "(filename='tonas_index_1.0.json')\n", (3355, 3388), False, 'from mirdata import annotations, jams_utils, core, io\n'), ((7583, 7625), 'librosa.load', 'librosa.load', (['fhandle'], {'sr': '(44100)', 'mono': '(True)'}), '(fhandle, sr=44100, mono=True)\n', (7595, 7625), False, 'import librosa\n'), ((8459, 8518), 'numpy.array', 'np.array', (['(freqs_corr if corrected else freqs)'], {'dtype': '"""float"""'}), "(freqs_corr if corrected else freqs, dtype='float')\n", (8467, 8518), True, 'import numpy as np\n'), ((8538, 8571), 'numpy.array', 'np.array', (['energies'], {'dtype': '"""float"""'}), "(energies, dtype='float')\n", (8546, 8571), True, 'import numpy as np\n'), ((9210, 9244), 'csv.reader', 'csv.reader', (['fhandle'], {'delimiter': '""","""'}), "(fhandle, delimiter=',')\n", (9220, 9244), False, 'import csv\n'), ((12232, 12307), 'deprecated.sphinx.deprecated', 'deprecated', ([], {'reason': '"""Use mirdata.datasets.tonas.load_audio"""', 'version': '"""0.3.4"""'}), "(reason='Use mirdata.datasets.tonas.load_audio', version='0.3.4')\n", (12242, 12307), False, 'from deprecated.sphinx import deprecated\n'), ((12423, 12495), 'deprecated.sphinx.deprecated', 'deprecated', ([], {'reason': '"""Use mirdata.datasets.tonas.load_f0"""', 'version': '"""0.3.4"""'}), "(reason='Use mirdata.datasets.tonas.load_f0', version='0.3.4')\n", (12433, 12495), False, 'from deprecated.sphinx import deprecated\n'), ((12605, 12680), 'deprecated.sphinx.deprecated', 'deprecated', ([], {'reason': '"""Use mirdata.datasets.tonas.load_notes"""', 'version': '"""0.3.4"""'}), "(reason='Use mirdata.datasets.tonas.load_notes', version='0.3.4')\n", (12615, 12680), False, 'from deprecated.sphinx import 
deprecated\n'), ((6578, 6727), 'logging.warning', 'logging.warning', (['"""Track.f0 is deprecated as of 0.3.4 and will be removed in a future version. Use Track.f0_automatic or Track.f0_corrected"""'], {}), "(\n 'Track.f0 is deprecated as of 0.3.4 and will be removed in a future version. Use Track.f0_automatic or Track.f0_corrected'\n )\n", (6593, 6727), False, 'import logging\n'), ((7085, 7253), 'mirdata.jams_utils.jams_converter', 'jams_utils.jams_converter', ([], {'audio_path': 'self.audio_path', 'f0_data': "[(self.f0, 'pitch_contour')]", 'note_data': "[(self.notes, 'note_hz')]", 'metadata': 'self._track_metadata'}), "(audio_path=self.audio_path, f0_data=[(self.f0,\n 'pitch_contour')], note_data=[(self.notes, 'note_hz')], metadata=self.\n _track_metadata)\n", (7110, 7253), False, 'from mirdata import annotations, jams_utils, core, io\n'), ((8172, 8188), 'smart_open.open', 'open', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (8176, 8188), False, 'from smart_open import open\n'), ((8218, 8240), 'numpy.genfromtxt', 'np.genfromtxt', (['fhandle'], {}), '(fhandle)\n', (8231, 8240), True, 'import numpy as np\n'), ((8665, 8695), 'numpy.array', 'np.array', (['times'], {'dtype': '"""float"""'}), "(times, dtype='float')\n", (8673, 8695), True, 'import numpy as np\n'), ((9587, 9621), 'numpy.array', 'np.array', (['intervals'], {'dtype': '"""float"""'}), "(intervals, dtype='float')\n", (9595, 9621), True, 'import numpy as np\n'), ((9644, 9676), 'numpy.array', 'np.array', (['pitches'], {'dtype': '"""float"""'}), "(pitches, dtype='float')\n", (9652, 9676), True, 'import numpy as np\n'), ((9700, 9731), 'numpy.array', 'np.array', (['energy'], {'dtype': '"""float"""'}), "(energy, dtype='float')\n", (9708, 9731), True, 'import numpy as np\n'), ((11424, 11474), 'os.path.join', 'os.path.join', (['self.data_home', '"""TONAS-Metadata.txt"""'], {}), "(self.data_home, 'TONAS-Metadata.txt')\n", (11436, 11474), False, 'import os\n'), ((10194, 10228), 'csv.reader', 'csv.reader', 
(['fhandle'], {'delimiter': '""","""'}), "(fhandle, delimiter=',')\n", (10204, 10228), False, 'import csv\n'), ((11528, 11569), 'smart_open.open', 'open', (['metadata_path', '"""r"""'], {'errors': '"""ignore"""'}), "(metadata_path, 'r', errors='ignore')\n", (11532, 11569), False, 'from smart_open import open\n')] |
import os
import shutil
from functools import partial
import neptune
import numpy as np
import pandas as pd
from attrdict import AttrDict
from steppy.adapter import Adapter, E
from steppy.base import IdentityOperation, Step
from common_blocks import augmentation as aug
from common_blocks import metrics
from common_blocks import models
from common_blocks import pipelines
from common_blocks import postprocessing
from common_blocks.utils import io, misc
CTX = neptune.Context()
LOGGER = misc.init_logger()
# ______ ______ .__ __. _______ __ _______ _______.
# / | / __ \ | \ | | | ____|| | / _____| / |
# | ,----'| | | | | \| | | |__ | | | | __ | (----`
# | | | | | | | . ` | | __| | | | | |_ | \ \
# | `----.| `--' | | |\ | | | | | | |__| | .----) |
# \______| \______/ |__| \__| |__| |__| \______| |_______/
#
# Where checkpoints, validation results and the submission are written.
EXPERIMENT_DIR = '/output/experiment'
CLONE_EXPERIMENT_DIR_FROM = ''  # When running eval in the cloud specify this as for example /input/SHIP-14/output/experiment
# Set to True to wipe a stale EXPERIMENT_DIR before a fresh run.
OVERWRITE_EXPERIMENT_DIR = False
DEV_MODE = False  # When True, work on tiny data samples for quick iteration
USE_TTA = True  # Apply test-time augmentation in the inference pipeline
INFERENCE_WITH_SHIP_NO_SHIP = True  # Gate segmentation behind the ship/no-ship classifier
if OVERWRITE_EXPERIMENT_DIR and os.path.isdir(EXPERIMENT_DIR):
    shutil.rmtree(EXPERIMENT_DIR)
if CLONE_EXPERIMENT_DIR_FROM != '':
    if os.path.exists(EXPERIMENT_DIR):
        shutil.rmtree(EXPERIMENT_DIR)
    shutil.copytree(CLONE_EXPERIMENT_DIR_FROM, EXPERIMENT_DIR)
# Neptune offline runs read parameters from the local yaml config instead.
if CTX.params.__class__.__name__ == 'OfflineContextParams':
    PARAMS = misc.read_yaml().parameters
else:
    PARAMS = CTX.params
MEAN = [0.485, 0.456, 0.406]  # ImageNet channel means used for normalization
STD = [0.229, 0.224, 0.225]  # ImageNet channel stds used for normalization
CHUNK_SIZE_SHIP_NO_SHIP = 2500  # Images per inference chunk (classifier stage)
CHUNK_SIZE_SEGMENTATION = 2500  # Images per inference chunk (segmentation stage)
SEED = 1234
# Metadata column names used throughout the script.
ID_COLUMN = 'id'
ID_BIG_IMAGE = 'BigImageId'
IS_NOT_EMPTY_COLUMN = 'is_not_empty'
X_COLUMN = 'file_path_image'
Y_COLUMN = 'file_path_mask'
# Single nested configuration consumed by every pipeline builder below.
CONFIG = AttrDict({
    'execution': {'experiment_dir': EXPERIMENT_DIR,
                  'num_workers': PARAMS.num_workers,
                  'num_threads': PARAMS.num_threads
                  },
    'general': {'img_H-W': (PARAMS.image_h, PARAMS.image_w),
                'loader_mode': PARAMS.loader_mode,
                'num_classes': 2,
                'original_size': (768, 768),
                },
    # Which metadata columns feed each network's data loader.
    'meta_reader': {
        'segmentation_network': {'x_columns': [X_COLUMN],
                                 'y_columns': [Y_COLUMN, IS_NOT_EMPTY_COLUMN],
                                 },
        'ship_no_ship_network': {'x_columns': [X_COLUMN],
                                 'y_columns': [IS_NOT_EMPTY_COLUMN],
                                 },
    },
    # Loader variants: 'resize' for training/plain inference,
    # 'resize_tta' adds test-time-augmentation hooks.
    'loaders': {'resize': {'dataset_params': {'h': PARAMS.image_h,
                                              'w': PARAMS.image_w,
                                              'sns_h': PARAMS.sns_image_h,
                                              'sns_w': PARAMS.sns_image_w,
                                              'image_source': PARAMS.image_source,
                                              'target_format': PARAMS.target_format,
                                              'empty_fraction': PARAMS.training_sampler_empty_fraction,
                                              'sample_size': PARAMS.training_sampler_size,
                                              'sns_empty_fraction': PARAMS.sns_training_sampler_empty_fracion,
                                              'MEAN': MEAN,
                                              'STD': STD
                                              },
                           'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
                                                          'shuffle': False,
                                                          'num_workers': PARAMS.num_workers,
                                                          'pin_memory': PARAMS.pin_memory
                                                          },
                                             'inference': {'batch_size': PARAMS.batch_size_inference,
                                                           'shuffle': False,
                                                           'num_workers': PARAMS.num_workers,
                                                           'pin_memory': PARAMS.pin_memory
                                                           },
                                             },
                           'augmentation_params': {'image_augment_train': aug.intensity_seq,
                                                   'image_augment_with_target_train': aug.resize_seq(
                                                       resize_target_size=PARAMS.resize_target_size),
                                                   'image_augment_inference': aug.resize_to_fit_net(
                                                       resize_target_size=PARAMS.resize_target_size),
                                                   'image_augment_with_target_inference': aug.resize_to_fit_net(
                                                       resize_target_size=PARAMS.resize_target_size)
                                                   },
                           },
                'resize_tta': {'dataset_params': {'h': PARAMS.image_h,
                                                  'w': PARAMS.image_w,
                                                  'image_source': PARAMS.image_source,
                                                  'target_format': PARAMS.target_format,
                                                  'empty_fraction': PARAMS.training_sampler_empty_fraction,
                                                  'sample_size': PARAMS.training_sampler_size,
                                                  'MEAN': MEAN,
                                                  'STD': STD
                                                  },
                               'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
                                                              'shuffle': False,
                                                              'num_workers': PARAMS.num_workers,
                                                              'pin_memory': PARAMS.pin_memory
                                                              },
                                                 'inference': {'batch_size': PARAMS.batch_size_inference,
                                                               'shuffle': False,
                                                               'num_workers': PARAMS.num_workers,
                                                               'pin_memory': PARAMS.pin_memory
                                                               },
                                                 },
                               'augmentation_params': {
                                   'image_augment_inference': aug.resize_to_fit_net(
                                       resize_target_size=PARAMS.resize_target_size),
                                   'image_augment_with_target_inference': aug.resize_to_fit_net(
                                       resize_target_size=PARAMS.resize_target_size),
                                   'tta_transform': aug.test_time_augmentation_transform
                               },
                               },
                },
    # Per-network architecture, training and callback settings.
    'model': {
        'segmentation_network': {
            'architecture_config': {'model_params': {'in_channels': PARAMS.image_channels,
                                                     'out_channels': PARAMS.network_output_channels,
                                                     'architecture': PARAMS.architecture,
                                                     'encoder': PARAMS.encoder,
                                                     'activation': PARAMS.network_activation,
                                                     },
                                    'optimizer_params': {'lr': PARAMS.lr,
                                                         },
                                    'regularizer_params': {'regularize': True,
                                                           'weight_decay_conv2d': PARAMS.l2_reg_conv,
                                                           },
                                    'weights_init': {'function': 'xavier',
                                                     },
                                    },
            'training_config': {'epochs': PARAMS.epochs_nr,
                                'shuffle': True,
                                'batch_size': PARAMS.batch_size_train,
                                'fine_tuning': PARAMS.fine_tuning,
                                },
            'callbacks_config': {'model_checkpoint': {
                'filepath': os.path.join(EXPERIMENT_DIR, 'checkpoints', 'segmentation_network', 'best.torch'),
                'epoch_every': 1,
                'metric_name': PARAMS.validation_metric_name,
                'minimize': PARAMS.minimize_validation_metric},
                "one_cycle_scheduler": {
                    "enabled": PARAMS.use_one_cycle,
                    "number_of_batches_per_full_cycle": PARAMS.one_cycle_number_of_batches_per_full_cycle,
                    "max_lr": PARAMS.one_cycle_max_lr,
                    "momentum_range": (0.95, 0.8),
                    "prcnt_annihilate": 10,
                    "div": 10
                },
                'exponential_lr_scheduler': {'gamma': PARAMS.gamma,
                                             'epoch_every': 1},
                'reduce_lr_on_plateau_scheduler': {'metric_name': PARAMS.validation_metric_name,
                                                   'minimize': PARAMS.minimize_validation_metric,
                                                   'reduce_factor': PARAMS.reduce_factor,
                                                   'reduce_patience': PARAMS.reduce_patience,
                                                   'min_lr': PARAMS.min_lr},
                'training_monitor': {'batch_every': 1,
                                     'epoch_every': 1},
                'experiment_timing': {'batch_every': 10,
                                      'epoch_every': 1},
                'validation_monitor': {'epoch_every': 1,
                                       'data_dir': PARAMS.train_images_dir,
                                       'loader_mode': PARAMS.loader_mode},
                'neptune_monitor': {'model_name': 'network',
                                    'image_nr': 16,
                                    'image_resize': 1.0,
                                    'image_every': 1},
                'early_stopping': {'patience': PARAMS.patience,
                                   'metric_name': PARAMS.validation_metric_name,
                                   'minimize': PARAMS.minimize_validation_metric},
            }
        },
        'ship_no_ship_network': {
            'architecture_config': {'model_params': {'architecture': PARAMS.sns_architecture,
                                                     'activation': 'sigmoid'},
                                    'optimizer_params': {'lr': PARAMS.sns_lr,
                                                         },
                                    'regularizer_params': {'regularize': True,
                                                           'weight_decay_conv2d': PARAMS.sns_l2_reg_conv,
                                                           },
                                    'weights_init': {'function': 'xavier',
                                                     },
                                    },
            'training_config': {'epochs': PARAMS.sns_epochs_nr,
                                'shuffle': True,
                                'batch_size': PARAMS.sns_batch_size_train,
                                'fine_tuning': PARAMS.fine_tuning,
                                },
            'callbacks_config': {'model_checkpoint': {
                'filepath': os.path.join(EXPERIMENT_DIR, 'checkpoints', 'ship_no_ship_network', 'best.torch'),
                'epoch_every': 1,
                'metric_name': PARAMS.sns_validation_metric_name,
                'minimize': PARAMS.sns_minimize_validation_metric
            },
                "one_cycle_scheduler": {
                    "enabled": PARAMS.sns_use_one_cycle,
                    "number_of_batches_per_full_cycle": PARAMS.sns_one_cycle_number_of_batches_per_full_cycle,
                    "max_lr": PARAMS.sns_one_cycle_max_lr,
                    "momentum_range": (0.95, 0.7),
                    "prcnt_annihilate": 10,
                    "div": 10
                },
                'exponential_lr_scheduler': {'gamma': PARAMS.gamma,
                                             'epoch_every': 1},
                'reduce_lr_on_plateau_scheduler': {'metric_name': PARAMS.sns_validation_metric_name,
                                                   'minimize': PARAMS.sns_minimize_validation_metric,
                                                   'reduce_factor': PARAMS.reduce_factor,
                                                   'reduce_patience': PARAMS.reduce_patience,
                                                   'min_lr': PARAMS.min_lr},
                'training_monitor': {'batch_every': 10,
                                     'epoch_every': 1},
                'experiment_timing': {'batch_every': 10,
                                      'epoch_every': 1},
                'validation_monitor': {'epoch_every': 1,
                                       'data_dir': PARAMS.train_images_dir,
                                       'loader_mode': PARAMS.loader_mode},
                'neptune_monitor': {'model_name': 'network',
                                    'image_nr': 16,
                                    'image_resize': 1.0,
                                    'image_every': 1},
                'early_stopping': {'patience': PARAMS.patience,
                                   'metric_name': PARAMS.validation_metric_name,
                                   'minimize': PARAMS.minimize_validation_metric},
            }
        },
    },
    # Test-time-augmentation generation and aggregation settings.
    'tta_generator': {'flip_ud': True,
                      'flip_lr': True,
                      'rotation': True,
                      'color_shift_runs': False},
    'tta_aggregator': {'tta_inverse_transform': aug.test_time_augmentation_inverse_transform,
                       'method': PARAMS.tta_aggregation_method,
                       'nthreads': PARAMS.num_threads
                       },
    'thresholder': {'threshold_masks': PARAMS.threshold_masks,
                    'threshold_ship_no_ship': PARAMS.sns_threshold,
                    },
})
# .______ __ .______ _______ __ __ .__ __. _______ _______.
# | _ \ | | | _ \ | ____|| | | | | \ | | | ____| / |
# | |_) | | | | |_) | | |__ | | | | | \| | | |__ | (----`
# | ___/ | | | ___/ | __| | | | | | . ` | | __| \ \
# | | | | | | | |____ | `----.| | | |\ | | |____.----) |
# | _| |__| | _| |_______||_______||__| |__| \__| |_______|_______/
#
def ship_no_ship_pipeline(config, suffix='_ship_no_ship', train_mode=True):
    """Build the ship/no-ship classification (sub)pipeline.

    Args:
        config (AttrDict): global pipeline configuration (CONFIG).
        suffix (str): suffix appended to preprocessing step names so they do
            not clash with the segmentation pipeline's steps.
        train_mode (bool): if True, return the trainable network step;
            otherwise append a thresholding step mapping raw predictions to
            class ids under 'classes'.

    Returns:
        steppy Step: the network step (train) or the class-prediction step
        (inference).
    """
    if train_mode:
        preprocessing = pipelines.preprocessing_binary_train(config, model_name='ship_no_ship_network', suffix=suffix)
    else:
        preprocessing = pipelines.preprocessing_binary_inference(config, model_name='ship_no_ship_network',
                                                 suffix=suffix)
    preprocessing.set_parameters_upstream({'is_fittable': False})
    sns_network = misc.FineTuneStep(name='ship_no_ship_network',
                                  transformer=models.BinaryModel(**config.model['ship_no_ship_network']),
                                  input_steps=[preprocessing],
                                  )
    sns_network.set_mode_train()
    sns_network.set_parameters_upstream({'experiment_directory': config.execution.experiment_dir,
                                         })
    sns_network.force_fitting = False
    # Consistency fix: read fine_tuning from this network's OWN training
    # config (it previously read segmentation_network's; both carry
    # PARAMS.fine_tuning, so behavior is unchanged, but the two pipelines
    # no longer depend on each other's config sections).
    sns_network.fine_tuning = config.model.ship_no_ship_network.training_config.fine_tuning
    if train_mode:
        return sns_network
    # Inference: threshold raw predictions into discrete class ids.
    class_prediction = Step(name='class_prediction',
                           transformer=misc.make_apply_transformer(
                               partial(postprocessing.get_class,
                                       threshold=config.thresholder.threshold_ship_no_ship),
                               output_name='classes',
                               apply_on=['predictions']),
                           input_steps=[sns_network],
                           adapter=Adapter({'predictions': E(sns_network.name, 'ship_no_ship_prediction'),
                                            }),
                           is_fittable=False
                           )
    return class_prediction
def train_segmentation_pipeline(config):
    """Assemble the trainable segmentation pipeline (preprocessing + network)."""
    prep = pipelines.preprocessing_train(config, model_name='segmentation_network')
    # Route the preprocessed generators and the validation split into the network
    wiring = Adapter({
        'datagen': E(prep.name, 'datagen'),
        'validation_datagen': E(prep.name, 'validation_datagen'),
        'meta_valid': E('callback_input', 'meta_valid'),
    })
    network = misc.FineTuneStep(
        name='segmentation_network',
        transformer=models.SegmentationModel(**config.model['segmentation_network']),
        input_data=['callback_input'],
        input_steps=[prep],
        adapter=wiring,
    )
    network.set_mode_train()
    network.set_parameters_upstream({'experiment_directory': config.execution.experiment_dir,
                                     })
    network.force_fitting = False
    network.fine_tuning = config.model.segmentation_network.training_config.fine_tuning
    return network
def inference_segmentation_pipeline(config):
    """Build the inference segmentation pipeline.

    Depending on the module-level USE_TTA flag, network predictions are
    either aggregated over test-time augmentations or taken directly;
    masks are then resized back to the original image size, binarized,
    labeled and post-processed.
    """
    # Choose how to undo the loader's geometric transform on output masks
    if config.general.loader_mode == 'resize_and_pad':
        size_adjustment_function = partial(postprocessing.crop_image, target_size=config.general.original_size)
    elif config.general.loader_mode == 'resize' or config.general.loader_mode == 'stacking':
        size_adjustment_function = partial(postprocessing.resize_image, target_size=config.general.original_size)
    else:
        raise NotImplementedError
    if USE_TTA:
        # TTA branch: predict on each augmented view, then aggregate
        preprocessing, tta_generator = pipelines.preprocessing_inference_tta(config, model_name='segmentation_network')
        segmentation_network = Step(name='segmentation_network',
                                    transformer=models.SegmentationModel(**config.model['segmentation_network']),
                                    input_steps=[preprocessing])
        tta_aggregator = pipelines.aggregator('tta_aggregator', segmentation_network,
                                             tta_generator=tta_generator,
                                             config=config.tta_aggregator)
        # Rename the aggregator output so downstream steps see 'mask_prediction'
        prediction_renamed = Step(name='prediction_renamed',
                                  transformer=IdentityOperation(),
                                  input_steps=[tta_aggregator],
                                  adapter=Adapter({'mask_prediction': E(tta_aggregator.name, 'aggregated_prediction')
                                                   }))
        mask_resize = Step(name='mask_resize',
                           transformer=misc.make_apply_transformer(size_adjustment_function,
                                                                    output_name='resized_images',
                                                                    apply_on=['images'],
                                                                    n_threads=config.execution.num_threads,
                                                                    ),
                           input_steps=[prediction_renamed],
                           adapter=Adapter({'images': E(prediction_renamed.name, 'mask_prediction'),
                                            }))
    else:
        # Plain branch: single forward pass per image
        preprocessing = pipelines.preprocessing_inference(config, model_name='segmentation_network')
        segmentation_network = misc.FineTuneStep(name='segmentation_network',
                                            transformer=models.SegmentationModel(
                                                **config.model['segmentation_network']),
                                            input_steps=[preprocessing],
                                            )
        mask_resize = Step(name='mask_resize',
                           transformer=misc.make_apply_transformer(size_adjustment_function,
                                                                    output_name='resized_images',
                                                                    apply_on=['images'],
                                                                    n_threads=config.execution.num_threads,
                                                                    ),
                           input_steps=[segmentation_network],
                           adapter=Adapter({'images': E(segmentation_network.name, 'mask_prediction'),
                                            }),
                           )
    # Threshold the soft masks into binary masks
    binarizer = Step(name='binarizer',
                     transformer=misc.make_apply_transformer(
                         partial(postprocessing.binarize, threshold=config.thresholder.threshold_masks),
                         output_name='binarized_images',
                         apply_on=['images'],
                         n_threads=config.execution.num_threads
                     ),
                     input_steps=[mask_resize],
                     adapter=Adapter({'images': E(mask_resize.name, 'resized_images'),
                                      }))
    # Assign a distinct label to each connected component
    labeler = Step(name='labeler',
                   transformer=misc.make_apply_transformer(postprocessing.label,
                                                            output_name='labeled_images',
                                                            apply_on=['images'],
                                                            n_threads=config.execution.num_threads,
                                                            ),
                   input_steps=[binarizer],
                   adapter=Adapter({'images': E(binarizer.name, 'binarized_images'),
                                    }))
    mask_postprocessing = Step(name='mask_postprocessing',
                               transformer=misc.make_apply_transformer(postprocessing.mask_postprocessing,
                                                                        output_name='labeled_images',
                                                                        apply_on=['images'],
                                                                        n_threads=config.execution.num_threads,
                                                                        ),
                               input_steps=[labeler],
                               adapter=Adapter({'images': E(labeler.name, 'labeled_images'),
                                                }))
    mask_postprocessing.set_mode_inference()
    mask_postprocessing.set_parameters_upstream({'experiment_directory': config.execution.experiment_dir,
                                                 'is_fittable': False
                                                 })
    # Only the network itself loads trained weights; everything else is stateless
    segmentation_network.is_fittable = True
    return mask_postprocessing
# __________ ___ _______ ______ __ __ .___________. __ ______ .__ __.
# | ____\ \ / / | ____| / || | | | | || | / __ \ | \ | |
# | |__ \ V / | |__ | ,----'| | | | `---| |----`| | | | | | | \| |
# | __| > < | __| | | | | | | | | | | | | | | | . ` |
# | |____ / . \ | |____ | `----.| `--' | | | | | | `--' | | |\ |
# |_______/__/ \__\ |_______| \______| \______/ |__| |__| \______/ |__| \__|
#
def train_ship_no_ship():
    """Fit the ship/no-ship classifier on the training split."""
    all_meta = pd.read_csv(PARAMS.metadata_filepath)
    train_meta = add_big_image_id(all_meta[all_meta['is_train'] == 1])
    # Group-aware split so crops of the same big image stay on one side
    train_df, valid_df = misc.train_test_split_with_empty_fraction_with_groups(
        train_meta,
        groups=train_meta[ID_BIG_IMAGE],
        empty_fraction=PARAMS.evaluation_empty_fraction,
        test_size=PARAMS.evaluation_size,
        shuffle=True,
        random_state=SEED)
    # Deterministically shuffle both splits
    train_df = train_df.sample(frac=1, random_state=SEED)
    valid_df = valid_df.sample(frac=1, random_state=SEED)
    if DEV_MODE:
        train_df = train_df.sample(PARAMS.dev_mode_size, random_state=SEED)
        valid_df = valid_df.sample(int(PARAMS.dev_mode_size / 2), random_state=SEED)
    data = {'input': {'meta': train_df
                      },
            'callback_input': {'meta_valid': valid_df
                               }
            }
    sns_pipe = ship_no_ship_pipeline(config=CONFIG, train_mode=True)
    sns_pipe.fit_transform(data)
def train():
    """Fit the segmentation network on the training split."""
    all_meta = pd.read_csv(PARAMS.metadata_filepath)
    train_meta = add_big_image_id(all_meta[all_meta['is_train'] == 1])
    # Group-aware split so crops of the same big image stay on one side
    train_df, valid_df = misc.train_test_split_with_empty_fraction_with_groups(
        train_meta,
        groups=train_meta[ID_BIG_IMAGE],
        empty_fraction=PARAMS.evaluation_empty_fraction,
        test_size=PARAMS.evaluation_size,
        shuffle=True,
        random_state=SEED)
    # Validate only on non-empty images (fixed-size deterministic sample)
    valid_df = valid_df[valid_df[IS_NOT_EMPTY_COLUMN] == 1].sample(
        PARAMS.in_train_evaluation_size, random_state=SEED)
    if DEV_MODE:
        train_df = train_df.sample(PARAMS.dev_mode_size, random_state=SEED)
        valid_df = valid_df.sample(int(PARAMS.dev_mode_size / 2), random_state=SEED)
    data = {'input': {'meta': train_df
                      },
            'callback_input': {'meta_valid': valid_df
                               }
            }
    pipeline = train_segmentation_pipeline(config=CONFIG)
    pipeline.fit_transform(data)
def evaluate():
    """Score the current models on the held-out validation split.

    Computes per-image F2, logs and sends the mean to neptune, and writes
    a per-image results table under EXPERIMENT_DIR.
    """
    all_meta = pd.read_csv(PARAMS.metadata_filepath)
    train_meta = add_big_image_id(all_meta[all_meta['is_train'] == 1])
    split_kwargs = dict(empty_fraction=PARAMS.evaluation_empty_fraction,
                        test_size=PARAMS.evaluation_size,
                        shuffle=True,
                        random_state=SEED)
    _, valid_meta = misc.train_test_split_with_empty_fraction_with_groups(
        train_meta, groups=train_meta[ID_BIG_IMAGE], **split_kwargs)
    if DEV_MODE:
        # Shrink the validation split once more for fast iteration
        _, valid_meta = misc.train_test_split_with_empty_fraction_with_groups(
            valid_meta, groups=valid_meta[ID_BIG_IMAGE], **split_kwargs)
    segm_pipe = inference_segmentation_pipeline(config=CONFIG)
    valid_ids = valid_meta[ID_COLUMN] + '.jpg'
    if INFERENCE_WITH_SHIP_NO_SHIP:
        # Stage 1: classifier drops empty images; segment only the rest
        sns_pipe = ship_no_ship_pipeline(config=CONFIG, train_mode=False)
        ids_ship, ids_no_ship = predict_ship_no_ship(valid_meta, sns_pipe, CHUNK_SIZE_SHIP_NO_SHIP)
        ship_meta = valid_meta[valid_ids.isin(ids_ship)]
        ship_prediction = generate_submission(ship_meta, segm_pipe, CHUNK_SIZE_SEGMENTATION)
        prediction = misc.combine_two_stage_predictions(ids_no_ship, ship_prediction, valid_ids)
    else:
        prediction = generate_submission(valid_meta, segm_pipe, CHUNK_SIZE_SEGMENTATION)
    gt = io.read_gt_subset(PARAMS.annotation_file, valid_ids)
    f2_per_image, image_ids = metrics.f_beta_metric(gt, prediction, beta=2, apply_mean=False)
    f2 = np.mean(f2_per_image)
    LOGGER.info('f2 {}'.format(f2))
    CTX.channel_send('f2', 0, f2)
    LOGGER.info('preparing results')
    results = misc.prepare_results(gt, prediction, valid_meta, f2_per_image, image_ids)
    results_filepath = os.path.join(EXPERIMENT_DIR, 'validation_results.csv')
    results.to_csv(results_filepath, index=None)
def predict():
    """Generate the test-set submission csv under EXPERIMENT_DIR."""
    all_meta = pd.read_csv(PARAMS.metadata_filepath)
    test_meta = all_meta[all_meta['is_train'] == 0]
    if DEV_MODE:
        test_meta = test_meta.sample(PARAMS.dev_mode_size, random_state=SEED)
    segm_pipe = inference_segmentation_pipeline(config=CONFIG)
    test_ids = test_meta[ID_COLUMN] + '.jpg'
    if INFERENCE_WITH_SHIP_NO_SHIP:
        # Stage 1: classifier drops empty images; segment only the rest
        sns_pipe = ship_no_ship_pipeline(config=CONFIG, train_mode=False)
        ids_ship, ids_no_ship = predict_ship_no_ship(test_meta, sns_pipe, CHUNK_SIZE_SHIP_NO_SHIP)
        ship_meta = test_meta[test_ids.isin(ids_ship)]
        ship_prediction = generate_submission(ship_meta, segm_pipe, CHUNK_SIZE_SEGMENTATION)
        submission = misc.combine_two_stage_predictions(ids_no_ship, ship_prediction, test_ids)
    else:
        submission = generate_submission(test_meta, segm_pipe, CHUNK_SIZE_SEGMENTATION)
    submission_filepath = os.path.join(EXPERIMENT_DIR, 'submission.csv')
    submission.to_csv(submission_filepath, index=None, encoding='utf-8')
    LOGGER.info('submission saved to {}'.format(submission_filepath))
    LOGGER.info('submission head \n\n{}'.format(submission.head()))
# __ __ .___________. __ __ _______.
# | | | | | || | | | / |
# | | | | `---| |----`| | | | | (----`
# | | | | | | | | | | \ \
# | `--' | | | | | | `----.----) |
# \______/ |__| |__| |_______|_______/
#
def add_big_image_id(meta):
    """Join per-image metadata with the big-image grouping ids (on ImageId)."""
    groups = pd.read_csv('big-images-ids_v2.csv')
    # NOTE: mutates `meta` in place by adding the ImageId column
    meta['ImageId'] = meta[ID_COLUMN] + '.jpg'
    return pd.merge(meta, groups, on='ImageId')
def generate_submission(meta_data, pipeline, chunk_size):
    """Run the segmentation pipeline over meta_data, chunked when chunk_size is given."""
    if chunk_size is None:
        return _generate_submission(meta_data, pipeline)
    return _generate_submission_in_chunks(meta_data, pipeline, chunk_size)
def _generate_submission(meta_data, pipeline):
    """Predict masks for meta_data and format them as a submission frame."""
    labels = _generate_prediction(meta_data, pipeline)
    return misc.create_submission(meta_data[ID_COLUMN] + '.jpg', labels)
def _generate_submission_in_chunks(meta_data, pipeline, chunk_size):
    """Predict and format submissions chunk by chunk, then concatenate."""
    parts = []
    for chunk in misc.generate_data_frame_chunks(meta_data, chunk_size):
        chunk_labels = _generate_prediction(chunk, pipeline)
        parts.append(misc.create_submission(chunk[ID_COLUMN] + '.jpg', chunk_labels))
    return pd.concat(parts)
def _generate_prediction(meta_data, pipeline):
data = {'input': {'meta': meta_data,
},
'callback_input': {'meta_valid': None
}
}
output = pipeline.transform(data)
y_pred = output['labeled_images']
return y_pred
def predict_ship_no_ship(meta_data, pipeline, chunk_size):
    """Classify images as ship/no-ship, chunked when chunk_size is given."""
    if chunk_size is None:
        return _predict_ship_no_ship(meta_data, pipeline)
    return _predict_ship_no_ship_in_chunks(meta_data, pipeline, chunk_size)
def _predict_ship_no_ship(meta_data, pipeline):
    """Split image ids into (ship, no-ship) based on the classifier output."""
    classes = _generate_prediction_ship_no_ship(meta_data, pipeline)
    return misc.get_ship_no_ship_ids(meta_data[ID_COLUMN] + '.jpg', classes)
def _predict_ship_no_ship_in_chunks(meta_data, pipeline, chunk_size):
    """Chunked variant of _predict_ship_no_ship; aggregates ids across chunks."""
    with_ships, without_ships = [], []
    for chunk in misc.generate_data_frame_chunks(meta_data, chunk_size):
        chunk_classes = _generate_prediction_ship_no_ship(chunk, pipeline)
        chunk_ship, chunk_no_ship = misc.get_ship_no_ship_ids(chunk[ID_COLUMN] + '.jpg', chunk_classes)
        with_ships.extend(chunk_ship)
        without_ships.extend(chunk_no_ship)
    return with_ships, without_ships
def _generate_prediction_ship_no_ship(meta_data, pipeline):
data = {'input': {'meta': meta_data,
},
'callback_input': {'meta_valid': None
}
}
output = pipeline.transform(data)
y_pred = output['classes']
return y_pred
# .___ ___. ___ __ .__ __.
# | \/ | / \ | | | \ | |
# | \ / | / ^ \ | | | \| |
# | |\/| | / /_\ \ | | | . ` |
# | | | | / _____ \ | | | |\ |
# |__| |__| /__/ \__\ |__| |__| \__|
#
if __name__ == '__main__':
    # Full two-stage experiment: train the ship/no-ship classifier, train the
    # segmentation network, evaluate on the validation split, then produce
    # the test submission.
    train_ship_no_ship()
    train()
    evaluate()
    predict()
| [
"pandas.read_csv",
"steppy.adapter.E",
"common_blocks.utils.misc.generate_data_frame_chunks",
"common_blocks.augmentation.resize_to_fit_net",
"common_blocks.metrics.f_beta_metric",
"os.path.exists",
"numpy.mean",
"common_blocks.utils.misc.read_yaml",
"common_blocks.models.BinaryModel",
"common_blo... | [((464, 481), 'neptune.Context', 'neptune.Context', ([], {}), '()\n', (479, 481), False, 'import neptune\n'), ((491, 509), 'common_blocks.utils.misc.init_logger', 'misc.init_logger', ([], {}), '()\n', (507, 509), False, 'from common_blocks.utils import io, misc\n'), ((1227, 1256), 'os.path.isdir', 'os.path.isdir', (['EXPERIMENT_DIR'], {}), '(EXPERIMENT_DIR)\n', (1240, 1256), False, 'import os\n'), ((1262, 1291), 'shutil.rmtree', 'shutil.rmtree', (['EXPERIMENT_DIR'], {}), '(EXPERIMENT_DIR)\n', (1275, 1291), False, 'import shutil\n'), ((1335, 1365), 'os.path.exists', 'os.path.exists', (['EXPERIMENT_DIR'], {}), '(EXPERIMENT_DIR)\n', (1349, 1365), False, 'import os\n'), ((1409, 1467), 'shutil.copytree', 'shutil.copytree', (['CLONE_EXPERIMENT_DIR_FROM', 'EXPERIMENT_DIR'], {}), '(CLONE_EXPERIMENT_DIR_FROM, EXPERIMENT_DIR)\n', (1424, 1467), False, 'import shutil\n'), ((17421, 17493), 'common_blocks.pipelines.preprocessing_train', 'pipelines.preprocessing_train', (['config'], {'model_name': '"""segmentation_network"""'}), "(config, model_name='segmentation_network')\n", (17450, 17493), False, 'from common_blocks import pipelines\n'), ((25046, 25083), 'pandas.read_csv', 'pd.read_csv', (['PARAMS.metadata_filepath'], {}), '(PARAMS.metadata_filepath)\n', (25057, 25083), True, 'import pandas as pd\n'), ((25217, 25446), 'common_blocks.utils.misc.train_test_split_with_empty_fraction_with_groups', 'misc.train_test_split_with_empty_fraction_with_groups', (['meta_train'], {'groups': 'meta_train[ID_BIG_IMAGE]', 'empty_fraction': 'PARAMS.evaluation_empty_fraction', 'test_size': 'PARAMS.evaluation_size', 'shuffle': '(True)', 'random_state': 'SEED'}), '(meta_train, groups=\n meta_train[ID_BIG_IMAGE], empty_fraction=PARAMS.\n evaluation_empty_fraction, test_size=PARAMS.evaluation_size, shuffle=\n True, random_state=SEED)\n', (25270, 25446), False, 'from common_blocks.utils import io, misc\n'), ((26678, 26715), 'pandas.read_csv', 'pd.read_csv', 
(['PARAMS.metadata_filepath'], {}), '(PARAMS.metadata_filepath)\n', (26689, 26715), True, 'import pandas as pd\n'), ((26849, 27078), 'common_blocks.utils.misc.train_test_split_with_empty_fraction_with_groups', 'misc.train_test_split_with_empty_fraction_with_groups', (['meta_train'], {'groups': 'meta_train[ID_BIG_IMAGE]', 'empty_fraction': 'PARAMS.evaluation_empty_fraction', 'test_size': 'PARAMS.evaluation_size', 'shuffle': '(True)', 'random_state': 'SEED'}), '(meta_train, groups=\n meta_train[ID_BIG_IMAGE], empty_fraction=PARAMS.\n evaluation_empty_fraction, test_size=PARAMS.evaluation_size, shuffle=\n True, random_state=SEED)\n', (26902, 27078), False, 'from common_blocks.utils import io, misc\n'), ((28306, 28343), 'pandas.read_csv', 'pd.read_csv', (['PARAMS.metadata_filepath'], {}), '(PARAMS.metadata_filepath)\n', (28317, 28343), True, 'import pandas as pd\n'), ((28462, 28691), 'common_blocks.utils.misc.train_test_split_with_empty_fraction_with_groups', 'misc.train_test_split_with_empty_fraction_with_groups', (['meta_train'], {'groups': 'meta_train[ID_BIG_IMAGE]', 'empty_fraction': 'PARAMS.evaluation_empty_fraction', 'test_size': 'PARAMS.evaluation_size', 'shuffle': '(True)', 'random_state': 'SEED'}), '(meta_train, groups=\n meta_train[ID_BIG_IMAGE], empty_fraction=PARAMS.\n evaluation_empty_fraction, test_size=PARAMS.evaluation_size, shuffle=\n True, random_state=SEED)\n', (28515, 28691), False, 'from common_blocks.utils import io, misc\n'), ((30411, 30463), 'common_blocks.utils.io.read_gt_subset', 'io.read_gt_subset', (['PARAMS.annotation_file', 'valid_ids'], {}), '(PARAMS.annotation_file, valid_ids)\n', (30428, 30463), False, 'from common_blocks.utils import io, misc\n'), ((30494, 30557), 'common_blocks.metrics.f_beta_metric', 'metrics.f_beta_metric', (['gt', 'prediction'], {'beta': '(2)', 'apply_mean': '(False)'}), '(gt, prediction, beta=2, apply_mean=False)\n', (30515, 30557), False, 'from common_blocks import metrics\n'), ((30567, 30588), 'numpy.mean', 
'np.mean', (['f2_per_image'], {}), '(f2_per_image)\n', (30574, 30588), True, 'import numpy as np\n'), ((30711, 30790), 'common_blocks.utils.misc.prepare_results', 'misc.prepare_results', (['gt', 'prediction', 'meta_valid_split', 'f2_per_image', 'image_ids'], {}), '(gt, prediction, meta_valid_split, f2_per_image, image_ids)\n', (30731, 30790), False, 'from common_blocks.utils import io, misc\n'), ((30814, 30868), 'os.path.join', 'os.path.join', (['EXPERIMENT_DIR', '"""validation_results.csv"""'], {}), "(EXPERIMENT_DIR, 'validation_results.csv')\n", (30826, 30868), False, 'import os\n'), ((30946, 30983), 'pandas.read_csv', 'pd.read_csv', (['PARAMS.metadata_filepath'], {}), '(PARAMS.metadata_filepath)\n', (30957, 30983), True, 'import pandas as pd\n'), ((31822, 31868), 'os.path.join', 'os.path.join', (['EXPERIMENT_DIR', '"""submission.csv"""'], {}), "(EXPERIMENT_DIR, 'submission.csv')\n", (31834, 31868), False, 'import os\n'), ((32447, 32483), 'pandas.read_csv', 'pd.read_csv', (['"""big-images-ids_v2.csv"""'], {}), "('big-images-ids_v2.csv')\n", (32458, 32483), True, 'import pandas as pd\n'), ((32549, 32592), 'pandas.merge', 'pd.merge', (['meta', 'big_image_ids'], {'on': '"""ImageId"""'}), "(meta, big_image_ids, on='ImageId')\n", (32557, 32592), True, 'import pandas as pd\n'), ((32978, 33043), 'common_blocks.utils.misc.create_submission', 'misc.create_submission', (["(meta_data[ID_COLUMN] + '.jpg')", 'prediction'], {}), "(meta_data[ID_COLUMN] + '.jpg', prediction)\n", (33000, 33043), False, 'from common_blocks.utils import io, misc\n'), ((33180, 33234), 'common_blocks.utils.misc.generate_data_frame_chunks', 'misc.generate_data_frame_chunks', (['meta_data', 'chunk_size'], {}), '(meta_data, chunk_size)\n', (33211, 33234), False, 'from common_blocks.utils import io, misc\n'), ((33468, 33490), 'pandas.concat', 'pd.concat', (['submissions'], {}), '(submissions)\n', (33477, 33490), True, 'import pandas as pd\n'), ((34209, 34277), 
'common_blocks.utils.misc.get_ship_no_ship_ids', 'misc.get_ship_no_ship_ids', (["(meta_data[ID_COLUMN] + '.jpg')", 'prediction'], {}), "(meta_data[ID_COLUMN] + '.jpg', prediction)\n", (34234, 34277), False, 'from common_blocks.utils import io, misc\n'), ((34440, 34494), 'common_blocks.utils.misc.generate_data_frame_chunks', 'misc.generate_data_frame_chunks', (['meta_data', 'chunk_size'], {}), '(meta_data, chunk_size)\n', (34471, 34494), False, 'from common_blocks.utils import io, misc\n'), ((1375, 1404), 'shutil.rmtree', 'shutil.rmtree', (['EXPERIMENT_DIR'], {}), '(EXPERIMENT_DIR)\n', (1388, 1404), False, 'import shutil\n'), ((1542, 1558), 'common_blocks.utils.misc.read_yaml', 'misc.read_yaml', ([], {}), '()\n', (1556, 1558), False, 'from common_blocks.utils import io, misc\n'), ((15599, 15698), 'common_blocks.pipelines.preprocessing_binary_train', 'pipelines.preprocessing_binary_train', (['config'], {'model_name': '"""ship_no_ship_network"""', 'suffix': 'suffix'}), "(config, model_name=\n 'ship_no_ship_network', suffix=suffix)\n", (15635, 15698), False, 'from common_blocks import pipelines\n'), ((15728, 15831), 'common_blocks.pipelines.preprocessing_binary_inference', 'pipelines.preprocessing_binary_inference', (['config'], {'model_name': '"""ship_no_ship_network"""', 'suffix': 'suffix'}), "(config, model_name=\n 'ship_no_ship_network', suffix=suffix)\n", (15768, 15831), False, 'from common_blocks import pipelines\n'), ((18911, 18987), 'functools.partial', 'partial', (['postprocessing.crop_image'], {'target_size': 'config.general.original_size'}), '(postprocessing.crop_image, target_size=config.general.original_size)\n', (18918, 18987), False, 'from functools import partial\n'), ((19295, 19380), 'common_blocks.pipelines.preprocessing_inference_tta', 'pipelines.preprocessing_inference_tta', (['config'], {'model_name': '"""segmentation_network"""'}), "(config, model_name='segmentation_network'\n )\n", (19332, 19380), False, 'from common_blocks import pipelines\n'), 
((19647, 19771), 'common_blocks.pipelines.aggregator', 'pipelines.aggregator', (['"""tta_aggregator"""', 'segmentation_network'], {'tta_generator': 'tta_generator', 'config': 'config.tta_aggregator'}), "('tta_aggregator', segmentation_network, tta_generator=\n tta_generator, config=config.tta_aggregator)\n", (19667, 19771), False, 'from common_blocks import pipelines\n'), ((20974, 21050), 'common_blocks.pipelines.preprocessing_inference', 'pipelines.preprocessing_inference', (['config'], {'model_name': '"""segmentation_network"""'}), "(config, model_name='segmentation_network')\n", (21007, 21050), False, 'from common_blocks import pipelines\n'), ((29045, 29285), 'common_blocks.utils.misc.train_test_split_with_empty_fraction_with_groups', 'misc.train_test_split_with_empty_fraction_with_groups', (['meta_valid_split'], {'groups': 'meta_valid_split[ID_BIG_IMAGE]', 'empty_fraction': 'PARAMS.evaluation_empty_fraction', 'test_size': 'PARAMS.evaluation_size', 'shuffle': '(True)', 'random_state': 'SEED'}), '(meta_valid_split,\n groups=meta_valid_split[ID_BIG_IMAGE], empty_fraction=PARAMS.\n evaluation_empty_fraction, test_size=PARAMS.evaluation_size, shuffle=\n True, random_state=SEED)\n', (29098, 29285), False, 'from common_blocks.utils import io, misc\n'), ((30220, 30295), 'common_blocks.utils.misc.combine_two_stage_predictions', 'misc.combine_two_stage_predictions', (['ids_no_ship', 'prediction_ship', 'valid_ids'], {}), '(ids_no_ship, prediction_ship, valid_ids)\n', (30254, 30295), False, 'from common_blocks.utils import io, misc\n'), ((31622, 31696), 'common_blocks.utils.misc.combine_two_stage_predictions', 'misc.combine_two_stage_predictions', (['ids_no_ship', 'prediction_ship', 'test_ids'], {}), '(ids_no_ship, prediction_ship, test_ids)\n', (31656, 31696), False, 'from common_blocks.utils import io, misc\n'), ((33333, 33405), 'common_blocks.utils.misc.create_submission', 'misc.create_submission', (["(meta_chunk[ID_COLUMN] + '.jpg')", 'prediction_chunk'], {}), 
"(meta_chunk[ID_COLUMN] + '.jpg', prediction_chunk)\n", (33355, 33405), False, 'from common_blocks.utils import io, misc\n'), ((34623, 34698), 'common_blocks.utils.misc.get_ship_no_ship_ids', 'misc.get_ship_no_ship_ids', (["(meta_chunk[ID_COLUMN] + '.jpg')", 'prediction_chunk'], {}), "(meta_chunk[ID_COLUMN] + '.jpg', prediction_chunk)\n", (34648, 34698), False, 'from common_blocks.utils import io, misc\n'), ((16072, 16130), 'common_blocks.models.BinaryModel', 'models.BinaryModel', ([], {}), "(**config.model['ship_no_ship_network'])\n", (16090, 16130), False, 'from common_blocks import models\n'), ((17626, 17690), 'common_blocks.models.SegmentationModel', 'models.SegmentationModel', ([], {}), "(**config.model['segmentation_network'])\n", (17650, 17690), False, 'from common_blocks import models\n'), ((19116, 19194), 'functools.partial', 'partial', (['postprocessing.resize_image'], {'target_size': 'config.general.original_size'}), '(postprocessing.resize_image, target_size=config.general.original_size)\n', (19123, 19194), False, 'from functools import partial\n'), ((22829, 22979), 'common_blocks.utils.misc.make_apply_transformer', 'misc.make_apply_transformer', (['postprocessing.label'], {'output_name': '"""labeled_images"""', 'apply_on': "['images']", 'n_threads': 'config.execution.num_threads'}), "(postprocessing.label, output_name=\n 'labeled_images', apply_on=['images'], n_threads=config.execution.\n num_threads)\n", (22856, 22979), False, 'from common_blocks.utils import io, misc\n'), ((23480, 23644), 'common_blocks.utils.misc.make_apply_transformer', 'misc.make_apply_transformer', (['postprocessing.mask_postprocessing'], {'output_name': '"""labeled_images"""', 'apply_on': "['images']", 'n_threads': 'config.execution.num_threads'}), "(postprocessing.mask_postprocessing, output_name\n ='labeled_images', apply_on=['images'], n_threads=config.execution.\n num_threads)\n", (23507, 23644), False, 'from common_blocks.utils import io, misc\n'), ((19490, 19554), 
'common_blocks.models.SegmentationModel', 'models.SegmentationModel', ([], {}), "(**config.model['segmentation_network'])\n", (19514, 19554), False, 'from common_blocks import models\n'), ((19968, 19987), 'steppy.base.IdentityOperation', 'IdentityOperation', ([], {}), '()\n', (19985, 19987), False, 'from steppy.base import IdentityOperation, Step\n'), ((20313, 20467), 'common_blocks.utils.misc.make_apply_transformer', 'misc.make_apply_transformer', (['size_adjustment_function'], {'output_name': '"""resized_images"""', 'apply_on': "['images']", 'n_threads': 'config.execution.num_threads'}), "(size_adjustment_function, output_name=\n 'resized_images', apply_on=['images'], n_threads=config.execution.\n num_threads)\n", (20340, 20467), False, 'from common_blocks.utils import io, misc\n'), ((21191, 21255), 'common_blocks.models.SegmentationModel', 'models.SegmentationModel', ([], {}), "(**config.model['segmentation_network'])\n", (21215, 21255), False, 'from common_blocks import models\n'), ((21528, 21682), 'common_blocks.utils.misc.make_apply_transformer', 'misc.make_apply_transformer', (['size_adjustment_function'], {'output_name': '"""resized_images"""', 'apply_on': "['images']", 'n_threads': 'config.execution.num_threads'}), "(size_adjustment_function, output_name=\n 'resized_images', apply_on=['images'], n_threads=config.execution.\n num_threads)\n", (21555, 21682), False, 'from common_blocks.utils import io, misc\n'), ((22314, 22392), 'functools.partial', 'partial', (['postprocessing.binarize'], {'threshold': 'config.thresholder.threshold_masks'}), '(postprocessing.binarize, threshold=config.thresholder.threshold_masks)\n', (22321, 22392), False, 'from functools import partial\n'), ((4640, 4700), 'common_blocks.augmentation.resize_seq', 'aug.resize_seq', ([], {'resize_target_size': 'PARAMS.resize_target_size'}), '(resize_target_size=PARAMS.resize_target_size)\n', (4654, 4700), True, 'from common_blocks import augmentation as aug\n'), ((4836, 4903), 
'common_blocks.augmentation.resize_to_fit_net', 'aug.resize_to_fit_net', ([], {'resize_target_size': 'PARAMS.resize_target_size'}), '(resize_target_size=PARAMS.resize_target_size)\n', (4857, 4903), True, 'from common_blocks import augmentation as aug\n'), ((5051, 5118), 'common_blocks.augmentation.resize_to_fit_net', 'aug.resize_to_fit_net', ([], {'resize_target_size': 'PARAMS.resize_target_size'}), '(resize_target_size=PARAMS.resize_target_size)\n', (5072, 5118), True, 'from common_blocks import augmentation as aug\n'), ((7012, 7079), 'common_blocks.augmentation.resize_to_fit_net', 'aug.resize_to_fit_net', ([], {'resize_target_size': 'PARAMS.resize_target_size'}), '(resize_target_size=PARAMS.resize_target_size)\n', (7033, 7079), True, 'from common_blocks import augmentation as aug\n'), ((7195, 7262), 'common_blocks.augmentation.resize_to_fit_net', 'aug.resize_to_fit_net', ([], {'resize_target_size': 'PARAMS.resize_target_size'}), '(resize_target_size=PARAMS.resize_target_size)\n', (7216, 7262), True, 'from common_blocks import augmentation as aug\n'), ((16764, 16855), 'functools.partial', 'partial', (['postprocessing.get_class'], {'threshold': 'config.thresholder.threshold_ship_no_ship'}), '(postprocessing.get_class, threshold=config.thresholder.\n threshold_ship_no_ship)\n', (16771, 16855), False, 'from functools import partial\n'), ((17965, 17997), 'steppy.adapter.E', 'E', (['preprocessing.name', '"""datagen"""'], {}), "(preprocessing.name, 'datagen')\n", (17966, 17997), False, 'from steppy.adapter import Adapter, E\n'), ((18083, 18126), 'steppy.adapter.E', 'E', (['preprocessing.name', '"""validation_datagen"""'], {}), "(preprocessing.name, 'validation_datagen')\n", (18084, 18126), False, 'from steppy.adapter import Adapter, E\n'), ((18290, 18323), 'steppy.adapter.E', 'E', (['"""callback_input"""', '"""meta_valid"""'], {}), "('callback_input', 'meta_valid')\n", (18291, 18323), False, 'from steppy.adapter import Adapter, E\n'), ((22681, 22718), 
'steppy.adapter.E', 'E', (['mask_resize.name', '"""resized_images"""'], {}), "(mask_resize.name, 'resized_images')\n", (22682, 22718), False, 'from steppy.adapter import Adapter, E\n'), ((23299, 23336), 'steppy.adapter.E', 'E', (['binarizer.name', '"""binarized_images"""'], {}), "(binarizer.name, 'binarized_images')\n", (23300, 23336), False, 'from steppy.adapter import Adapter, E\n'), ((24034, 24067), 'steppy.adapter.E', 'E', (['labeler.name', '"""labeled_images"""'], {}), "(labeler.name, 'labeled_images')\n", (24035, 24067), False, 'from steppy.adapter import Adapter, E\n'), ((8953, 9038), 'os.path.join', 'os.path.join', (['EXPERIMENT_DIR', '"""checkpoints"""', '"""segmentation_network"""', '"""best.torch"""'], {}), "(EXPERIMENT_DIR, 'checkpoints', 'segmentation_network',\n 'best.torch')\n", (8965, 9038), False, 'import os\n'), ((12232, 12317), 'os.path.join', 'os.path.join', (['EXPERIMENT_DIR', '"""checkpoints"""', '"""ship_no_ship_network"""', '"""best.torch"""'], {}), "(EXPERIMENT_DIR, 'checkpoints', 'ship_no_ship_network',\n 'best.torch')\n", (12244, 12317), False, 'import os\n'), ((17141, 17187), 'steppy.adapter.E', 'E', (['sns_network.name', '"""ship_no_ship_prediction"""'], {}), "(sns_network.name, 'ship_no_ship_prediction')\n", (17142, 17187), False, 'from steppy.adapter import Adapter, E\n'), ((20123, 20170), 'steppy.adapter.E', 'E', (['tta_aggregator.name', '"""aggregated_prediction"""'], {}), "(tta_aggregator.name, 'aggregated_prediction')\n", (20124, 20170), False, 'from steppy.adapter import Adapter, E\n'), ((20844, 20889), 'steppy.adapter.E', 'E', (['prediction_renamed.name', '"""mask_prediction"""'], {}), "(prediction_renamed.name, 'mask_prediction')\n", (20845, 20889), False, 'from steppy.adapter import Adapter, E\n'), ((22061, 22108), 'steppy.adapter.E', 'E', (['segmentation_network.name', '"""mask_prediction"""'], {}), "(segmentation_network.name, 'mask_prediction')\n", (22062, 22108), False, 'from steppy.adapter import Adapter, E\n')] |
# @Author: <NAME> <narsi>
# @Date: 2020-01-18T20:22:17-06:00
# @Email: <EMAIL>
# @Last modified by: narsi
# @Last modified time: 2020-01-18T20:24:43-06:00
import numpy as np
import json
from PIL import Image
from .msssim import MultiScaleSSIM
from tqdm import tqdm
def evaluate2(submission_images, target_images, settings={}):
"""
Calculates metrics for the given images.
"""
if settings is None:
settings = {}
metrics = settings.get('metrics', ['PSNR', 'MSSSIM'])
num_dims = 0
sqerror_values = []
msssim_values = []
for img1, img0 in tqdm(zip(submission_images, target_images)):
image0 = np.asarray(Image.open(img0).convert('RGB'), dtype=np.float32)
image1 = np.asarray(Image.open(img1).convert('RGB'), dtype=np.float32)
num_dims += image0.size
if 'PSNR' in metrics:
sqerror_values.append(mse(image1, image0))
if 'MSSSIM' in metrics:
msssim_values.append(msssim(image0, image1) * image0.size)
results = {}
if 'PSNR' in metrics:
results['PSNR'] = mse2psnr(np.sum(sqerror_values) / num_dims)
if 'MSSSIM' in metrics:
results['MSSSIM'] = np.sum(msssim_values) / num_dims
return results
def evaluate(submission_images, target_images, settings={}):
"""
Calculates metrics for the given images.
"""
if settings is None:
settings = {}
metrics = settings.get('metrics', ['PSNR', 'MSSSIM'])
num_dims = 0
sqerror_values = []
msssim_values = []
for name in target_images:
image0 = np.asarray(Image.open(target_images[name]).convert('RGB'), dtype=np.float32)
image1 = np.asarray(Image.open(submission_images[name]).convert('RGB'), dtype=np.float32)
num_dims += image0.size
if 'PSNR' in metrics:
sqerror_values.append(mse(image1, image0))
if 'MSSSIM' in metrics:
msssim_values.append(msssim(image0, image1) * image0.size)
results = {}
if 'PSNR' in metrics:
results['PSNR'] = mse2psnr(np.sum(sqerror_values) / num_dims)
if 'MSSSIM' in metrics:
results['MSSSIM'] = np.sum(msssim_values) / num_dims
return results
def mse(image0, image1):
return np.sum(np.square(image1 - image0))
def mse2psnr(mse):
return 20. * np.log10(255.) - 10. * np.log10(mse)
def msssim(image0, image1):
return MultiScaleSSIM(image0[None], image1[None])
| [
"numpy.sum",
"numpy.log10",
"PIL.Image.open",
"numpy.square"
] | [((2032, 2058), 'numpy.square', 'np.square', (['(image1 - image0)'], {}), '(image1 - image0)\n', (2041, 2058), True, 'import numpy as np\n'), ((1082, 1103), 'numpy.sum', 'np.sum', (['msssim_values'], {}), '(msssim_values)\n', (1088, 1103), True, 'import numpy as np\n'), ((1940, 1961), 'numpy.sum', 'np.sum', (['msssim_values'], {}), '(msssim_values)\n', (1946, 1961), True, 'import numpy as np\n'), ((2095, 2110), 'numpy.log10', 'np.log10', (['(255.0)'], {}), '(255.0)\n', (2103, 2110), True, 'import numpy as np\n'), ((2118, 2131), 'numpy.log10', 'np.log10', (['mse'], {}), '(mse)\n', (2126, 2131), True, 'import numpy as np\n'), ((1000, 1022), 'numpy.sum', 'np.sum', (['sqerror_values'], {}), '(sqerror_values)\n', (1006, 1022), True, 'import numpy as np\n'), ((1858, 1880), 'numpy.sum', 'np.sum', (['sqerror_values'], {}), '(sqerror_values)\n', (1864, 1880), True, 'import numpy as np\n'), ((622, 638), 'PIL.Image.open', 'Image.open', (['img0'], {}), '(img0)\n', (632, 638), False, 'from PIL import Image\n'), ((695, 711), 'PIL.Image.open', 'Image.open', (['img1'], {}), '(img1)\n', (705, 711), False, 'from PIL import Image\n'), ((1446, 1477), 'PIL.Image.open', 'Image.open', (['target_images[name]'], {}), '(target_images[name])\n', (1456, 1477), False, 'from PIL import Image\n'), ((1534, 1569), 'PIL.Image.open', 'Image.open', (['submission_images[name]'], {}), '(submission_images[name])\n', (1544, 1569), False, 'from PIL import Image\n')] |
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import unittest
import numpy as np
from chi import plots
from chi.library import DataLibrary
class TestResidualPlot(unittest.TestCase):
"""
Tests the chi.plots.ResidualPlot class.
"""
@classmethod
def setUpClass(cls):
# Create test datasets
cls.measurements = DataLibrary().lung_cancer_control_group()
cls.data = cls.measurements.rename(
columns={'Measurement': 'Sample'})
# Create test figure
cls.fig = plots.ResidualPlot(cls.measurements)
def test_bad_instantiation(self):
# Create data of wrong type
measurements = np.ones(shape=(10, 4))
with self.assertRaisesRegex(TypeError, 'Measurements has to be'):
plots.ResidualPlot(measurements)
# Wrong ID key
with self.assertRaisesRegex(ValueError, 'Measurements does not have'):
plots.ResidualPlot(self.measurements, id_key='Wrong')
# Wrong time key
with self.assertRaisesRegex(ValueError, 'Measurements does not have'):
plots.ResidualPlot(self.measurements, time_key='Wrong')
# Wrong biomarker key
with self.assertRaisesRegex(ValueError, 'Measurements does not have'):
plots.ResidualPlot(self.measurements, biom_key='Wrong')
# Wrong measurement key
with self.assertRaisesRegex(ValueError, 'Measurements does not have'):
plots.ResidualPlot(self.measurements, meas_key='Wrong')
def test_add_data_wrong_data_type(self):
# Create data of wrong type
data = np.ones(shape=(10, 4))
with self.assertRaisesRegex(TypeError, 'Data has to be'):
self.fig.add_data(data)
def test_add_data_wrong_biomarker(self):
# Biomarker does not exist in prediction dataframe
biomarker = 'Does not exist'
with self.assertRaisesRegex(ValueError, 'The biomarker could not be'):
self.fig.add_data(self.data, biomarker)
# Biomarker does not exist in measurement dataframe
data = self.data.copy()
data['Biomarker'] = 'Does not exist'
biomarker = 'Does not exist'
with self.assertRaisesRegex(ValueError, 'The biomarker <Does not'):
self.fig.add_data(data, biomarker)
def test_add_data_wrong_individual(self):
individual = 'does not exist'
self.assertRaisesRegex(
ValueError, 'The ID <does not exist> does not exist.',
self.fig.add_data, self.data, individual=individual)
def test_add_data_wrong_time_key(self):
# Rename time key
data = self.data.rename(columns={'Time': 'SOME NON-STANDARD KEY'})
self.assertRaisesRegex(
ValueError, 'Data does not have the key <Time>.',
self.fig.add_data, data)
def test_add_data_wrong_biom_key(self):
# Rename biomarker key
data = self.data.rename(columns={'Biomarker': 'SOME NON-STANDARD KEY'})
self.assertRaisesRegex(
ValueError, 'Data does not have the key <Biomarker>.',
self.fig.add_data, data)
def test_add_data_wrong_sample_key(self):
# Rename measurement key
data = self.data.rename(
columns={'Sample': 'SOME NON-STANDARD KEY'})
self.assertRaisesRegex(
ValueError, 'Data does not have the key <Sample>.',
self.fig.add_data, data)
def test_add_data_time_key_mapping(self):
# Rename time key
data = self.data.rename(columns={'Time': 'SOME NON-STANDARD KEY'})
# Test that it works with correct mapping
self.fig.add_data(
data, time_key='SOME NON-STANDARD KEY')
# Test that it fails with wrong mapping
with self.assertRaisesRegex(
ValueError, 'Data does not have the key <SOME WRONG KEY>.'):
self.fig.add_data(
data, time_key='SOME WRONG KEY')
def test_add_data_biom_key_mapping(self):
# Rename biomarker key
data = self.data.rename(columns={'Biomarker': 'SOME NON-STANDARD KEY'})
# Test that it works with correct mapping
self.fig.add_data(
data, biom_key='SOME NON-STANDARD KEY')
# Test that it fails with wrong mapping
with self.assertRaisesRegex(
ValueError, 'Data does not have the key <SOME WRONG KEY>.'):
self.fig.add_data(
data, biom_key='SOME WRONG KEY')
def test_add_data_sample_key_mapping(self):
# Rename measurement key
data = self.data.rename(
columns={'Sample': 'SOME NON-STANDARD KEY'})
# Test that it works with correct mapping
self.fig.add_data(
data, sample_key='SOME NON-STANDARD KEY')
# Test that it fails with wrong mapping
with self.assertRaisesRegex(
ValueError, 'Data does not have the key <SOME WRONG KEY>.'):
self.fig.add_data(
data, sample_key='SOME WRONG KEY')
def test_add_data_show_relative(self):
self.fig.add_data(self.data, show_relative=True)
def test_data_wrong_time_points(self):
# Not all measured time points can be found in the prediction
# dataframe
data = self.data.copy()
data['Time'] = 1
with self.assertRaisesRegex(
ValueError, 'The prediction dataframe is not'):
self.fig.add_data(data)
def test_add_data_individual(self):
# Select an individual
self.fig.add_data(self.data, individual=40)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"chi.plots.ResidualPlot",
"chi.library.DataLibrary",
"numpy.ones"
] | [((5779, 5794), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5792, 5794), False, 'import unittest\n'), ((693, 729), 'chi.plots.ResidualPlot', 'plots.ResidualPlot', (['cls.measurements'], {}), '(cls.measurements)\n', (711, 729), False, 'from chi import plots\n'), ((828, 850), 'numpy.ones', 'np.ones', ([], {'shape': '(10, 4)'}), '(shape=(10, 4))\n', (835, 850), True, 'import numpy as np\n'), ((1767, 1789), 'numpy.ones', 'np.ones', ([], {'shape': '(10, 4)'}), '(shape=(10, 4))\n', (1774, 1789), True, 'import numpy as np\n'), ((937, 969), 'chi.plots.ResidualPlot', 'plots.ResidualPlot', (['measurements'], {}), '(measurements)\n', (955, 969), False, 'from chi import plots\n'), ((1085, 1138), 'chi.plots.ResidualPlot', 'plots.ResidualPlot', (['self.measurements'], {'id_key': '"""Wrong"""'}), "(self.measurements, id_key='Wrong')\n", (1103, 1138), False, 'from chi import plots\n'), ((1256, 1311), 'chi.plots.ResidualPlot', 'plots.ResidualPlot', (['self.measurements'], {'time_key': '"""Wrong"""'}), "(self.measurements, time_key='Wrong')\n", (1274, 1311), False, 'from chi import plots\n'), ((1434, 1489), 'chi.plots.ResidualPlot', 'plots.ResidualPlot', (['self.measurements'], {'biom_key': '"""Wrong"""'}), "(self.measurements, biom_key='Wrong')\n", (1452, 1489), False, 'from chi import plots\n'), ((1614, 1669), 'chi.plots.ResidualPlot', 'plots.ResidualPlot', (['self.measurements'], {'meas_key': '"""Wrong"""'}), "(self.measurements, meas_key='Wrong')\n", (1632, 1669), False, 'from chi import plots\n'), ((512, 525), 'chi.library.DataLibrary', 'DataLibrary', ([], {}), '()\n', (523, 525), False, 'from chi.library import DataLibrary\n')] |
"""
This data implement model's metric
:paper author: hxq
:code author: hxq
:code convert: shy
"""
import math
from sklearn import metrics
import numpy as np
import bottleneck as bn
def ndcg_binary_at_k_batch(x_pred, heldout_batch, k=100):
"""
normalized discounted cumulative gain@k for binary relevance
ASSUMPTIONS: all the 0's in heldout_data indicate 0 relevance
"""
batch_users = x_pred.shape[0]
idx_topk_part = bn.argpartition(-x_pred, k, axis=1) # shape和x_pred一样
topk_part = x_pred[np.arange(batch_users)[:, np.newaxis],
idx_topk_part[:, :k]] # 所有用户的topk item同时挑出来
idx_part = np.argsort(-topk_part, axis=1)
# X_pred[np.arange(batch_users)[:, np.newaxis], idx_topk] is the sorted
# topk predicted score
idx_topk = idx_topk_part[np.arange(batch_users)[:, np.newaxis], idx_part]
# build the discount template
tset_pre = 1. / np.log2(np.arange(2, k + 2))
dcg = (heldout_batch[np.arange(batch_users)[:, np.newaxis],
idx_topk].toarray() * tset_pre).sum(axis=1)
idcg = np.array([(tset_pre[:min(n, k)]).sum()
for n in heldout_batch.getnnz(axis=1)])
ndcg = dcg / idcg
ndcg[np.isnan(ndcg)] = 0
return ndcg
def precision_recall_at_k_batch(x_pred, heldout_batch, k=100,
observe_fair=False, attr_indicator_list=None):
"""[summary]
Args:
x_pred ([type]): [description]
heldout_batch ([type]): [description]
k (int, optional): [description]. Defaults to 100.
observe_fair (bool, optional): [description]. Defaults to False.
attr_indicator_list ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
print(observe_fair, attr_indicator_list)
batch_users = x_pred.shape[0]
idx = bn.argpartition(-x_pred, k, axis=1)
x_pred_binary = np.zeros_like(x_pred, dtype=bool)
x_pred_binary[np.arange(batch_users)[:, np.newaxis], idx[:, :k]] = True
x_true_binary = (heldout_batch > 0).toarray()
# ranked_list = x_pred_binary
tmp = (np.logical_and(x_true_binary, x_pred_binary).sum(axis=1)).astype(
np.float32)
recall = tmp / x_true_binary.sum(axis=1)
precision = tmp / k
precision[np.isnan(precision)] = 0
recall[np.isnan(recall)] = 0
f1_recall = 2 * recall * precision / (precision + recall)
f1_recall[np.isnan(f1_recall)] = 0
return precision, recall, f1_recall
def update_threshold(x_pred, id_onehots_ph, threshold_ph, k=100):
"""[summary]
Args:
x_pred ([type]): [description]
id_onehots_ph ([type]): [description]
threshold_ph ([type]): [description]
k (int, optional): [description]. Defaults to 100.
Returns:
[type]: [description]
"""
batch_users = x_pred.shape[0]
idx = bn.argpartition(-x_pred, k, axis=1)
#epsion = 1e-10
#threshold_ph_batch = x_pred[:, idx[:, k]]-epsion
#print('shape(threshold_ph_batch)', threshold_ph_batch.shape)
threshold_ph[np.nonzero(id_onehots_ph)[1]] = \
x_pred[np.arange(batch_users), idx[:, k]].reshape(-1, 1)
#threshold_ph = np.dot(threshold_ph.T, id_onehots_ph.toarray())
return threshold_ph
def average_precision(ranked_list, ground_truth):
"""Compute the average precision (AP) of a list of ranked items
"""
hits = 0
sum_precs = 0
for index in range(len(ranked_list)):
if ranked_list[index] in ground_truth:
hits += 1
sum_precs += hits / (index + 1.0)
if hits > 0:
return sum_precs / len(ground_truth)
return 0
def hit(gt_items, pred_items): # HR为所有用户的hits/所有用户的grounf truth总个数
"""[summary]
Args:
gt_items ([type]): [description]
pred_items ([type]): [description]
Returns:
[type]: [description]
"""
count = 0
for item in pred_items:
if item in gt_items:
count += 1
return count
def auc(label, prob): # prob 为预测为正的概率
"""[summary]
Args:
label ([type]): [description]
prob ([type]): [description]
Returns:
[type]: [description]
"""
precision, recall, thresholds = metrics.precision_recall_curve(label, prob)
print(thresholds)
area = metrics.auc(recall, precision)
return area
# sklearn
# precision, recall, _thresholds = metrics.precision_recall_curve(label, prob)
# area = metrics.auc(recall, precision)
# return area
# area = metrics.roc_auc_score(label, prob)
# return area
def hit_precision_recall_ndcg_k(train_set_batch, test_set_batch,
pred_scores_batch, max_train_count,
k=20, ranked_tag=False, vad_set_batch=None):
"""[summary]
Args:
train_set_batch ([type]): [description]
test_set_batch ([type]): [description]
pred_scores_batch ([type]): [description]
max_train_count ([type]): [description]
k (int, optional): [description]. Defaults to 20.
ranked_tag (bool, optional): [description]. Defaults to False.
vad_set_batch ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
"""
recall_k, precision_k, ndcg_k, hits_list = [], [], [], []
if not ranked_tag:
batch_users = pred_scores_batch.shape[0]
idx_topk_part = bn.argpartition(-pred_scores_batch, k+max_train_count, axis=1)
topk_part = pred_scores_batch[np.arange(batch_users)[:, np.newaxis],
idx_topk_part[:, :(k+max_train_count)]]
idx_part = np.argsort(-topk_part, axis=1)
top_items = idx_topk_part[np.arange(batch_users)[:, np.newaxis], idx_part]
else:
top_items = pred_scores_batch
if vad_set_batch is None:
for train_set, test_set, ranked in zip(train_set_batch, test_set_batch, top_items):
n_k = k if len(test_set) > k else len(test_set) # n_k = min(k, len(test_k))
n_idcg, n_dcg = 0, 0
for pos in range(n_k):
n_idcg += 1.0 / math.log(pos + 2, 2)
tops_sub_train = []
n_top_items = 0
for val in ranked:
if val not in train_set:
tops_sub_train.append(val)
n_top_items += 1
if n_top_items >= k: # 控制topK个item是从用户没交互过的商品中选的
break
hits_set = [(idx, item_id) for idx, item_id in enumerate(tops_sub_train) if item_id in test_set]
cnt_hits = len(hits_set)
for idx in range(cnt_hits):
n_dcg += 1.0 / math.log(hits_set[idx][0] + 2, 2)
precision_k.append(float(cnt_hits / k))
recall_k.append(float(cnt_hits / len(test_set)))
ndcg_k.append(float(n_dcg / n_idcg))
hits_list.append(cnt_hits)
else:
hits_list, precision_k, recall_k, ndcg_k = \
calc_second(train_set_batch, test_set_batch, top_items, vad_set_batch, k)
return hits_list, precision_k, recall_k, ndcg_k
def calc_second(train_set_batch, test_set_batch, top_items, vad_set_batch, k):
"""[summary]
Args:
train_set_batch ([type]): [description]
test_set_batch ([type]): [description]
top_items ([type]): [description]
vad_set_batch ([type]): [description]
k ([type]): [description]
Returns:
[type]: [description]
"""
recall_k, precision_k, ndcg_k, hits_list = [], [], [], []
for train_set, test_set, ranked, vad_set in \
zip(train_set_batch, test_set_batch, top_items, vad_set_batch):
n_k = k if len(test_set) > k else len(test_set) # n_k = min(k, len(test_k))
n_idcg, n_dcg = 0, 0
for pos in range(n_k):
n_idcg += 1.0 / math.log(pos + 2, 2)
tops_sub_train = []
n_top_items = 0
for val in ranked:
if val not in train_set and val not in vad_set:
tops_sub_train.append(val)
n_top_items += 1
if n_top_items >= k: # 控制topK个item是从用户没交互过的商品中选的
break
hits_set = [(idx, item_id) for idx, item_id in \
enumerate(tops_sub_train) if item_id in test_set]
cnt_hits = len(hits_set)
for idx in range(cnt_hits):
n_dcg += 1.0 /math.log(hits_set[idx][0] + 2, 2)
precision_k.append(float(cnt_hits / k))
recall_k.append(float(cnt_hits / len(test_set)))
ndcg_k.append(float(n_dcg / n_idcg))
hits_list.append(cnt_hits)
return hits_list, precision_k, recall_k, ndcg_k
| [
"numpy.logical_and",
"numpy.arange",
"sklearn.metrics.auc",
"sklearn.metrics.precision_recall_curve",
"math.log",
"numpy.argsort",
"numpy.isnan",
"numpy.nonzero",
"numpy.zeros_like",
"bottleneck.argpartition"
] | [((444, 479), 'bottleneck.argpartition', 'bn.argpartition', (['(-x_pred)', 'k'], {'axis': '(1)'}), '(-x_pred, k, axis=1)\n', (459, 479), True, 'import bottleneck as bn\n'), ((641, 671), 'numpy.argsort', 'np.argsort', (['(-topk_part)'], {'axis': '(1)'}), '(-topk_part, axis=1)\n', (651, 671), True, 'import numpy as np\n'), ((1859, 1894), 'bottleneck.argpartition', 'bn.argpartition', (['(-x_pred)', 'k'], {'axis': '(1)'}), '(-x_pred, k, axis=1)\n', (1874, 1894), True, 'import bottleneck as bn\n'), ((1915, 1948), 'numpy.zeros_like', 'np.zeros_like', (['x_pred'], {'dtype': 'bool'}), '(x_pred, dtype=bool)\n', (1928, 1948), True, 'import numpy as np\n'), ((2872, 2907), 'bottleneck.argpartition', 'bn.argpartition', (['(-x_pred)', 'k'], {'axis': '(1)'}), '(-x_pred, k, axis=1)\n', (2887, 2907), True, 'import bottleneck as bn\n'), ((4219, 4262), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['label', 'prob'], {}), '(label, prob)\n', (4249, 4262), False, 'from sklearn import metrics\n'), ((4296, 4326), 'sklearn.metrics.auc', 'metrics.auc', (['recall', 'precision'], {}), '(recall, precision)\n', (4307, 4326), False, 'from sklearn import metrics\n'), ((1212, 1226), 'numpy.isnan', 'np.isnan', (['ndcg'], {}), '(ndcg)\n', (1220, 1226), True, 'import numpy as np\n'), ((2292, 2311), 'numpy.isnan', 'np.isnan', (['precision'], {}), '(precision)\n', (2300, 2311), True, 'import numpy as np\n'), ((2328, 2344), 'numpy.isnan', 'np.isnan', (['recall'], {}), '(recall)\n', (2336, 2344), True, 'import numpy as np\n'), ((2426, 2445), 'numpy.isnan', 'np.isnan', (['f1_recall'], {}), '(f1_recall)\n', (2434, 2445), True, 'import numpy as np\n'), ((5393, 5457), 'bottleneck.argpartition', 'bn.argpartition', (['(-pred_scores_batch)', '(k + max_train_count)'], {'axis': '(1)'}), '(-pred_scores_batch, k + max_train_count, axis=1)\n', (5408, 5457), True, 'import bottleneck as bn\n'), ((5630, 5660), 'numpy.argsort', 'np.argsort', (['(-topk_part)'], {'axis': '(1)'}), 
'(-topk_part, axis=1)\n', (5640, 5660), True, 'import numpy as np\n'), ((915, 934), 'numpy.arange', 'np.arange', (['(2)', '(k + 2)'], {}), '(2, k + 2)\n', (924, 934), True, 'import numpy as np\n'), ((3065, 3090), 'numpy.nonzero', 'np.nonzero', (['id_onehots_ph'], {}), '(id_onehots_ph)\n', (3075, 3090), True, 'import numpy as np\n'), ((520, 542), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (529, 542), True, 'import numpy as np\n'), ((804, 826), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (813, 826), True, 'import numpy as np\n'), ((1967, 1989), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (1976, 1989), True, 'import numpy as np\n'), ((7822, 7842), 'math.log', 'math.log', (['(pos + 2)', '(2)'], {}), '(pos + 2, 2)\n', (7830, 7842), False, 'import math\n'), ((8363, 8396), 'math.log', 'math.log', (['(hits_set[idx][0] + 2)', '(2)'], {}), '(hits_set[idx][0] + 2, 2)\n', (8371, 8396), False, 'import math\n'), ((2123, 2167), 'numpy.logical_and', 'np.logical_and', (['x_true_binary', 'x_pred_binary'], {}), '(x_true_binary, x_pred_binary)\n', (2137, 2167), True, 'import numpy as np\n'), ((3110, 3132), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (3119, 3132), True, 'import numpy as np\n'), ((5494, 5516), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (5503, 5516), True, 'import numpy as np\n'), ((5695, 5717), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (5704, 5717), True, 'import numpy as np\n'), ((6105, 6125), 'math.log', 'math.log', (['(pos + 2)', '(2)'], {}), '(pos + 2, 2)\n', (6113, 6125), False, 'import math\n'), ((6650, 6683), 'math.log', 'math.log', (['(hits_set[idx][0] + 2)', '(2)'], {}), '(hits_set[idx][0] + 2, 2)\n', (6658, 6683), False, 'import math\n'), ((962, 984), 'numpy.arange', 'np.arange', (['batch_users'], {}), '(batch_users)\n', (971, 984), True, 'import numpy as np\n')] |
"""
Function for working with patches from tensors.
See the :doc:`tutorials/patches` tutorial for more details.
"""
from typing import Iterable
import numpy as np
from .shape_ops import crop_to_box
from .axes import broadcast_to_axes, fill_by_indices, AxesLike
from .box import make_box_, Box
from dpipe.itertools import zip_equal, peek
from .shape_utils import shape_after_full_convolution
from .utils import build_slices
__all__ = 'get_boxes', 'divide', 'combine'
def get_boxes(shape: AxesLike, box_size: AxesLike, stride: AxesLike, axes: AxesLike = None,
              valid: bool = True) -> Iterable[Box]:
    """
    Yield boxes appropriate for a tensor of shape ``shape`` in a convolution-like fashion.

    Parameters
    ----------
    shape
        the input tensor's shape.
    box_size
    axes
        axes along which the slices will be taken.
    stride
        the stride (step-size) of the slice.
    valid
        whether boxes of size smaller than ``box_size`` should be left out.

    References
    ----------
    See the :doc:`tutorials/patches` tutorial for more details.
    """
    # number of boxes along each axis, computed exactly like a convolution's output shape
    grid = shape_after_full_convolution(shape, box_size, axes, stride, valid=valid)
    box_size, stride = np.broadcast_arrays(box_size, stride)
    # expand per-axis parameters to the full dimensionality of ``shape``
    size_full = fill_by_indices(shape, box_size, axes)
    stride_full = fill_by_indices(np.ones_like(shape), stride, axes)
    for index in np.ndindex(*grid):
        begin = np.asarray(index) * stride_full
        # clip the box so it never runs past the tensor's border
        end = np.minimum(begin + size_full, shape)
        yield make_box_([begin, end])
def divide(x: np.ndarray, patch_size: AxesLike, stride: AxesLike, axes: AxesLike = None,
           valid: bool = False) -> Iterable[np.ndarray]:
    """
    A convolution-like approach to generating patches from a tensor.

    Parameters
    ----------
    x
    patch_size
    axes
        dimensions along which the slices will be taken.
    stride
        the stride (step-size) of the slice.
    valid
        whether patches of size smaller than ``patch_size`` should be left out.

    References
    ----------
    See the :doc:`tutorials/patches` tutorial for more details.
    """
    boxes = get_boxes(x.shape, patch_size, stride, axes, valid=valid)
    yield from (crop_to_box(x, box) for box in boxes)
def combine(patches: Iterable[np.ndarray], output_shape: AxesLike, stride: AxesLike,
            axes: AxesLike = None, valid: bool = False) -> np.ndarray:
    """
    Build a tensor of shape ``output_shape`` from ``patches`` obtained in a convolution-like approach
    with corresponding parameters. The overlapping parts are averaged.

    References
    ----------
    See the :doc:`tutorials/patches` tutorial for more details.
    """
    axes, stride = broadcast_to_axes(axes, stride)
    # look at the first patch without consuming it, to infer dtype and patch size
    patch, patches = peek(patches)
    patch_size = np.array(patch.shape)[list(axes)]
    # if the caller gave only the sizes along ``axes``, expand to the full shape
    if len(np.atleast_1d(output_shape)) != patch.ndim:
        output_shape = fill_by_indices(patch.shape, output_shape, axes)
    # accumulate in float so that the final averaging division is exact
    dtype = patch.dtype
    if not np.issubdtype(dtype, np.floating):
        dtype = float
    result = np.zeros(output_shape, dtype)
    # per-voxel count of how many patches covered it, used for averaging
    counts = np.zeros(output_shape, int)
    for box, patch in zip_equal(get_boxes(output_shape, patch_size, stride, axes, valid), patches):
        slc = build_slices(*box)
        result[slc] += patch
        counts[slc] += 1
    # average overlaps in place; voxels never covered by a patch stay zero
    np.true_divide(result, counts, out=result, where=counts > 0)
    return result
| [
"numpy.ones_like",
"numpy.minimum",
"numpy.asarray",
"numpy.ndindex",
"numpy.array",
"numpy.zeros",
"numpy.issubdtype",
"numpy.true_divide",
"dpipe.itertools.peek",
"numpy.broadcast_arrays",
"numpy.atleast_1d"
] | [((1221, 1258), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['box_size', 'stride'], {}), '(box_size, stride)\n', (1240, 1258), True, 'import numpy as np\n'), ((1401, 1425), 'numpy.ndindex', 'np.ndindex', (['*final_shape'], {}), '(*final_shape)\n', (1411, 1425), True, 'import numpy as np\n'), ((2759, 2772), 'dpipe.itertools.peek', 'peek', (['patches'], {}), '(patches)\n', (2763, 2772), False, 'from dpipe.itertools import zip_equal, peek\n'), ((3058, 3087), 'numpy.zeros', 'np.zeros', (['output_shape', 'dtype'], {}), '(output_shape, dtype)\n', (3066, 3087), True, 'import numpy as np\n'), ((3101, 3128), 'numpy.zeros', 'np.zeros', (['output_shape', 'int'], {}), '(output_shape, int)\n', (3109, 3128), True, 'import numpy as np\n'), ((3321, 3381), 'numpy.true_divide', 'np.true_divide', (['result', 'counts'], {'out': 'result', 'where': '(counts > 0)'}), '(result, counts, out=result, where=counts > 0)\n', (3335, 3381), True, 'import numpy as np\n'), ((1348, 1367), 'numpy.ones_like', 'np.ones_like', (['shape'], {}), '(shape)\n', (1360, 1367), True, 'import numpy as np\n'), ((2790, 2811), 'numpy.array', 'np.array', (['patch.shape'], {}), '(patch.shape)\n', (2798, 2811), True, 'import numpy as np\n'), ((2987, 3020), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.floating'], {}), '(dtype, np.floating)\n', (3000, 3020), True, 'import numpy as np\n'), ((1443, 1460), 'numpy.asarray', 'np.asarray', (['start'], {}), '(start)\n', (1453, 1460), True, 'import numpy as np\n'), ((2835, 2862), 'numpy.atleast_1d', 'np.atleast_1d', (['output_shape'], {}), '(output_shape)\n', (2848, 2862), True, 'import numpy as np\n'), ((1507, 1542), 'numpy.minimum', 'np.minimum', (['(start + full_box)', 'shape'], {}), '(start + full_box, shape)\n', (1517, 1542), True, 'import numpy as np\n')] |
import glob
from pylab import *
import brewer2mpl
import numpy as np
import sys
import math
import gzip
import matplotlib.gridspec as gridspec
from collections import defaultdict
from matplotlib import pyplot as plt
# brewer2mpl.get_map args: set name set type number of colors
# Qualitative ColorBrewer palette: one distinct color per input file.
bmap = brewer2mpl.get_map('Set2', 'qualitative', 7)
colors = bmap.mpl_colors
# Global matplotlib style for all three subplots.
params = {
   'axes.labelsize': 8,
   'legend.fontsize': 10,
   'xtick.labelsize': 10,
   'ytick.labelsize': 10,
   'text.usetex': False,
   'figure.figsize': [6, 8]
   }
rcParams.update(params)
def customize_axis(ax):
    """Give *ax* a minimal look: drop the box, float the spines, y-grid behind."""
    # hide every frame edge except the bottom spine
    for side in ('top', 'right', 'left'):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.tick_params(axis='y', length=0)
    # float the spines away from the plot area
    for spine in ax.spines.values():
        spine.set_position(('outward', 5))
    # draw a light dashed y-grid underneath the data
    ax.set_axisbelow(True)
    ax.grid(axis='y', color="0.9", linestyle='--', linewidth=1)
def _plot_series(ax, column, title):
    """Plot ``column`` of every file named on the command line onto *ax*.

    Each input file is a whitespace-separated table loaded with ``np.loadtxt``;
    column 0 is used as the x-axis. One line per file, colored by its position
    in ``sys.argv`` and labelled with the file name.
    """
    for k, path in enumerate(sys.argv[1:]):
        data = np.loadtxt(path)
        ax.plot(data[:, 0], data[:, column], '-', linewidth=2, color=colors[k], label=path)
    ax.set_title(title)
    customize_axis(ax)


fig = figure(frameon=False)  # no frame
#plt.box(False)
#plt.ticklabel_format(axis='both', style='sci', scilimits=(-2,2))
# NOTE(review): column meanings (1=coverage, 3=mean fitness, 2=max fitness) are
# inferred from the subplot titles — confirm against the producer of these files.
ax1 = fig.add_subplot(311)
_plot_series(ax1, 1, 'Coverage')
_plot_series(fig.add_subplot(312), 3, 'Mean fitness')
# the explicit ax3.grid(...) call of the old code was redundant: customize_axis
# applies the identical grid settings.
_plot_series(fig.add_subplot(313), 2, 'Max fitness')
# single legend on the top panel, with a light gray frame
legend = ax1.legend(loc=4)  # bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=(3))
frame = legend.get_frame()
frame.set_facecolor('0.9')
frame.set_edgecolor('1.0')
fig.tight_layout()
fig.savefig('progress.pdf')
fig.savefig('progress.svg')
| [
"numpy.loadtxt",
"brewer2mpl.get_map"
] | [((290, 334), 'brewer2mpl.get_map', 'brewer2mpl.get_map', (['"""Set2"""', '"""qualitative"""', '(7)'], {}), "('Set2', 'qualitative', 7)\n", (308, 334), False, 'import brewer2mpl\n'), ((1225, 1238), 'numpy.loadtxt', 'np.loadtxt', (['i'], {}), '(i)\n', (1235, 1238), True, 'import numpy as np\n'), ((1444, 1457), 'numpy.loadtxt', 'np.loadtxt', (['i'], {}), '(i)\n', (1454, 1457), True, 'import numpy as np\n'), ((1730, 1743), 'numpy.loadtxt', 'np.loadtxt', (['i'], {}), '(i)\n', (1740, 1743), True, 'import numpy as np\n')] |
import numpy as np
import argparse
import config
import os
import datetime
import sys
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Lambda, Reshape, Layer
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
import tensorflow as tf
# Directory holding the recorded rollout episodes (one .npz file per episode).
DIR_NAME = './data/rollout/'
# Spatial size of the stored observations.
SCREEN_SIZE_X = 64
SCREEN_SIZE_Y = 64
batch_size = 10
# Network hyper-parameters: input resolution, base channel width,
# latent vector size and convolution kernel size.
IM_DIM = 64
DEPTH = 32
LATENT_DEPTH = 512
K_SIZE = 5
def sampling(args):
    """Reparameterization trick: draw ``z = mean + sigma * eps`` with ``eps ~ N(0, I)``.

    ``args`` is the pair ``(mean, logsigma)`` of equally shaped tensors.
    """
    mean, logsigma = args
    noise = keras.backend.random_normal(shape=keras.backend.shape(mean))
    sigma = tf.exp(logsigma / 2)
    return mean + sigma * noise
def encoder():
    """Build the VAE encoder: 64x64x3 image -> [latent sample, KL loss].

    Returns a Keras model with two outputs: a reparameterized latent vector of
    size ``LATENT_DEPTH`` (via :func:`sampling`) and the per-sample KL
    divergence of the approximate posterior from N(0, I).
    """
    def _down_block(X, filters):
        # Shared Conv -> BatchNorm -> LeakyReLU downsampling stanza
        # (was copy-pasted three times in the original).
        X = keras.layers.Conv2D(filters=filters, kernel_size=K_SIZE, strides=2, padding='same')(X)
        X = keras.layers.BatchNormalization()(X)
        return keras.layers.LeakyReLU(alpha=0.2)(X)

    input_E = keras.layers.Input(shape=(IM_DIM, IM_DIM, 3))
    X = _down_block(input_E, DEPTH * 2)
    X = _down_block(X, DEPTH * 4)
    X = _down_block(X, DEPTH * 8)
    X = keras.layers.Flatten()(X)
    X = keras.layers.Dense(LATENT_DEPTH)(X)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    mean = keras.layers.Dense(LATENT_DEPTH, activation="tanh")(X)
    logsigma = keras.layers.Dense(LATENT_DEPTH, activation="tanh")(X)
    latent = keras.layers.Lambda(sampling, output_shape=(LATENT_DEPTH,))([mean, logsigma])
    # KL(q(z|x) || N(0, I)), averaged over latent dimensions per sample
    kl_loss = 1 + logsigma - keras.backend.square(mean) - keras.backend.exp(logsigma)
    kl_loss = keras.backend.mean(kl_loss, axis=-1)
    kl_loss *= -0.5
    return keras.models.Model(input_E, [latent, kl_loss])
def generator():
    """Build the decoder/generator: latent vector -> 64x64x3 image in [0, 1]."""
    def _up_block(X, filters):
        # Shared Conv2DTranspose -> BatchNorm -> LeakyReLU upsampling stanza
        # (was copy-pasted three times in the original).
        X = keras.layers.Conv2DTranspose(filters=filters, kernel_size=K_SIZE, strides=2, padding='same')(X)
        X = keras.layers.BatchNormalization()(X)
        return keras.layers.LeakyReLU(alpha=0.2)(X)

    input_G = keras.layers.Input(shape=(LATENT_DEPTH,))
    # project the latent vector to an 8x8 feature map
    X = keras.layers.Dense(8 * 8 * DEPTH * 8)(input_G)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    X = keras.layers.Reshape((8, 8, DEPTH * 8))(X)
    X = _up_block(X, DEPTH * 8)  # 8x8 -> 16x16
    X = _up_block(X, DEPTH * 4)  # 16x16 -> 32x32
    X = _up_block(X, DEPTH)      # 32x32 -> 64x64
    X = keras.layers.Conv2D(filters=3, kernel_size=K_SIZE, padding='same')(X)
    X = keras.layers.Activation('sigmoid')(X)
    return keras.models.Model(input_G, X)
def discriminator():
    """Build the discriminator: image -> [real/fake logit, inner feature map].

    The flattened pre-activation feature map (``inner_output``) is returned
    alongside the logit so the reconstruction loss can be computed in
    discriminator feature space (the VAE-GAN "learned similarity" loss).
    """
    input_D = keras.layers.Input(shape=(IM_DIM, IM_DIM, 3))
    X = keras.layers.Conv2D(filters=DEPTH, kernel_size=K_SIZE, strides=2, padding='same')(input_D)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    # BUG FIX: this layer was applied to ``input_D``, which disconnected the
    # first conv block from the graph entirely; it must consume ``X``.
    # NOTE: fixing this changes the layer shapes, so old saved D weights
    # ("D_training_.h5") will not load into the corrected model.
    X = keras.layers.Conv2D(filters=DEPTH*4, kernel_size=K_SIZE, strides=2, padding='same')(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.Conv2D(filters=DEPTH*8, kernel_size=K_SIZE, strides=2, padding='same')(X)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    X = keras.layers.Conv2D(filters=DEPTH*8, kernel_size=K_SIZE, padding='same')(X)
    # expose the raw feature map (before normalization) as a second output
    inner_output = keras.layers.Flatten()(X)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    X = keras.layers.Flatten()(X)
    X = keras.layers.Dense(DEPTH*8)(X)
    X = keras.layers.BatchNormalization()(X)
    X = keras.layers.LeakyReLU(alpha=0.2)(X)
    output = keras.layers.Dense(1)(X)
    return keras.models.Model(input_D, [output, inner_output])
def import_data(N, M):
    """Load up to ``N`` rollout episode files of ``M`` observations each.

    Parameters
    ----------
    N : int
        maximum number of episode files to load from ``DIR_NAME``.
    M : int
        number of timesteps (observations) expected per file.

    Returns
    -------
    (data, N)
        ``data`` is a float32 array of shape
        ``(M * N, SCREEN_SIZE_X, SCREEN_SIZE_Y, 3)``; ``N`` is the number of
        files actually available (possibly fewer than requested).

    NOTE(review): if a file fails to load it is skipped but ``data`` keeps its
    full size, leaving zero-filled blocks at the end — confirm downstream code
    tolerates this (it did in the original as well).
    """
    filelist = sorted(f for f in os.listdir(DIR_NAME) if f != '.DS_Store')
    # clamp the request to what is actually on disk
    N = min(N, len(filelist))
    filelist = filelist[:N]
    data = np.zeros((M*N, SCREEN_SIZE_X, SCREEN_SIZE_Y, 3), dtype=np.float32)
    idx = 0
    file_count = 0
    for file in filelist:
        try:
            # os.path.join is robust to DIR_NAME with or without a trailing slash
            new_data = np.load(os.path.join(DIR_NAME, file))['obs']
            data[idx:(idx + M), :, :, :] = new_data
            idx = idx + M
            file_count += 1
            if file_count % 50 == 0:
                print('Imported {} / {} ::: Current data size = {} observations'.format(file_count, N, idx))
        except Exception as e:
            # best-effort loading: report and move on to the next file
            print(e)
            print('Skipped {}...'.format(file))
    print('Imported {} / {} ::: Current data size = {} observations'.format(file_count, N, idx))
    return data, N
# Instantiate the three VAE-GAN networks once at module level; the training
# step below closes over them.
E = encoder()
G = generator()
D = discriminator()
# Shared learning rate for all three Adam optimizers.
lr=0.001
#lr=0.0001
E_opt = keras.optimizers.Adam(lr=lr)
G_opt = keras.optimizers.Adam(lr=lr)
D_opt = keras.optimizers.Adam(lr=lr)
# Loss weights: feature-space reconstruction, latent-moment regularizer, KL term.
inner_loss_coef = 1
normal_coef = 0.1
kl_coef = 0.5
@tf.function
def train_step_vaegan(x):
    """Run one VAE-GAN training step on the image batch ``x``.

    Computes encoder, generator and discriminator losses under a single
    persistent GradientTape and applies one Adam update to each network.
    Returns the ten scalar losses in the order expected by ``main``'s
    ``metrics_names`` list.
    """
    # latent vectors sampled straight from the prior, for "random fake" images
    lattent_r = tf.random.normal((100, LATENT_DEPTH))
    # persistent tape: we take three separate gradients from it below
    with tf.GradientTape(persistent=True) as tape :
        lattent,kl_loss = E(x)
        fake = G(lattent)
        dis_fake,dis_inner_fake = D(fake)
        dis_fake_r,_ = D(G(lattent_r))
        dis_true,dis_inner_true = D(x)
        # squared difference of discriminator feature maps: the VAE-GAN
        # "learned similarity" reconstruction term
        vae_inner = dis_inner_fake-dis_inner_true
        vae_inner = vae_inner*vae_inner
        # regularize the latent distribution toward zero mean / unit variance
        # (note: this re-encodes x a second time)
        mean,var = tf.nn.moments(E(x)[0], axes=0)
        var_to_one = var - 1
        normal_loss = tf.reduce_mean(mean*mean) + tf.reduce_mean(var_to_one*var_to_one)
        # reduce the per-sample KL output of E to a scalar (shadows the tensor)
        kl_loss = tf.reduce_mean(kl_loss)
        vae_diff_loss = tf.reduce_mean(vae_inner)
        # discriminator targets: 0 for both kinds of fakes, 1 for real images
        f_dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(tf.zeros_like(dis_fake), dis_fake))
        r_dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(tf.zeros_like(dis_fake_r), dis_fake_r))
        t_dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(tf.ones_like(dis_true), dis_true))
        gan_loss = (0.5*t_dis_loss + 0.25*f_dis_loss + 0.25*r_dis_loss)
        # pixel-space L1 loss: reported as a metric only, not optimized
        vae_loss = tf.reduce_mean(tf.abs(x-fake))
        E_loss = vae_diff_loss + kl_coef*kl_loss + normal_coef*normal_loss
        # the generator maximizes the discriminator loss (hence the minus sign)
        G_loss = inner_loss_coef*vae_diff_loss - gan_loss
        D_loss = gan_loss
    E_grad = tape.gradient(E_loss,E.trainable_variables)
    G_grad = tape.gradient(G_loss,G.trainable_variables)
    D_grad = tape.gradient(D_loss,D.trainable_variables)
    # free the persistent tape's resources before applying updates
    del tape
    E_opt.apply_gradients(zip(E_grad, E.trainable_variables))
    G_opt.apply_gradients(zip(G_grad, G.trainable_variables))
    D_opt.apply_gradients(zip(D_grad, D.trainable_variables))
    return [gan_loss, vae_loss, f_dis_loss, r_dis_loss, t_dis_loss, vae_diff_loss, E_loss, D_loss, kl_loss, normal_loss]
def main(args):
    """Load rollout data, optionally restore weights, and run the training loop.

    ``args`` is the argparse namespace produced by the ``__main__`` block:
    ``N`` (episode count), ``time_steps`` (observations per episode),
    ``new_model`` (skip weight restore) and ``epochs``.
    Note: ``epochs`` and ``max_step`` are parsed/set but not used by the loop
    below, which iterates a fixed batch window instead.
    """
    new_model = args.new_model
    N = int(args.N)
    M = int(args.time_steps)
    epochs = int(args.epochs)
    try:
        data, N = import_data(N, M)
    except:
        print('NO DATA FOUND')
        raise
    if not new_model:
        # resume from the checkpoints written by save_model() below
        try:
            D.load_weights("./saved-models/D_training_.h5")
            E.load_weights("./saved-models/E_training_.h5")
            G.load_weights("./saved-models/G_training_.h5")
        except:
            print("Either set --new_model or ensure ./vae/weights.h5 exists")
            raise
    print('DATA SHAPE = {}'.format(data.shape))
    step = 0
    max_step = 100
    log_freq = 1
    # must match the order of the list returned by train_step_vaegan
    metrics_names = ["gan_loss", "vae_loss", "fake_dis_loss", "r_dis_loss", "t_dis_loss", "vae_inner_loss", "E_loss", "D_loss", "kl_loss", "normal_loss"]
    metrics = []
    # NOTE(review): every Mean metric is created with the literal name 'm',
    # not the entry from metrics_names — confirm this is intentional.
    for m in metrics_names :
        metrics.append(tf.keras.metrics.Mean('m', dtype=tf.float32))
    def save_model():
        # write all three networks' weights; loaded back by the restore above
        D.save('saved-models/D_training_' + '.h5')
        G.save('saved-models/G_training_' + '.h5')
        E.save('saved-models/E_training_' + '.h5')
    def print_metrics():
        # print a single carriage-return status line, then reset the running means
        s = ""
        for name,metric in zip(metrics_names,metrics) :
            s+= " " + name + " " + str(np.around(metric.result().numpy(), 3))
        print(f"\rStep : " + str(step) + " " + s, end="", flush=True)
        for metric in metrics :
            metric.reset_states()
    # Train on fixed 100-observation windows of ``data``.
    # NOTE(review): the window range (2000..5000) is hard-coded and assumes
    # data holds at least 5000 observations; with log_freq == 1 the
    # `not i % log_freq` test is always true, so metrics print every step.
    for i in range(2000,5001,100):
        step+=1
        if not i % log_freq :
            print_metrics()
        results = train_step_vaegan(data[i-100:i])
        for metric,result in zip(metrics, results) :
            metric(result)
    save_model()
if __name__ == "__main__":
    # Command-line entry point: parse the run configuration and start training.
    parser = argparse.ArgumentParser(description='Train VAE')
    parser.add_argument('--N', default=10000,
                        help='number of episodes to use to train')
    parser.add_argument('--new_model', action='store_true',
                        help='start a new model from scratch?')
    parser.add_argument('--time_steps', type=int, default=300,
                        help='how many timesteps at start of episode?')
    parser.add_argument('--epochs', default=10,
                        help='number of epochs to train for')
    main(parser.parse_args())
| [
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.ones_like",
"tensorflow.reduce_mean",
"tensorflow.keras.backend.shape",
"tensorflow.keras.layers.Input",
"tensorflow.random.normal",
"os.listdir",
"tensorflow.keras.backend.mean",
... | [((5084, 5112), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (5105, 5112), True, 'import tensorflow.keras as keras\n'), ((5121, 5149), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (5142, 5149), True, 'import tensorflow.keras as keras\n'), ((5158, 5186), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (5179, 5186), True, 'import tensorflow.keras as keras\n'), ((720, 765), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(IM_DIM, IM_DIM, 3)'}), '(shape=(IM_DIM, IM_DIM, 3))\n', (738, 765), True, 'import tensorflow.keras as keras\n'), ((1846, 1882), 'tensorflow.keras.backend.mean', 'keras.backend.mean', (['kl_loss'], {'axis': '(-1)'}), '(kl_loss, axis=-1)\n', (1864, 1882), True, 'import tensorflow.keras as keras\n'), ((1919, 1965), 'tensorflow.keras.models.Model', 'keras.models.Model', (['input_E', '[latent, kl_loss]'], {}), '(input_E, [latent, kl_loss])\n', (1937, 1965), True, 'import tensorflow.keras as keras\n'), ((1997, 2038), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(LATENT_DEPTH,)'}), '(shape=(LATENT_DEPTH,))\n', (2015, 2038), True, 'import tensorflow.keras as keras\n'), ((2962, 2992), 'tensorflow.keras.models.Model', 'keras.models.Model', (['input_G', 'X'], {}), '(input_G, X)\n', (2980, 2992), True, 'import tensorflow.keras as keras\n'), ((3029, 3074), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(IM_DIM, IM_DIM, 3)'}), '(shape=(IM_DIM, IM_DIM, 3))\n', (3047, 3074), True, 'import tensorflow.keras as keras\n'), ((4057, 4108), 'tensorflow.keras.models.Model', 'keras.models.Model', (['input_D', '[output, inner_output]'], {}), '(input_D, [output, inner_output])\n', (4075, 4108), True, 'import tensorflow.keras as keras\n'), ((4146, 4166), 'os.listdir', 'os.listdir', (['DIR_NAME'], {}), '(DIR_NAME)\n', (4156, 4166), False, 'import os\n'), 
((4391, 4459), 'numpy.zeros', 'np.zeros', (['(M * N, SCREEN_SIZE_X, SCREEN_SIZE_Y, 3)'], {'dtype': 'np.float32'}), '((M * N, SCREEN_SIZE_X, SCREEN_SIZE_Y, 3), dtype=np.float32)\n', (4399, 4459), True, 'import numpy as np\n'), ((5297, 5334), 'tensorflow.random.normal', 'tf.random.normal', (['(100, LATENT_DEPTH)'], {}), '((100, LATENT_DEPTH))\n', (5313, 5334), True, 'import tensorflow as tf\n'), ((8775, 8823), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train VAE"""'}), "(description='Train VAE')\n", (8798, 8823), False, 'import argparse\n'), ((779, 868), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(DEPTH * 2)', 'kernel_size': 'K_SIZE', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=DEPTH * 2, kernel_size=K_SIZE, strides=2,\n padding='same')\n", (798, 868), True, 'import tensorflow.keras as keras\n'), ((880, 913), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (911, 913), True, 'import tensorflow.keras as keras\n'), ((925, 958), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (947, 958), True, 'import tensorflow.keras as keras\n'), ((971, 1060), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(DEPTH * 4)', 'kernel_size': 'K_SIZE', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=DEPTH * 4, kernel_size=K_SIZE, strides=2,\n padding='same')\n", (990, 1060), True, 'import tensorflow.keras as keras\n'), ((1066, 1099), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1097, 1099), True, 'import tensorflow.keras as keras\n'), ((1111, 1144), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1133, 1144), True, 'import tensorflow.keras as keras\n'), ((1157, 1246), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(DEPTH * 8)', 
'kernel_size': 'K_SIZE', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=DEPTH * 8, kernel_size=K_SIZE, strides=2,\n padding='same')\n", (1176, 1246), True, 'import tensorflow.keras as keras\n'), ((1252, 1285), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1283, 1285), True, 'import tensorflow.keras as keras\n'), ((1297, 1330), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1319, 1330), True, 'import tensorflow.keras as keras\n'), ((1347, 1369), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (1367, 1369), True, 'import tensorflow.keras as keras\n'), ((1381, 1413), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['LATENT_DEPTH'], {}), '(LATENT_DEPTH)\n', (1399, 1413), True, 'import tensorflow.keras as keras\n'), ((1429, 1462), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1460, 1462), True, 'import tensorflow.keras as keras\n'), ((1474, 1507), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1496, 1507), True, 'import tensorflow.keras as keras\n'), ((1527, 1578), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['LATENT_DEPTH'], {'activation': '"""tanh"""'}), "(LATENT_DEPTH, activation='tanh')\n", (1545, 1578), True, 'import tensorflow.keras as keras\n'), ((1596, 1647), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['LATENT_DEPTH'], {'activation': '"""tanh"""'}), "(LATENT_DEPTH, activation='tanh')\n", (1614, 1647), True, 'import tensorflow.keras as keras\n'), ((1663, 1722), 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['sampling'], {'output_shape': '(LATENT_DEPTH,)'}), '(sampling, output_shape=(LATENT_DEPTH,))\n', (1682, 1722), True, 'import tensorflow.keras as keras\n'), ((1804, 1831), 'tensorflow.keras.backend.exp', 'keras.backend.exp', (['logsigma'], {}), 
'(logsigma)\n', (1821, 1831), True, 'import tensorflow.keras as keras\n'), ((2048, 2085), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(8 * 8 * DEPTH * 8)'], {}), '(8 * 8 * DEPTH * 8)\n', (2066, 2085), True, 'import tensorflow.keras as keras\n'), ((2097, 2130), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2128, 2130), True, 'import tensorflow.keras as keras\n'), ((2142, 2175), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2164, 2175), True, 'import tensorflow.keras as keras\n'), ((2187, 2226), 'tensorflow.keras.layers.Reshape', 'keras.layers.Reshape', (['(8, 8, DEPTH * 8)'], {}), '((8, 8, DEPTH * 8))\n', (2207, 2226), True, 'import tensorflow.keras as keras\n'), ((2243, 2342), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', ([], {'filters': '(DEPTH * 8)', 'kernel_size': 'K_SIZE', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=DEPTH * 8, kernel_size=K_SIZE, strides\n =2, padding='same')\n", (2271, 2342), True, 'import tensorflow.keras as keras\n'), ((2347, 2380), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2378, 2380), True, 'import tensorflow.keras as keras\n'), ((2392, 2425), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2414, 2425), True, 'import tensorflow.keras as keras\n'), ((2438, 2537), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', ([], {'filters': '(DEPTH * 4)', 'kernel_size': 'K_SIZE', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=DEPTH * 4, kernel_size=K_SIZE, strides\n =2, padding='same')\n", (2466, 2537), True, 'import tensorflow.keras as keras\n'), ((2542, 2575), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2573, 2575), True, 'import tensorflow.keras as keras\n'), ((2587, 2620), 
'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2609, 2620), True, 'import tensorflow.keras as keras\n'), ((2637, 2731), 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', ([], {'filters': 'DEPTH', 'kernel_size': 'K_SIZE', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=DEPTH, kernel_size=K_SIZE, strides=2,\n padding='same')\n", (2665, 2731), True, 'import tensorflow.keras as keras\n'), ((2739, 2772), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2770, 2772), True, 'import tensorflow.keras as keras\n'), ((2784, 2817), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2806, 2817), True, 'import tensorflow.keras as keras\n'), ((2834, 2900), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(3)', 'kernel_size': 'K_SIZE', 'padding': '"""same"""'}), "(filters=3, kernel_size=K_SIZE, padding='same')\n", (2853, 2900), True, 'import tensorflow.keras as keras\n'), ((2912, 2946), 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (2935, 2946), True, 'import tensorflow.keras as keras\n'), ((3088, 3174), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': 'DEPTH', 'kernel_size': 'K_SIZE', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=DEPTH, kernel_size=K_SIZE, strides=2, padding=\n 'same')\n", (3107, 3174), True, 'import tensorflow.keras as keras\n'), ((3187, 3220), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3209, 3220), True, 'import tensorflow.keras as keras\n'), ((3237, 3326), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(DEPTH * 4)', 'kernel_size': 'K_SIZE', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=DEPTH * 4, kernel_size=K_SIZE, strides=2,\n padding='same')\n", 
(3256, 3326), True, 'import tensorflow.keras as keras\n'), ((3338, 3371), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3360, 3371), True, 'import tensorflow.keras as keras\n'), ((3383, 3416), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (3414, 3416), True, 'import tensorflow.keras as keras\n'), ((3429, 3518), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(DEPTH * 8)', 'kernel_size': 'K_SIZE', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=DEPTH * 8, kernel_size=K_SIZE, strides=2,\n padding='same')\n", (3448, 3518), True, 'import tensorflow.keras as keras\n'), ((3524, 3557), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (3555, 3557), True, 'import tensorflow.keras as keras\n'), ((3569, 3602), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3591, 3602), True, 'import tensorflow.keras as keras\n'), ((3615, 3689), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(DEPTH * 8)', 'kernel_size': 'K_SIZE', 'padding': '"""same"""'}), "(filters=DEPTH * 8, kernel_size=K_SIZE, padding='same')\n", (3634, 3689), True, 'import tensorflow.keras as keras\n'), ((3710, 3732), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (3730, 3732), True, 'import tensorflow.keras as keras\n'), ((3744, 3777), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (3775, 3777), True, 'import tensorflow.keras as keras\n'), ((3789, 3822), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3811, 3822), True, 'import tensorflow.keras as keras\n'), ((3839, 3861), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (3859, 3861), True, 'import tensorflow.keras as 
keras\n'), ((3873, 3902), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(DEPTH * 8)'], {}), '(DEPTH * 8)\n', (3891, 3902), True, 'import tensorflow.keras as keras\n'), ((3912, 3945), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (3943, 3945), True, 'import tensorflow.keras as keras\n'), ((3957, 3990), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3979, 3990), True, 'import tensorflow.keras as keras\n'), ((4012, 4033), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {}), '(1)\n', (4030, 4033), True, 'import tensorflow.keras as keras\n'), ((5344, 5376), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (5359, 5376), True, 'import tensorflow as tf\n'), ((5867, 5890), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_loss'], {}), '(kl_loss)\n', (5881, 5890), True, 'import tensorflow as tf\n'), ((5915, 5940), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['vae_inner'], {}), '(vae_inner)\n', (5929, 5940), True, 'import tensorflow as tf\n'), ((614, 639), 'tensorflow.keras.backend.shape', 'keras.backend.shape', (['mean'], {}), '(mean)\n', (633, 639), True, 'import tensorflow.keras as keras\n'), ((659, 679), 'tensorflow.exp', 'tf.exp', (['(logsigma / 2)'], {}), '(logsigma / 2)\n', (665, 679), True, 'import tensorflow as tf\n'), ((1775, 1801), 'tensorflow.keras.backend.square', 'keras.backend.square', (['mean'], {}), '(mean)\n', (1795, 1801), True, 'import tensorflow.keras as keras\n'), ((5774, 5801), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(mean * mean)'], {}), '(mean * mean)\n', (5788, 5801), True, 'import tensorflow as tf\n'), ((5802, 5841), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(var_to_one * var_to_one)'], {}), '(var_to_one * var_to_one)\n', (5816, 5841), True, 'import tensorflow as tf\n'), ((6386, 6402), 'tensorflow.abs', 'tf.abs', (['(x - fake)'], {}), '(x - 
fake)\n', (6392, 6402), True, 'import tensorflow as tf\n'), ((7943, 7987), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""m"""'], {'dtype': 'tf.float32'}), "('m', dtype=tf.float32)\n", (7964, 7987), True, 'import tensorflow as tf\n'), ((4541, 4565), 'numpy.load', 'np.load', (['(DIR_NAME + file)'], {}), '(DIR_NAME + file)\n', (4548, 4565), True, 'import numpy as np\n'), ((6017, 6040), 'tensorflow.zeros_like', 'tf.zeros_like', (['dis_fake'], {}), '(dis_fake)\n', (6030, 6040), True, 'import tensorflow as tf\n'), ((6129, 6154), 'tensorflow.zeros_like', 'tf.zeros_like', (['dis_fake_r'], {}), '(dis_fake_r)\n', (6142, 6154), True, 'import tensorflow as tf\n'), ((6245, 6267), 'tensorflow.ones_like', 'tf.ones_like', (['dis_true'], {}), '(dis_true)\n', (6257, 6267), True, 'import tensorflow as tf\n')] |
from torch.utils.data import DataLoader, Dataset
from abc import abstractmethod
from torch import Tensor
import json
import numpy as np
from sqlalchemy import create_engine
from torchvision.io import read_image
import pandas as pd
import configparser
import os
from gensim.models import Word2Vec
# Read image and database settings from config.ini at import time.
config = configparser.ConfigParser()
config.read('config.ini')
# Directory where album cover images are stored, one "<id>.jpg" per album.
COVER_LOC = config['IMAGES']['ImageLocation']
dbconf = config["DATABASE"]
uname = dbconf['UserName']
pword = dbconf['Password']
addrs = dbconf['Address']
dname = dbconf['Database']
# MySQL connection string; utf8mb4 so tag/album text round-trips correctly.
connstring = f'mysql+pymysql://{uname}:{pword}@{addrs}/{dname}?charset=utf8mb4'
# Module-level engine shared as the default by the dataset classes below.
ENGINE = create_engine(connstring)
class BandcampDatasetBase(Dataset):
    """Common base for Bandcamp datasets.

    Loads the full ``albums`` table once and serves cover images from disk;
    subclasses implement ``__getitem__`` to pair the image with a target.
    """

    def __init__(self, engine=ENGINE, loc=COVER_LOC, transform=None):
        self.df = pd.read_sql('SELECT * FROM albums', engine)
        self.img_dir = loc
        self.transform = transform
        # one cover file name per album: "<album id>.jpg"
        self.img_lines = self.df['id'] + '.jpg'

    def __len__(self):
        return len(self.img_lines)

    def get_img(self, item):
        """Return the cover image for row ``item``, transformed if configured."""
        path = os.path.join(self.img_dir, self.img_lines[item])
        # read_image yields (C, H, W); moveaxis rearranges it to (H, W, C)
        img = read_image(path).moveaxis([0, 1, 2], [-1, -3, -2])
        return self.transform(img) if self.transform else img

    @abstractmethod
    def __getitem__(self, item):
        raise NotImplementedError
class BandcampTagDataset(BandcampDatasetBase):
def __init__(self, **kwargs):
super(BandcampTagDataset, self).__init__(**kwargs)
self.tag_jsons = self.df['tags']
self.w2v_model = Word2Vec.load('./models/tags.model')
def __getitem__(self, item):
image = self.get_img(item)
tags = self.tag_jsons.iloc[item]
tag_list = json.loads(tags)
tag_list = np.concatenate([s.split(' ') for s in tag_list])
cbow = np.mean(self.w2v_model.wv[tag_list], axis=0)
return image, cbow
| [
"numpy.mean",
"json.loads",
"configparser.ConfigParser",
"sqlalchemy.create_engine",
"gensim.models.Word2Vec.load",
"os.path.join",
"torchvision.io.read_image",
"pandas.read_sql"
] | [((307, 334), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (332, 334), False, 'import configparser\n'), ((631, 656), 'sqlalchemy.create_engine', 'create_engine', (['connstring'], {}), '(connstring)\n', (644, 656), False, 'from sqlalchemy import create_engine\n'), ((783, 826), 'pandas.read_sql', 'pd.read_sql', (['"""SELECT * FROM albums"""', 'engine'], {}), "('SELECT * FROM albums', engine)\n", (794, 826), True, 'import pandas as pd\n'), ((1045, 1093), 'os.path.join', 'os.path.join', (['self.img_dir', 'self.img_lines[item]'], {}), '(self.img_dir, self.img_lines[item])\n', (1057, 1093), False, 'import os\n'), ((1551, 1587), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""./models/tags.model"""'], {}), "('./models/tags.model')\n", (1564, 1587), False, 'from gensim.models import Word2Vec\n'), ((1717, 1733), 'json.loads', 'json.loads', (['tags'], {}), '(tags)\n', (1727, 1733), False, 'import json\n'), ((1817, 1861), 'numpy.mean', 'np.mean', (['self.w2v_model.wv[tag_list]'], {'axis': '(0)'}), '(self.w2v_model.wv[tag_list], axis=0)\n', (1824, 1861), True, 'import numpy as np\n'), ((1110, 1130), 'torchvision.io.read_image', 'read_image', (['img_path'], {}), '(img_path)\n', (1120, 1130), False, 'from torchvision.io import read_image\n')] |
import argparse
import skimage
import skimage.io
import skimage.transform
from PIL import Image
from skimage import io
from math import log10
import sys
import shutil
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from time import time
from struct import unpack
import matplotlib.pyplot as plt
import re
import numpy as np
import pdb
from path import Path
from utils.preprocess import scale_disp, default_transform
from networks.FADNet import FADNet
from networks.DispNetC import DispNetC
from networks.GANet_deep import GANet
from networks.stackhourglass import PSMNet
from networks.aanet import AANet
parser = argparse.ArgumentParser(description='FADNet')
parser.add_argument('--crop_height', type=int, required=True, help="crop height")
parser.add_argument('--crop_width', type=int, required=True, help="crop width")
parser.add_argument('--sceneflow', type=int, default=0, help='sceneflow dataset? Default=False')
parser.add_argument('--kitti2012', type=int, default=0, help='kitti 2012? Default=False')
parser.add_argument('--kitti2015', type=int, default=0, help='kitti 2015? Default=False')
parser.add_argument('--middlebury', type=int, default=0, help='Middlebury? Default=False')
parser.add_argument('--eth3d', type=int, default=0, help='ETH3D? Default=False')
parser.add_argument('--datapath', default='/media/jiaren/ImageNet/data_scene_flow_2015/testing/',
help='data path')
parser.add_argument('--list', default='lists/middeval_test.list',
help='list of stereo images')
parser.add_argument('--loadmodel', default=None,
help='loading model')
parser.add_argument('--savepath', default='results/',
help='path to save the results.')
parser.add_argument('--model', default='fadnet',
help='select model')
parser.add_argument('--maxdisp', type=int, default=192,
help='maxium disparity')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--devices', type=str, help='indicates CUDA devices, e.g. 0,1,2', default='0')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
opt = parser.parse_args()
print(opt)
torch.backends.cudnn.benchmark = True
opt.cuda = not opt.no_cuda and torch.cuda.is_available()
if not os.path.exists(opt.savepath):
os.makedirs(opt.savepath)
torch.manual_seed(opt.seed)
if opt.cuda:
torch.cuda.manual_seed(opt.seed)
if opt.model == 'psmnet':
model = PSMNet(opt.maxdisp)
elif opt.model == 'ganet':
model = GANet(opt.maxdisp)
elif opt.model == 'aanet':
model = AANet(opt.maxdisp)
elif opt.model == 'fadnet':
model = FADNet(maxdisp=opt.maxdisp)
elif opt.model == 'dispnetc':
model = DispNetC(resBlock=False, maxdisp=opt.maxdisp)
elif opt.model == 'crl':
model = FADNet(resBlock=False, maxdisp=opt.maxdisp)
else:
print('no model')
sys.exit(-1)
model = nn.DataParallel(model, device_ids=[0])
model.cuda()
if opt.loadmodel is not None:
state_dict = torch.load(opt.loadmodel)
model.load_state_dict(state_dict['state_dict'])
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
def readPFM(file):
with open(file, "rb") as f:
# Line 1: PF=>RGB (3 channels), Pf=>Greyscale (1 channel)
type = f.readline().decode('latin-1')
if "PF" in type:
channels = 3
elif "Pf" in type:
channels = 1
else:
sys.exit(1)
# Line 2: width height
line = f.readline().decode('latin-1')
width, height = re.findall('\d+', line)
width = int(width)
height = int(height)
# Line 3: +ve number means big endian, negative means little endian
line = f.readline().decode('latin-1')
BigEndian = True
if "-" in line:
BigEndian = False
# Slurp all binary data
samples = width * height * channels;
buffer = f.read(samples * 4)
# Unpack floats with appropriate endianness
if BigEndian:
fmt = ">"
else:
fmt = "<"
fmt = fmt + str(samples) + "f"
img = unpack(fmt, buffer)
img = np.reshape(img, (height, width))
img = np.flipud(img)
return img, height, width
def save_pfm(filename, image, scale=1):
'''
Save a Numpy array to a PFM file.
'''
color = None
file = open(filename, "w")
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write('PF\n' if color else 'Pf\n')
file.write('%d %d\n' % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
file.write('%f\n' % scale)
image.tofile(file)
def test_transform(temp_data, crop_height, crop_width):
_, h, w=np.shape(temp_data)
if h <= crop_height and w <= crop_width:
# padding zero
temp = temp_data
temp_data = np.zeros([6, crop_height, crop_width], 'float32')
temp_data[:, crop_height - h: crop_height, crop_width - w: crop_width] = temp
else:
start_x = int((w - crop_width) / 2)
start_y = int((h - crop_height) / 2)
temp_data = temp_data[:, start_y: start_y + crop_height, start_x: start_x + crop_width]
left = np.ones([1, 3,crop_height,crop_width],'float32')
left[0, :, :, :] = temp_data[0: 3, :, :]
right = np.ones([1, 3, crop_height, crop_width], 'float32')
right[0, :, :, :] = temp_data[3: 6, :, :]
return torch.from_numpy(left).float(), torch.from_numpy(right).float(), h, w
def load_data(leftname, rightname):
left = Image.open(leftname)
right = Image.open(rightname)
size = np.shape(left)
height = size[0]
width = size[1]
temp_data = np.zeros([6, height, width], 'float32')
left = np.asarray(left)
right = np.asarray(right)
r = left[:, :, 0]
g = left[:, :, 1]
b = left[:, :, 2]
temp_data[0, :, :] = (r - np.mean(r[:])) / np.std(r[:])
temp_data[1, :, :] = (g - np.mean(g[:])) / np.std(g[:])
temp_data[2, :, :] = (b - np.mean(b[:])) / np.std(b[:])
r = right[:, :, 0]
g = right[:, :, 1]
b = right[:, :, 2]
#r,g,b,_ = right.split()
temp_data[3, :, :] = (r - np.mean(r[:])) / np.std(r[:])
temp_data[4, :, :] = (g - np.mean(g[:])) / np.std(g[:])
temp_data[5, :, :] = (b - np.mean(b[:])) / np.std(b[:])
return temp_data
def load_data_imn(leftname, rightname):
#left = Image.open(leftname)
#right = Image.open(rightname)
#h, w, c = np.shape(left)
left = io.imread(leftname)
right = io.imread(rightname)
h, w, _ = left.shape
normalize = 'instnorm'
if normalize == 'imagenet':
rgb_transform = default_transform()
img_left = rgb_transform(left)
img_right = rgb_transform(right)
else:
img_left = np.zeros([3, h, w], 'float32')
img_right = np.zeros([3, h, w], 'float32')
for c in range(3):
img_left[c, :, :] = (left[:, :, c] - np.mean(left[:, :, c])) / np.std(left[:, :, c])
img_right[c, :, :] = (right[:, :, c] - np.mean(right[:, :, c])) / np.std(right[:, :, c])
print(h, w)
bottom_pad = opt.crop_height-h
right_pad = opt.crop_width-w
img_left = np.lib.pad(img_left,((0,0),(0,bottom_pad),(0,right_pad)),mode='constant',constant_values=0)
img_right = np.lib.pad(img_right,((0,0),(0,bottom_pad),(0,right_pad)),mode='constant',constant_values=0)
return torch.from_numpy(img_left).float(), torch.from_numpy(img_right).float(), h, w
def test_md(leftname, rightname, savename):
print(savename)
epe = 0
input1, input2, height, width = load_data_imn(leftname, rightname)
input1 = Variable(input1, requires_grad = False)
input2 = Variable(input2, requires_grad = False)
model.eval()
if opt.cuda:
input1 = input1.cuda()
input2 = input2.cuda()
input_var = torch.cat((input1, input2), 0)
input_var = input_var.unsqueeze(0)
torch.cuda.synchronize()
start_time = time()
with torch.no_grad():
prediction = model(input_var)[1]
prediction = prediction.squeeze(0)
torch.cuda.synchronize()
end_time = time()
print("Processing time: {:.4f}".format(end_time - start_time))
temp = prediction.cpu()
temp = temp.detach().numpy()
temp = temp[0, :height, :width]
# print epe
thres = 400
if 'trainingQ' in leftname:
thres = 192
if 'training' in leftname:
gt_disp, _, _ = readPFM(leftname.replace('im0.png', 'disp0GT.pfm'))
gt_disp[np.isinf(gt_disp)] = 0
mask = (gt_disp > 0) & (gt_disp < thres)
epe = np.mean(np.abs(gt_disp[mask] - temp[mask]))
print(savename, epe, np.min(gt_disp), np.max(gt_disp))
savepfm_path = savename.replace('.png','')
temp = np.flipud(temp)
disppath = Path(savepfm_path)
disppath.makedirs_p()
save_pfm(savepfm_path+'/disp0FADNet++.pfm', temp, scale=1)
##########write time txt########
fp = open(savepfm_path+'/timeFADNet++.txt', 'w')
runtime = "%.4f" % (end_time - start_time)
fp.write(runtime)
fp.close()
return epe
def test_eth3d(leftname, rightname, savename):
print(savename)
epe = 0
input1, input2, height, width = load_data_imn(leftname, rightname)
input1 = Variable(input1, requires_grad = False)
input2 = Variable(input2, requires_grad = False)
model.eval()
if opt.cuda:
input1 = input1.cuda()
input2 = input2.cuda()
input_var = torch.cat((input1, input2), 0)
input_var = input_var.unsqueeze(0)
torch.cuda.synchronize()
start_time = time()
with torch.no_grad():
prediction = model(input_var)[1]
prediction = prediction.squeeze(0)
torch.cuda.synchronize()
end_time = time()
print("Processing time: {:.4f}".format(end_time - start_time))
temp = prediction.cpu()
temp = temp.detach().numpy()
temp = temp[0, :height, :width]
# print epe
if 'training' in leftname:
gt_disp, _, _ = readPFM(leftname.replace('im0.png', 'disp0GT.pfm').replace('training/', 'training_gt/'))
gt_disp[np.isinf(gt_disp)] = 0
mask = (gt_disp > 0) & (gt_disp < 192)
epe = np.mean(np.abs(gt_disp[mask] - temp[mask]))
print(savename, epe, np.min(gt_disp), np.max(gt_disp))
temp = np.flipud(temp)
disppath = Path('/'.join(savename.split('/')[:-1]))
disppath.makedirs_p()
save_pfm(savename, temp, scale=1)
##########write time txt########
fp = open(savename.replace("pfm", "txt"), 'w')
runtime = "runtime %.4f" % (end_time - start_time)
fp.write(runtime)
fp.close()
return epe
def test_kitti(leftname, rightname, savename):
print(savename)
epe = 0
input1, input2, height, width = load_data_imn(leftname, rightname)
input1 = Variable(input1, requires_grad = False)
input2 = Variable(input2, requires_grad = False)
model.eval()
if opt.cuda:
input1 = input1.cuda()
input2 = input2.cuda()
input_var = torch.cat((input1, input2), 0)
input_var = input_var.unsqueeze(0)
with torch.no_grad():
prediction = model(input_var)[1]
prediction = prediction.squeeze(0)
temp = prediction.cpu()
temp = temp.detach().numpy()
temp = temp[0, :height, :width]
# print epe
if 'training' in leftname:
gt_disp = Image.open(leftname.replace('colored_0', 'disp_occ').replace('image_2', 'disp_occ_0'))
gt_disp = np.ascontiguousarray(gt_disp,dtype=np.float32)/256
mask = (gt_disp > 0) & (gt_disp < 192)
epe = np.mean(np.abs(gt_disp[mask] - temp[mask]))
print(savename, epe, np.min(gt_disp), np.max(gt_disp))
skimage.io.imsave(savename, (temp * 256).astype('uint16'))
return epe
def test(leftname, rightname, savename, gt_disp):
input1, input2, height, width = load_data_imn(leftname, rightname)
input1 = Variable(input1, requires_grad = False)
input2 = Variable(input2, requires_grad = False)
model.eval()
start_time = time()
if opt.cuda:
input1 = input1.cuda()
input2 = input2.cuda()
input_var = torch.cat((input1, input2), 0)
input_var = input_var.unsqueeze(0)
with torch.no_grad():
predictions = model(input_var)
if len(predictions) == 5: # aanet
prediction = predictions[-1]
elif len(predictions) > 2: # ganet, psmnet
prediction = predictions[0]
elif len(predictions) == 2: # two-stage nets
prediction = predictions[1]
else:
prediction = predictions
if prediction.dim() == 4:
prediction = prediction.squeeze(0)
end_time = time()
print("Processing time: {:.4f}".format(end_time - start_time))
temp = prediction.cpu()
temp = temp.detach().numpy()
temp = temp[0, :height, :width]
plot_disparity(savename, temp, np.max(gt_disp)+5, cmap='rainbow')
mask = (gt_disp > 0) & (gt_disp < 192)
epe = np.mean(np.abs(gt_disp[mask] - temp[mask]))
err_map = np.abs(temp - gt_disp)
err_name = savename.replace('disp', 'err')
plot_disparity(err_name, err_map, 30, cmap='turbo')
savename_pfm = savename.replace('png','pfm')
temp = np.flipud(temp)
return epe
def plot_disparity(savename, data, max_disp, cmap='turbo'):
plt.imsave(savename, data, vmin=0, vmax=max_disp, cmap=cmap)
if __name__ == "__main__":
file_path = opt.datapath
file_list = opt.list
f = open(file_list, 'r')
filelist = f.readlines()
error = 0
for index in range(len(filelist)):
current_file = filelist[index].split()
if opt.kitti2015 or opt.kitti2012:
leftname = os.path.join(file_path, current_file[0])
rightname = os.path.join(file_path, current_file[1])
savename = os.path.join(opt.savepath, current_file[0].split("/")[-1])
error += test_kitti(leftname, rightname, savename)
if opt.sceneflow:
leftname = file_path + current_file[0]
rightname = file_path + current_file[1]
leftgtname = file_path + current_file[2]
disp_left_gt, height, width = readPFM(leftgtname)
savenamegt = opt.savepath + "gt_" + "_".join(current_file[2].split("/")[-4:]).replace('pfm', 'png').replace('left', 'disp')
plot_disparity(savenamegt, disp_left_gt, np.max(disp_left_gt)+5, cmap='rainbow')
savename = opt.savepath + "%s_" % opt.model + "_".join(current_file[2].split("/")[-4:]).replace('pfm', 'png').replace('left', 'disp')
epe = test(leftname, rightname, savename, disp_left_gt)
error += epe
print(leftname, rightname, savename, epe)
if opt.middlebury:
leftname = file_path + current_file[0]
rightname = file_path + current_file[1]
temppath = opt.savepath.replace(opt.savepath.split("/")[-2], opt.savepath.split("/")[-2]+"/images")
#img_path = Path(temppath)
#img_path.makedirs_p()
savename = opt.savepath + "/".join(leftname.split("/")[-4:-1]) + ".png"
error += test_md(leftname, rightname, savename)
if opt.eth3d:
leftname = file_path + current_file[0]
rightname = file_path + current_file[1]
savename = opt.savepath + "low_res_two_view/" + leftname.split("/")[-2] + ".pfm"
error += test_eth3d(leftname, rightname, savename)
if index > 200:
break
print("EPE:", error / len(filelist))
| [
"torch.from_numpy",
"numpy.ascontiguousarray",
"torch.cuda.synchronize",
"numpy.lib.pad",
"path.Path",
"torch.cuda.is_available",
"sys.exit",
"networks.DispNetC.DispNetC",
"networks.GANet_deep.GANet",
"networks.aanet.AANet",
"os.path.exists",
"numpy.mean",
"numpy.reshape",
"argparse.Argume... | [((780, 825), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""FADNet"""'}), "(description='FADNet')\n", (803, 825), False, 'import argparse\n'), ((2617, 2644), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (2634, 2644), False, 'import torch\n'), ((3161, 3199), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {'device_ids': '[0]'}), '(model, device_ids=[0])\n', (3176, 3199), True, 'import torch.nn as nn\n'), ((2522, 2547), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2545, 2547), False, 'import torch\n'), ((2556, 2584), 'os.path.exists', 'os.path.exists', (['opt.savepath'], {}), '(opt.savepath)\n', (2570, 2584), False, 'import os\n'), ((2590, 2615), 'os.makedirs', 'os.makedirs', (['opt.savepath'], {}), '(opt.savepath)\n', (2601, 2615), False, 'import os\n'), ((2662, 2694), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (2684, 2694), False, 'import torch\n'), ((2734, 2753), 'networks.stackhourglass.PSMNet', 'PSMNet', (['opt.maxdisp'], {}), '(opt.maxdisp)\n', (2740, 2753), False, 'from networks.stackhourglass import PSMNet\n'), ((3261, 3286), 'torch.load', 'torch.load', (['opt.loadmodel'], {}), '(opt.loadmodel)\n', (3271, 3286), False, 'import torch\n'), ((5457, 5476), 'numpy.shape', 'np.shape', (['temp_data'], {}), '(temp_data)\n', (5465, 5476), True, 'import numpy as np\n'), ((5939, 5990), 'numpy.ones', 'np.ones', (['[1, 3, crop_height, crop_width]', '"""float32"""'], {}), "([1, 3, crop_height, crop_width], 'float32')\n", (5946, 5990), True, 'import numpy as np\n'), ((6045, 6096), 'numpy.ones', 'np.ones', (['[1, 3, crop_height, crop_width]', '"""float32"""'], {}), "([1, 3, crop_height, crop_width], 'float32')\n", (6052, 6096), True, 'import numpy as np\n'), ((6272, 6292), 'PIL.Image.open', 'Image.open', (['leftname'], {}), '(leftname)\n', (6282, 6292), False, 'from PIL import Image\n'), ((6305, 6326), 
'PIL.Image.open', 'Image.open', (['rightname'], {}), '(rightname)\n', (6315, 6326), False, 'from PIL import Image\n'), ((6338, 6352), 'numpy.shape', 'np.shape', (['left'], {}), '(left)\n', (6346, 6352), True, 'import numpy as np\n'), ((6410, 6449), 'numpy.zeros', 'np.zeros', (['[6, height, width]', '"""float32"""'], {}), "([6, height, width], 'float32')\n", (6418, 6449), True, 'import numpy as np\n'), ((6461, 6477), 'numpy.asarray', 'np.asarray', (['left'], {}), '(left)\n', (6471, 6477), True, 'import numpy as np\n'), ((6490, 6507), 'numpy.asarray', 'np.asarray', (['right'], {}), '(right)\n', (6500, 6507), True, 'import numpy as np\n'), ((7204, 7223), 'skimage.io.imread', 'io.imread', (['leftname'], {}), '(leftname)\n', (7213, 7223), False, 'from skimage import io\n'), ((7236, 7256), 'skimage.io.imread', 'io.imread', (['rightname'], {}), '(rightname)\n', (7245, 7256), False, 'from skimage import io\n'), ((7902, 8006), 'numpy.lib.pad', 'np.lib.pad', (['img_left', '((0, 0), (0, bottom_pad), (0, right_pad))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(img_left, ((0, 0), (0, bottom_pad), (0, right_pad)), mode=\n 'constant', constant_values=0)\n", (7912, 8006), True, 'import numpy as np\n'), ((8010, 8115), 'numpy.lib.pad', 'np.lib.pad', (['img_right', '((0, 0), (0, bottom_pad), (0, right_pad))'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(img_right, ((0, 0), (0, bottom_pad), (0, right_pad)), mode=\n 'constant', constant_values=0)\n", (8020, 8115), True, 'import numpy as np\n'), ((8355, 8392), 'torch.autograd.Variable', 'Variable', (['input1'], {'requires_grad': '(False)'}), '(input1, requires_grad=False)\n', (8363, 8392), False, 'from torch.autograd import Variable\n'), ((8408, 8445), 'torch.autograd.Variable', 'Variable', (['input2'], {'requires_grad': '(False)'}), '(input2, requires_grad=False)\n', (8416, 8445), False, 'from torch.autograd import Variable\n'), ((8643, 8667), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), 
'()\n', (8665, 8667), False, 'import torch\n'), ((8685, 8691), 'time.time', 'time', ([], {}), '()\n', (8689, 8691), False, 'from time import time\n'), ((8806, 8830), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (8828, 8830), False, 'import torch\n'), ((8846, 8852), 'time.time', 'time', ([], {}), '()\n', (8850, 8852), False, 'from time import time\n'), ((9484, 9499), 'numpy.flipud', 'np.flipud', (['temp'], {}), '(temp)\n', (9493, 9499), True, 'import numpy as np\n'), ((9516, 9534), 'path.Path', 'Path', (['savepfm_path'], {}), '(savepfm_path)\n', (9520, 9534), False, 'from path import Path\n'), ((9985, 10022), 'torch.autograd.Variable', 'Variable', (['input1'], {'requires_grad': '(False)'}), '(input1, requires_grad=False)\n', (9993, 10022), False, 'from torch.autograd import Variable\n'), ((10038, 10075), 'torch.autograd.Variable', 'Variable', (['input2'], {'requires_grad': '(False)'}), '(input2, requires_grad=False)\n', (10046, 10075), False, 'from torch.autograd import Variable\n'), ((10273, 10297), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (10295, 10297), False, 'import torch\n'), ((10315, 10321), 'time.time', 'time', ([], {}), '()\n', (10319, 10321), False, 'from time import time\n'), ((10436, 10460), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (10458, 10460), False, 'import torch\n'), ((10476, 10482), 'time.time', 'time', ([], {}), '()\n', (10480, 10482), False, 'from time import time\n'), ((11033, 11048), 'numpy.flipud', 'np.flipud', (['temp'], {}), '(temp)\n', (11042, 11048), True, 'import numpy as np\n'), ((11536, 11573), 'torch.autograd.Variable', 'Variable', (['input1'], {'requires_grad': '(False)'}), '(input1, requires_grad=False)\n', (11544, 11573), False, 'from torch.autograd import Variable\n'), ((11589, 11626), 'torch.autograd.Variable', 'Variable', (['input2'], {'requires_grad': '(False)'}), '(input2, requires_grad=False)\n', (11597, 11626), False, 'from torch.autograd 
import Variable\n'), ((12654, 12691), 'torch.autograd.Variable', 'Variable', (['input1'], {'requires_grad': '(False)'}), '(input1, requires_grad=False)\n', (12662, 12691), False, 'from torch.autograd import Variable\n'), ((12707, 12744), 'torch.autograd.Variable', 'Variable', (['input2'], {'requires_grad': '(False)'}), '(input2, requires_grad=False)\n', (12715, 12744), False, 'from torch.autograd import Variable\n'), ((12782, 12788), 'time.time', 'time', ([], {}), '()\n', (12786, 12788), False, 'from time import time\n'), ((13443, 13449), 'time.time', 'time', ([], {}), '()\n', (13447, 13449), False, 'from time import time\n'), ((13799, 13821), 'numpy.abs', 'np.abs', (['(temp - gt_disp)'], {}), '(temp - gt_disp)\n', (13805, 13821), True, 'import numpy as np\n'), ((13987, 14002), 'numpy.flipud', 'np.flipud', (['temp'], {}), '(temp)\n', (13996, 14002), True, 'import numpy as np\n'), ((14084, 14144), 'matplotlib.pyplot.imsave', 'plt.imsave', (['savename', 'data'], {'vmin': '(0)', 'vmax': 'max_disp', 'cmap': 'cmap'}), '(savename, data, vmin=0, vmax=max_disp, cmap=cmap)\n', (14094, 14144), True, 'import matplotlib.pyplot as plt\n'), ((2793, 2811), 'networks.GANet_deep.GANet', 'GANet', (['opt.maxdisp'], {}), '(opt.maxdisp)\n', (2798, 2811), False, 'from networks.GANet_deep import GANet\n'), ((3848, 3872), 're.findall', 're.findall', (['"""\\\\d+"""', 'line'], {}), "('\\\\d+', line)\n", (3858, 3872), False, 'import re\n'), ((4433, 4452), 'struct.unpack', 'unpack', (['fmt', 'buffer'], {}), '(fmt, buffer)\n', (4439, 4452), False, 'from struct import unpack\n'), ((4467, 4499), 'numpy.reshape', 'np.reshape', (['img', '(height, width)'], {}), '(img, (height, width))\n', (4477, 4499), True, 'import numpy as np\n'), ((4514, 4528), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (4523, 4528), True, 'import numpy as np\n'), ((5593, 5642), 'numpy.zeros', 'np.zeros', (['[6, crop_height, crop_width]', '"""float32"""'], {}), "([6, crop_height, crop_width], 'float32')\n", (5601, 
5642), True, 'import numpy as np\n'), ((6621, 6633), 'numpy.std', 'np.std', (['r[:]'], {}), '(r[:])\n', (6627, 6633), True, 'import numpy as np\n'), ((6681, 6693), 'numpy.std', 'np.std', (['g[:]'], {}), '(g[:])\n', (6687, 6693), True, 'import numpy as np\n'), ((6741, 6753), 'numpy.std', 'np.std', (['b[:]'], {}), '(b[:])\n', (6747, 6753), True, 'import numpy as np\n'), ((6900, 6912), 'numpy.std', 'np.std', (['r[:]'], {}), '(r[:])\n', (6906, 6912), True, 'import numpy as np\n'), ((6960, 6972), 'numpy.std', 'np.std', (['g[:]'], {}), '(g[:])\n', (6966, 6972), True, 'import numpy as np\n'), ((7020, 7032), 'numpy.std', 'np.std', (['b[:]'], {}), '(b[:])\n', (7026, 7032), True, 'import numpy as np\n'), ((7366, 7385), 'utils.preprocess.default_transform', 'default_transform', ([], {}), '()\n', (7383, 7385), False, 'from utils.preprocess import scale_disp, default_transform\n'), ((7495, 7525), 'numpy.zeros', 'np.zeros', (['[3, h, w]', '"""float32"""'], {}), "([3, h, w], 'float32')\n", (7503, 7525), True, 'import numpy as np\n'), ((7546, 7576), 'numpy.zeros', 'np.zeros', (['[3, h, w]', '"""float32"""'], {}), "([3, h, w], 'float32')\n", (7554, 7576), True, 'import numpy as np\n'), ((8565, 8595), 'torch.cat', 'torch.cat', (['(input1, input2)', '(0)'], {}), '((input1, input2), 0)\n', (8574, 8595), False, 'import torch\n'), ((8701, 8716), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8714, 8716), False, 'import torch\n'), ((10195, 10225), 'torch.cat', 'torch.cat', (['(input1, input2)', '(0)'], {}), '((input1, input2), 0)\n', (10204, 10225), False, 'import torch\n'), ((10331, 10346), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10344, 10346), False, 'import torch\n'), ((11746, 11776), 'torch.cat', 'torch.cat', (['(input1, input2)', '(0)'], {}), '((input1, input2), 0)\n', (11755, 11776), False, 'import torch\n'), ((11829, 11844), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11842, 11844), False, 'import torch\n'), ((12889, 12919), 'torch.cat', 'torch.cat', 
(['(input1, input2)', '(0)'], {}), '((input1, input2), 0)\n', (12898, 12919), False, 'import torch\n'), ((12972, 12987), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12985, 12987), False, 'import torch\n'), ((13748, 13782), 'numpy.abs', 'np.abs', (['(gt_disp[mask] - temp[mask])'], {}), '(gt_disp[mask] - temp[mask])\n', (13754, 13782), True, 'import numpy as np\n'), ((2851, 2869), 'networks.aanet.AANet', 'AANet', (['opt.maxdisp'], {}), '(opt.maxdisp)\n', (2856, 2869), False, 'from networks.aanet import AANet\n'), ((6604, 6617), 'numpy.mean', 'np.mean', (['r[:]'], {}), '(r[:])\n', (6611, 6617), True, 'import numpy as np\n'), ((6664, 6677), 'numpy.mean', 'np.mean', (['g[:]'], {}), '(g[:])\n', (6671, 6677), True, 'import numpy as np\n'), ((6724, 6737), 'numpy.mean', 'np.mean', (['b[:]'], {}), '(b[:])\n', (6731, 6737), True, 'import numpy as np\n'), ((6883, 6896), 'numpy.mean', 'np.mean', (['r[:]'], {}), '(r[:])\n', (6890, 6896), True, 'import numpy as np\n'), ((6943, 6956), 'numpy.mean', 'np.mean', (['g[:]'], {}), '(g[:])\n', (6950, 6956), True, 'import numpy as np\n'), ((7003, 7016), 'numpy.mean', 'np.mean', (['b[:]'], {}), '(b[:])\n', (7010, 7016), True, 'import numpy as np\n'), ((9230, 9247), 'numpy.isinf', 'np.isinf', (['gt_disp'], {}), '(gt_disp)\n', (9238, 9247), True, 'import numpy as np\n'), ((9324, 9358), 'numpy.abs', 'np.abs', (['(gt_disp[mask] - temp[mask])'], {}), '(gt_disp[mask] - temp[mask])\n', (9330, 9358), True, 'import numpy as np\n'), ((9390, 9405), 'numpy.min', 'np.min', (['gt_disp'], {}), '(gt_disp)\n', (9396, 9405), True, 'import numpy as np\n'), ((9407, 9422), 'numpy.max', 'np.max', (['gt_disp'], {}), '(gt_disp)\n', (9413, 9422), True, 'import numpy as np\n'), ((10829, 10846), 'numpy.isinf', 'np.isinf', (['gt_disp'], {}), '(gt_disp)\n', (10837, 10846), True, 'import numpy as np\n'), ((10921, 10955), 'numpy.abs', 'np.abs', (['(gt_disp[mask] - temp[mask])'], {}), '(gt_disp[mask] - temp[mask])\n', (10927, 10955), True, 'import numpy as 
np\n'), ((10987, 11002), 'numpy.min', 'np.min', (['gt_disp'], {}), '(gt_disp)\n', (10993, 11002), True, 'import numpy as np\n'), ((11004, 11019), 'numpy.max', 'np.max', (['gt_disp'], {}), '(gt_disp)\n', (11010, 11019), True, 'import numpy as np\n'), ((12215, 12262), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['gt_disp'], {'dtype': 'np.float32'}), '(gt_disp, dtype=np.float32)\n', (12235, 12262), True, 'import numpy as np\n'), ((12335, 12369), 'numpy.abs', 'np.abs', (['(gt_disp[mask] - temp[mask])'], {}), '(gt_disp[mask] - temp[mask])\n', (12341, 12369), True, 'import numpy as np\n'), ((12401, 12416), 'numpy.min', 'np.min', (['gt_disp'], {}), '(gt_disp)\n', (12407, 12416), True, 'import numpy as np\n'), ((12418, 12433), 'numpy.max', 'np.max', (['gt_disp'], {}), '(gt_disp)\n', (12424, 12433), True, 'import numpy as np\n'), ((13651, 13666), 'numpy.max', 'np.max', (['gt_disp'], {}), '(gt_disp)\n', (13657, 13666), True, 'import numpy as np\n'), ((14456, 14496), 'os.path.join', 'os.path.join', (['file_path', 'current_file[0]'], {}), '(file_path, current_file[0])\n', (14468, 14496), False, 'import os\n'), ((14521, 14561), 'os.path.join', 'os.path.join', (['file_path', 'current_file[1]'], {}), '(file_path, current_file[1])\n', (14533, 14561), False, 'import os\n'), ((2910, 2937), 'networks.FADNet.FADNet', 'FADNet', ([], {'maxdisp': 'opt.maxdisp'}), '(maxdisp=opt.maxdisp)\n', (2916, 2937), False, 'from networks.FADNet import FADNet\n'), ((3735, 3746), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3743, 3746), False, 'import sys\n'), ((6154, 6176), 'torch.from_numpy', 'torch.from_numpy', (['left'], {}), '(left)\n', (6170, 6176), False, 'import torch\n'), ((6186, 6209), 'torch.from_numpy', 'torch.from_numpy', (['right'], {}), '(right)\n', (6202, 6209), False, 'import torch\n'), ((7679, 7700), 'numpy.std', 'np.std', (['left[:, :, c]'], {}), '(left[:, :, c])\n', (7685, 7700), True, 'import numpy as np\n'), ((7779, 7801), 'numpy.std', 'np.std', (['right[:, :, c]'], 
{}), '(right[:, :, c])\n', (7785, 7801), True, 'import numpy as np\n'), ((8114, 8140), 'torch.from_numpy', 'torch.from_numpy', (['img_left'], {}), '(img_left)\n', (8130, 8140), False, 'import torch\n'), ((8150, 8177), 'torch.from_numpy', 'torch.from_numpy', (['img_right'], {}), '(img_right)\n', (8166, 8177), False, 'import torch\n'), ((2980, 3025), 'networks.DispNetC.DispNetC', 'DispNetC', ([], {'resBlock': '(False)', 'maxdisp': 'opt.maxdisp'}), '(resBlock=False, maxdisp=opt.maxdisp)\n', (2988, 3025), False, 'from networks.DispNetC import DispNetC\n'), ((7653, 7675), 'numpy.mean', 'np.mean', (['left[:, :, c]'], {}), '(left[:, :, c])\n', (7660, 7675), True, 'import numpy as np\n'), ((7752, 7775), 'numpy.mean', 'np.mean', (['right[:, :, c]'], {}), '(right[:, :, c])\n', (7759, 7775), True, 'import numpy as np\n'), ((15142, 15162), 'numpy.max', 'np.max', (['disp_left_gt'], {}), '(disp_left_gt)\n', (15148, 15162), True, 'import numpy as np\n'), ((3063, 3106), 'networks.FADNet.FADNet', 'FADNet', ([], {'resBlock': '(False)', 'maxdisp': 'opt.maxdisp'}), '(resBlock=False, maxdisp=opt.maxdisp)\n', (3069, 3106), False, 'from networks.FADNet import FADNet\n'), ((3139, 3151), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (3147, 3151), False, 'import sys\n')] |
import numpy as np
def init_spin_state_2d(nsize=16):
"""Initialize spin state"""
return 2*np.random.randint(2, size=(nsize, nsize)) - 1
def mcmh_algorithm(state, beta=1, J=1):
    """Perform one Metropolis-Hastings sweep over a 2-D Ising spin lattice.

    Fixes relative to the original draft:
    * ``down``/``right`` used ``&`` (bitwise AND) instead of ``%``, so the
      periodic wrap-around was wrong for general lattice sizes.
    * the acceptance test referenced an undefined name ``delta``; the
      Metropolis rule (accept if ``delta_e < 0``) is used instead.
    * the normalization used an undefined ``nsize``; the actual number of
      sites ``height * width`` is used.
    * the coupling constant ``J`` was an undefined global; it is now a
      backward-compatible keyword argument defaulting to 1.

    Args:
        state: 2-D numpy array of +/-1 spins; updated in place.
        beta: inverse temperature.
        J: spin-spin coupling constant.

    Returns:
        Tuple ``(state, mean site energy, mean magnetization)``.
    """
    height, width = state.shape
    energy = 0
    for i in range(height):
        for j in range(width):
            # Periodic (wrap-around) neighbor indices.
            up, down = (i - 1) % height, (i + 1) % height
            left, right = (j - 1) % width, (j + 1) % width
            neighbour_sum = (state[up, j] + state[down, j]
                             + state[i, left] + state[i, right])
            # Site interaction energy before and after a hypothetical flip.
            e_spin_init = J * state[i, j] * neighbour_sum
            e_spin_flip = -e_spin_init
            delta_e = e_spin_flip - e_spin_init
            # Metropolis acceptance: always take an energy decrease,
            # otherwise accept with probability exp(-beta * delta_e).
            if delta_e < 0 or np.random.rand() < np.exp(-beta * delta_e):
                state[i, j] = -state[i, j]
                energy += e_spin_flip
            else:
                energy += e_spin_init
    n_sites = height * width
    return state, energy / n_sites, np.sum(state) / n_sites
def run_simulation(num_iter, beta=16, relaxation=0, nsize=10):
    """Run an Ising model simulation and return averaged observables.

    Fixes relative to the original draft:
    * ``init_spin_state`` did not exist -> calls ``init_spin_state_2d``.
    * ``for step in num_iter`` iterated over an int -> ``range(num_iter)``.
    * ``relaxation`` was an undefined global -> keyword argument.
    * the averages divided by ``num_iter`` although only
      ``num_iter - relaxation`` sweeps were sampled -> correct count used.
    * the computed observables were silently discarded -> now returned.

    Args:
        num_iter: total number of Monte Carlo sweeps.
        beta: inverse temperature passed to ``mcmh_algorithm``.
        relaxation: number of initial sweeps discarded for equilibration.
        nsize: lattice edge length.

    Returns:
        Tuple ``(avg_energy, avg_mag, specific_heat,
        magnetic_susceptibility)``.
    """
    # Randomly initialize spin state
    state = init_spin_state_2d(nsize=nsize)
    n_samples = num_iter - relaxation
    avg_energy = avg_mag = sqd_energy = sqd_mag = 0.0
    for step in range(num_iter):
        # Update state
        state, energy, mag = mcmh_algorithm(state, beta)
        if step < relaxation:  # discard pre-equilibrium sweeps
            continue
        avg_energy += energy / n_samples
        avg_mag += mag / n_samples
        sqd_energy += energy ** 2. / n_samples
        sqd_mag += mag ** 2. / n_samples
    # Fluctuation-dissipation estimates from the first two moments.
    specific_heat = beta * (sqd_energy - avg_energy ** 2.)
    magnetic_susceptibility = beta ** 2. * (sqd_mag - avg_mag ** 2.)
    return avg_energy, avg_mag, specific_heat, magnetic_susceptibility
| [
"numpy.exp",
"numpy.sum",
"numpy.random.randint",
"numpy.random.rand"
] | [((100, 141), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(nsize, nsize)'}), '(2, size=(nsize, nsize))\n', (117, 141), True, 'import numpy as np\n'), ((1408, 1421), 'numpy.sum', 'np.sum', (['state'], {}), '(state)\n', (1414, 1421), True, 'import numpy as np\n'), ((1246, 1262), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1260, 1262), True, 'import numpy as np\n'), ((1265, 1288), 'numpy.exp', 'np.exp', (['(-beta * delta_e)'], {}), '(-beta * delta_e)\n', (1271, 1288), True, 'import numpy as np\n')] |
"""Utility method to draw the eye contact"""
import cv2 as cv
import numpy as np
def draw_contact(image_in, face, gaze_vector):
    """Draw the face bounding box: green on eye contact, red otherwise.

    The original duplicated the entire ``cv.rectangle`` call in both
    branches; only the color depends on the contact test, so a single
    code path is used here.

    Args:
        image_in: image to annotate (``cv.rectangle`` draws in place).
        face: bounding box whose first two values are the top-left corner
            and last two the width/height offsets.
        gaze_vector: gaze angles forwarded to ``is_contact``.

    Returns:
        The annotated image (same object as ``image_in``).
    """
    # BGR: green for contact, red otherwise.
    color = (0, 255, 0) if is_contact(gaze_vector) else (0, 0, 255)
    top_left = tuple(np.round(face[:2]).astype(np.int32))
    bottom_right = tuple(np.round(np.add(face[:2], face[2:])).astype(np.int32))
    cv.rectangle(
        image_in, top_left, bottom_right,
        color=color, thickness=1, lineType=cv.LINE_AA,
    )
    return image_in
def is_contact(gaze_vector):
    """Return True when the gaze falls within 3 degrees of the camera axis.

    The 3 degree cone proved to be a good compromise between accuracy
    and realism in live testing.
    """
    threshold = np.radians(3)
    theta = np.absolute(gaze_vector[0])
    phi = np.absolute(gaze_vector[1])
    return phi < threshold and theta < threshold
| [
"numpy.radians",
"numpy.absolute",
"numpy.add",
"numpy.round"
] | [((1033, 1046), 'numpy.radians', 'np.radians', (['(3)'], {}), '(3)\n', (1043, 1046), True, 'import numpy as np\n'), ((1060, 1087), 'numpy.absolute', 'np.absolute', (['gaze_vector[1]'], {}), '(gaze_vector[1])\n', (1071, 1087), True, 'import numpy as np\n'), ((1101, 1128), 'numpy.absolute', 'np.absolute', (['gaze_vector[0]'], {}), '(gaze_vector[0])\n', (1112, 1128), True, 'import numpy as np\n'), ((267, 285), 'numpy.round', 'np.round', (['face[:2]'], {}), '(face[:2])\n', (275, 285), True, 'import numpy as np\n'), ((563, 581), 'numpy.round', 'np.round', (['face[:2]'], {}), '(face[:2])\n', (571, 581), True, 'import numpy as np\n'), ((341, 367), 'numpy.add', 'np.add', (['face[:2]', 'face[2:]'], {}), '(face[:2], face[2:])\n', (347, 367), True, 'import numpy as np\n'), ((637, 663), 'numpy.add', 'np.add', (['face[:2]', 'face[2:]'], {}), '(face[:2], face[2:])\n', (643, 663), True, 'import numpy as np\n')] |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal data reader for GQN TFRecord datasets."""
import collections
import os
import tensorflow as tf
import numpy as np
from PIL import Image
# Metadata describing one GQN dataset variant on disk; `train_size` and
# `test_size` count TFRecord files (see _get_dataset_files), `frame_size`
# is the square image edge in pixels and `sequence_size` the number of
# frames (and camera poses) stored per example.
DatasetInfo = collections.namedtuple(
    'DatasetInfo',
    ['basepath', 'train_size', 'test_size', 'frame_size', 'sequence_size']
)
# Context observations: frames plus the cameras they were captured from.
Context = collections.namedtuple('Context', ['frames', 'cameras'])
# A query pairs context observations with the camera to predict from.
Query = collections.namedtuple('Query', ['context', 'query_camera'])
# A task couples a query with its ground-truth target frame.
TaskData = collections.namedtuple('TaskData', ['query', 'target'])
# Registry of the known GQN dataset variants, keyed by dataset name.
_DATASETS = dict(
    jaco=DatasetInfo(
        basepath='jaco',
        train_size=3600,
        test_size=400,
        frame_size=64,
        sequence_size=11),
    mazes=DatasetInfo(
        basepath='mazes',
        train_size=1080,
        test_size=120,
        frame_size=84,
        sequence_size=300),
    rooms_free_camera_with_object_rotations=DatasetInfo(
        basepath='rooms_free_camera_with_object_rotations',
        train_size=2034,
        test_size=226,
        frame_size=128,
        sequence_size=10),
    rooms_ring_camera=DatasetInfo(
        basepath='rooms_ring_camera',
        train_size=2160,
        test_size=240,
        frame_size=64,
        sequence_size=10),
    rooms_free_camera_no_object_rotations=DatasetInfo(
        basepath='rooms_free_camera_no_object_rotations',
        train_size=2160,
        test_size=240,
        frame_size=64,
        sequence_size=10),
    shepard_metzler_5_parts=DatasetInfo(
        basepath='shepard_metzler_5_parts',
        train_size=900,
        test_size=100,
        frame_size=64,
        sequence_size=15),
    shepard_metzler_7_parts=DatasetInfo(
        basepath='shepard_metzler_7_parts',
        train_size=900,
        test_size=100,
        frame_size=64,
        sequence_size=15)
)
# Number of color channels per frame.
_NUM_CHANNELS = 3
# Raw camera parameters per view: position (x, y, z) plus yaw and pitch
# (see _preprocess_cameras for the slicing).
_NUM_RAW_CAMERA_PARAMS = 5
def _get_dataset_files(dateset_info, mode, root):
"""Generates lists of files for a given dataset version."""
basepath = dateset_info.basepath
base = os.path.join(root, basepath, mode)
if mode == 'train':
num_files = dateset_info.train_size
else:
num_files = dateset_info.test_size
length = len(str(num_files))
template = '{:0%d}-of-{:0%d}.tfrecord' % (length, length)
return [os.path.join(base, template.format(i + 1, num_files))
for i in range(num_files)]
def _convert_frame_data(jpeg_data):
    """Decode a JPEG string tensor into float32 pixel values."""
    return tf.image.convert_image_dtype(
        tf.image.decode_jpeg(jpeg_data), dtype=tf.float32)
def _get_randomized_indices(dataset_info, example_size):
    """Draw ``example_size`` distinct random indices into a frame sequence."""
    shuffled = tf.random_shuffle(tf.range(0, dataset_info.sequence_size))
    return tf.slice(shuffled, begin=[0], size=[example_size])
def _preprocess_frames(example, indices, dataset_info, example_size, custom_frame_size):
    """Decode, gather and (optionally) resize the frames of one example.

    Returns a tensor of shape ``(-1, example_size, H, W, C)`` where H/W
    are either the dataset's native frame size or ``custom_frame_size``.
    """
    gathered = tf.gather(tf.concat(example['frames'], axis=0), indices, axis=0)
    decoded = tf.map_fn(
        _convert_frame_data, tf.reshape(gathered, [-1]),
        dtype=tf.float32, back_prop=False)
    native_dims = tuple(
        [dataset_info.frame_size] * 2 + [_NUM_CHANNELS])
    frames = tf.reshape(
        decoded, (-1, example_size) + native_dims)
    if (custom_frame_size and
            custom_frame_size != dataset_info.frame_size):
        # Flatten the example dimension, resize, then restore it.
        frames = tf.reshape(frames, (-1,) + native_dims)
        target_dims = (custom_frame_size,) * 2 + (_NUM_CHANNELS,)
        frames = tf.image.resize_bilinear(
            frames, target_dims[:2], align_corners=True)
        frames = tf.reshape(
            frames, (-1, example_size) + target_dims)
    return frames
def _preprocess_cameras(example, indices, dataset_info):
    """Select the camera parameters matching ``indices``.

    The raw parameters are reshaped to one 5-vector per view and the
    position / yaw / pitch slices are re-concatenated in canonical order.
    """
    per_view = tf.reshape(
        example['cameras'],
        [-1, dataset_info.sequence_size, _NUM_RAW_CAMERA_PARAMS])
    per_view = tf.gather(per_view, indices, axis=1)
    position = per_view[:, :, 0:3]
    yaw = per_view[:, :, 3:4]
    pitch = per_view[:, :, 4:5]
    return tf.concat(
        [position, yaw, pitch], axis=2)
def _parse_function(example_proto, dataset_info, example_size, custom_frame_size):
    """Parse one serialized example into ``(frames, cameras)`` tensors."""
    features = {
        'frames': tf.FixedLenFeature(
            shape=dataset_info.sequence_size, dtype=tf.string),
        'cameras': tf.FixedLenFeature(
            shape=[dataset_info.sequence_size * _NUM_RAW_CAMERA_PARAMS],
            dtype=tf.float32),
    }
    example = tf.parse_single_example(example_proto, features)
    # The same random frame indices are used for frames and cameras.
    indices = _get_randomized_indices(dataset_info, example_size)
    frames = _preprocess_frames(example, indices, dataset_info,
                                example_size, custom_frame_size)
    cameras = _preprocess_cameras(example, indices, dataset_info)
    return frames, cameras
def make_dataset(dataset, root, context_size=5, mode='train', custom_frame_size=None, load_all=False):
    """Create a tf.data pipeline of parsed ``(frames, cameras)`` examples.

    With ``load_all`` every frame but one is used as context; otherwise
    ``context_size`` context frames plus one query frame are sampled.
    """
    info = _DATASETS[dataset]
    records = tf.data.TFRecordDataset(_get_dataset_files(info, mode, root))
    if load_all:
        context_size = info.sequence_size - 1
    def parse_func(example_proto):
        # example_size = context frames + 1 query frame.
        return _parse_function(example_proto, dataset_info=info,
                               example_size=context_size + 1,
                               custom_frame_size=custom_frame_size)
    return records.map(parse_func).repeat(1)
class DatasetWriter:
    """Writes batches of (frames, cameras) records as compressed .npz files.

    Output goes under ``<root>/<basepath>/<mode>/`` and progress is
    printed every 1000 records against the assumed 2M-example corpus
    (split 90/10 between train and test).
    """

    def __init__(self, dataset, mode, root):
        """
        Writes images to files, and camera info csv

        Args:
            dataset: key into the module-level ``_DATASETS`` registry.
            mode: 'train' or 'test'.
            root: root output directory (created if missing).
        """
        self.dataset_info = _DATASETS[dataset]
        self.mode = mode
        self.root = root
        self.counter = 0
        path = os.path.join(self.root, self.dataset_info.basepath, self.mode)
        os.makedirs(path, exist_ok=True)
        self.meta_file = os.path.join(path, "info.meta")

    def _num_files(self):
        """Expected number of records in this split (90/10 of 2M examples)."""
        if self.mode == 'train':
            return 2e6 * 9 / 10
        return 2e6 * 1 / 10

    def _output_dir(self):
        """Directory that receives the .npz record files."""
        return os.path.join(self.root, self.dataset_info.basepath, self.mode)

    def save_multiple(self, records):
        """Squeeze and stack several (frames, cameras) pairs into one .npz."""
        frames = []
        cameras = []
        for rec_frames, rec_cameras in records:
            frames.append(np.squeeze(rec_frames))
            cameras.append(np.squeeze(rec_cameras))
        np.savez_compressed(
            os.path.join(self._output_dir(),
                         "record-{}.npz".format(self.counter + 1)),
            frames=np.array(frames), cameras=np.array(cameras))
        self.counter += 1
        # Progress reported after incrementing, as in the original.
        if self.counter % 1000 == 0:
            print("{}% complete".format(self.counter * 100 / self._num_files()))

    def save_record(self, record):
        """Track progress for a single (frames, cameras) record.

        NOTE(review): the original never wrote the record to disk — it
        only built unused locals (``img_dir``, ``rows``).  The dead code
        is removed but the missing write is preserved as-is pending
        clarification of the intended on-disk format.
        """
        frames, cameras = record
        try:
            frames = np.squeeze(frames)
            cameras = np.squeeze(cameras)
        except ValueError:
            pass
        # Progress reported before incrementing, as in the original.
        if self.counter % 1000 == 0:
            print("{}% complete".format(self.counter * 100 / self._num_files()))
        self.counter += 1
| [
"tensorflow.data.TFRecordDataset",
"tensorflow.slice",
"collections.namedtuple",
"tensorflow.image.convert_image_dtype",
"os.makedirs",
"tensorflow.parse_single_example",
"tensorflow.image.resize_bilinear",
"os.path.join",
"tensorflow.FixedLenFeature",
"tensorflow.random_shuffle",
"tensorflow.ra... | [((739, 852), 'collections.namedtuple', 'collections.namedtuple', (['"""DatasetInfo"""', "['basepath', 'train_size', 'test_size', 'frame_size', 'sequence_size']"], {}), "('DatasetInfo', ['basepath', 'train_size',\n 'test_size', 'frame_size', 'sequence_size'])\n", (761, 852), False, 'import collections\n'), ((869, 925), 'collections.namedtuple', 'collections.namedtuple', (['"""Context"""', "['frames', 'cameras']"], {}), "('Context', ['frames', 'cameras'])\n", (891, 925), False, 'import collections\n'), ((934, 994), 'collections.namedtuple', 'collections.namedtuple', (['"""Query"""', "['context', 'query_camera']"], {}), "('Query', ['context', 'query_camera'])\n", (956, 994), False, 'import collections\n'), ((1006, 1061), 'collections.namedtuple', 'collections.namedtuple', (['"""TaskData"""', "['query', 'target']"], {}), "('TaskData', ['query', 'target'])\n", (1028, 1061), False, 'import collections\n'), ((2547, 2581), 'os.path.join', 'os.path.join', (['root', 'basepath', 'mode'], {}), '(root, basepath, mode)\n', (2559, 2581), False, 'import os\n'), ((2941, 2972), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['jpeg_data'], {}), '(jpeg_data)\n', (2961, 2972), True, 'import tensorflow as tf\n'), ((2982, 3044), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['decoded_frames'], {'dtype': 'tf.float32'}), '(decoded_frames, dtype=tf.float32)\n', (3010, 3044), True, 'import tensorflow as tf\n'), ((3194, 3233), 'tensorflow.range', 'tf.range', (['(0)', 'dataset_info.sequence_size'], {}), '(0, dataset_info.sequence_size)\n', (3202, 3233), True, 'import tensorflow as tf\n'), ((3248, 3274), 'tensorflow.random_shuffle', 'tf.random_shuffle', (['indices'], {}), '(indices)\n', (3265, 3274), True, 'import tensorflow as tf\n'), ((3289, 3338), 'tensorflow.slice', 'tf.slice', (['indices'], {'begin': '[0]', 'size': '[example_size]'}), '(indices, begin=[0], size=[example_size])\n', (3297, 3338), True, 'import tensorflow as tf\n'), 
((3528, 3564), 'tensorflow.concat', 'tf.concat', (["example['frames']"], {'axis': '(0)'}), "(example['frames'], axis=0)\n", (3537, 3564), True, 'import tensorflow as tf\n'), ((3578, 3612), 'tensorflow.gather', 'tf.gather', (['frames', 'indices'], {'axis': '(0)'}), '(frames, indices, axis=0)\n', (3587, 3612), True, 'import tensorflow as tf\n'), ((3907, 3972), 'tensorflow.reshape', 'tf.reshape', (['frames', '((-1, example_size) + dataset_image_dimensions)'], {}), '(frames, (-1, example_size) + dataset_image_dimensions)\n', (3917, 3972), True, 'import tensorflow as tf\n'), ((4673, 4762), 'tensorflow.reshape', 'tf.reshape', (['raw_pose_params', '[-1, dataset_info.sequence_size, _NUM_RAW_CAMERA_PARAMS]'], {}), '(raw_pose_params, [-1, dataset_info.sequence_size,\n _NUM_RAW_CAMERA_PARAMS])\n', (4683, 4762), True, 'import tensorflow as tf\n'), ((4798, 4841), 'tensorflow.gather', 'tf.gather', (['raw_pose_params', 'indices'], {'axis': '(1)'}), '(raw_pose_params, indices, axis=1)\n', (4807, 4841), True, 'import tensorflow as tf\n'), ((4969, 5005), 'tensorflow.concat', 'tf.concat', (['[pos, yaw, pitch]'], {'axis': '(2)'}), '([pos, yaw, pitch], axis=2)\n', (4978, 5005), True, 'import tensorflow as tf\n'), ((5402, 5453), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['example_proto', 'feature_map'], {}), '(example_proto, feature_map)\n', (5425, 5453), True, 'import tensorflow as tf\n'), ((5928, 5963), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['file_names'], {}), '(file_names)\n', (5951, 5963), True, 'import tensorflow as tf\n'), ((3666, 3690), 'tensorflow.reshape', 'tf.reshape', (['frames', '[-1]'], {}), '(frames, [-1])\n', (3676, 3690), True, 'import tensorflow as tf\n'), ((4147, 4199), 'tensorflow.reshape', 'tf.reshape', (['frames', '((-1,) + dataset_image_dimensions)'], {}), '(frames, (-1,) + dataset_image_dimensions)\n', (4157, 4199), True, 'import tensorflow as tf\n'), ((4288, 4366), 'tensorflow.image.resize_bilinear', 
'tf.image.resize_bilinear', (['frames', 'new_frame_dimensions[:2]'], {'align_corners': '(True)'}), '(frames, new_frame_dimensions[:2], align_corners=True)\n', (4312, 4366), True, 'import tensorflow as tf\n'), ((4393, 4454), 'tensorflow.reshape', 'tf.reshape', (['frames', '((-1, example_size) + new_frame_dimensions)'], {}), '(frames, (-1, example_size) + new_frame_dimensions)\n', (4403, 4454), True, 'import tensorflow as tf\n'), ((5156, 5225), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', ([], {'shape': 'dataset_info.sequence_size', 'dtype': 'tf.string'}), '(shape=dataset_info.sequence_size, dtype=tf.string)\n', (5174, 5225), True, 'import tensorflow as tf\n'), ((5259, 5360), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', ([], {'shape': '[dataset_info.sequence_size * _NUM_RAW_CAMERA_PARAMS]', 'dtype': 'tf.float32'}), '(shape=[dataset_info.sequence_size *\n _NUM_RAW_CAMERA_PARAMS], dtype=tf.float32)\n', (5277, 5360), True, 'import tensorflow as tf\n'), ((6633, 6695), 'os.path.join', 'os.path.join', (['self.root', 'self.dataset_info.basepath', 'self.mode'], {}), '(self.root, self.dataset_info.basepath, self.mode)\n', (6645, 6695), False, 'import os\n'), ((6704, 6736), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (6715, 6736), False, 'import os\n'), ((6762, 6793), 'os.path.join', 'os.path.join', (['path', '"""info.meta"""'], {}), "(path, 'info.meta')\n", (6774, 6793), False, 'import os\n'), ((6992, 7054), 'os.path.join', 'os.path.join', (['self.root', 'self.dataset_info.basepath', 'self.mode'], {}), '(self.root, self.dataset_info.basepath, self.mode)\n', (7004, 7054), False, 'import os\n'), ((7337, 7353), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (7345, 7353), True, 'import numpy as np\n'), ((7372, 7389), 'numpy.array', 'np.array', (['cameras'], {}), '(cameras)\n', (7380, 7389), True, 'import numpy as np\n'), ((8095, 8157), 'os.path.join', 'os.path.join', (['self.root', 
'self.dataset_info.basepath', 'self.mode'], {}), '(self.root, self.dataset_info.basepath, self.mode)\n', (8107, 8157), False, 'import os\n'), ((7169, 7191), 'numpy.squeeze', 'np.squeeze', (['rec_frames'], {}), '(rec_frames)\n', (7179, 7191), True, 'import numpy as np\n'), ((7218, 7241), 'numpy.squeeze', 'np.squeeze', (['rec_cameras'], {}), '(rec_cameras)\n', (7228, 7241), True, 'import numpy as np\n'), ((8192, 8210), 'numpy.squeeze', 'np.squeeze', (['frames'], {}), '(frames)\n', (8202, 8210), True, 'import numpy as np\n'), ((8233, 8252), 'numpy.squeeze', 'np.squeeze', (['cameras'], {}), '(cameras)\n', (8243, 8252), True, 'import numpy as np\n')] |
import joblib
import pytest
from pydefect.analyzer.grids import Grids
from pymatgen.core import Lattice, Structure
import numpy as np
from pymatgen.io.vasp import Chgcar
from vise.tests.helpers.assertion import assert_dataclass_almost_equal
@pytest.fixture
def grids():
    """A minimal Grids fixture: cubic lattice, 1x1x5 grid."""
    distances = np.array([[[0.0, 2.0, 4.0, 4.0, 2.0]]])
    return Grids(lattice=Lattice.cubic(10),
                 dim=(1, 1, 5),
                 distance_data=distances)
@pytest.fixture
def chgcar():
    """A Chgcar fixture on the same 1x1x5 grid with simple charge data."""
    lattice = Lattice.cubic(10)
    structure = Structure(lattice=lattice, species=["H"], coords=[[0] * 3])
    charge = {"total": np.array([[[0.0, 2.0, 4.0, 6.0, 8.0]]]),
              "diff": np.array([[[0.0, 1.0, 2.0, 3.0, 4.0]]])}
    return Chgcar(structure, data=charge)
def test_grids_joblib_roundtrip(tmpdir, grids):
    """Grids must survive a joblib dump/load round trip."""
    tmpdir.chdir()
    print(tmpdir)
    with open("tmp.joblib", mode="wb") as out_file:
        joblib.dump(grids, out_file, compress=3)
    with open("tmp.joblib", mode="rb") as in_file:
        restored = joblib.load(in_file)
    assert_dataclass_almost_equal(restored, grids)
def test_grids_np_save_load_roundtrip(tmpdir, grids):
    """Grids must survive its own dump()/from_file() round trip."""
    tmpdir.chdir()
    print(tmpdir)
    grids.dump()
    restored = grids.from_file()
    assert_dataclass_almost_equal(restored, grids)
def test_grids_from_chgcar(grids, chgcar):
    """Grids.from_chgcar must reproduce the reference fixture."""
    assert_dataclass_almost_equal(Grids.from_chgcar(chgcar), grids)
def test_shift_distance_data(grids):
    """Shifting the center by one grid point rolls the distance data."""
    shifted = grids.shifted_distance_data(center=[0, 0, 1])
    np.testing.assert_array_almost_equal(
        shifted, np.array([[[2.0, 0.0, 2.0, 4.0, 4.0]]]))
def test_shift_distance_data2():
    """Shifting on a 2x2x2 grid mirrors the distance table."""
    distances = np.array([[[0.0, 5.0], [5.0, 7.07]],
                          [[5.0, 7.07], [7.07, 8.66]]])
    grids = Grids(lattice=Lattice.cubic(10),
                  dim=(2, 2, 2),
                  distance_data=distances)
    shifted = grids.shifted_distance_data(center=[1, 1, 1])
    np.testing.assert_array_almost_equal(
        shifted,
        np.array([[[8.66, 7.07], [7.07, 5.0]],
                  [[7.07, 5.0], [5.0, 0.0]]]))
def test_spherical_dist(grids):
    """Charge per distance bin is averaged over equidistant points and volume."""
    # Fixture distance_data is [[[0.0, 2.0, 4.0, 4.0, 2.0]]].
    result = grids.spherical_dist(
        data=np.array([[[0.0, 0.0, 0.0, 0.0, 1.0]]]),
        center=[0, 0, 1],
        distance_bins=np.array([0.0, 2.5, 5.0]))
    # Two grid points sit at distance 4.0, so the value is halved;
    # the cubic cell volume is 1000.
    assert result == [0.0, 1.0 / 2 / 1000]
"""
TODO
- Check how to revolve a numpy array.
- Add distances_data
- Add _calc_histogram(chgcar, distances_data, center) method
DONE
- Add defect_center_idxs
- Add defect_center_coords property
""" | [
"numpy.testing.assert_array_almost_equal",
"vise.tests.helpers.assertion.assert_dataclass_almost_equal",
"numpy.array",
"pymatgen.core.Lattice.cubic",
"joblib.load",
"pydefect.analyzer.grids.Grids.from_chgcar",
"joblib.dump",
"pymatgen.io.vasp.Chgcar"
] | [((666, 690), 'pymatgen.io.vasp.Chgcar', 'Chgcar', (['struc'], {'data': 'data'}), '(struc, data=data)\n', (672, 690), False, 'from pymatgen.io.vasp import Chgcar\n'), ((946, 990), 'vise.tests.helpers.assertion.assert_dataclass_almost_equal', 'assert_dataclass_almost_equal', (['actual', 'grids'], {}), '(actual, grids)\n', (975, 990), False, 'from vise.tests.helpers.assertion import assert_dataclass_almost_equal\n'), ((1136, 1180), 'vise.tests.helpers.assertion.assert_dataclass_almost_equal', 'assert_dataclass_almost_equal', (['actual', 'grids'], {}), '(actual, grids)\n', (1165, 1180), False, 'from vise.tests.helpers.assertion import assert_dataclass_almost_equal\n'), ((1239, 1264), 'pydefect.analyzer.grids.Grids.from_chgcar', 'Grids.from_chgcar', (['chgcar'], {}), '(chgcar)\n', (1256, 1264), False, 'from pydefect.analyzer.grids import Grids\n'), ((1269, 1313), 'vise.tests.helpers.assertion.assert_dataclass_almost_equal', 'assert_dataclass_almost_equal', (['actual', 'grids'], {}), '(actual, grids)\n', (1298, 1313), False, 'from vise.tests.helpers.assertion import assert_dataclass_almost_equal\n'), ((1427, 1466), 'numpy.array', 'np.array', (['[[[2.0, 0.0, 2.0, 4.0, 4.0]]]'], {}), '([[[2.0, 0.0, 2.0, 4.0, 4.0]]])\n', (1435, 1466), True, 'import numpy as np\n'), ((1471, 1525), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (1507, 1525), True, 'import numpy as np\n'), ((1855, 1921), 'numpy.array', 'np.array', (['[[[8.66, 7.07], [7.07, 5.0]], [[7.07, 5.0], [5.0, 0.0]]]'], {}), '([[[8.66, 7.07], [7.07, 5.0]], [[7.07, 5.0], [5.0, 0.0]]])\n', (1863, 1921), True, 'import numpy as np\n'), ((1951, 2005), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (1987, 2005), True, 'import numpy as np\n'), ((553, 592), 'numpy.array', 'np.array', (['[[[0.0, 2.0, 4.0, 6.0, 8.0]]]'], {}), '([[[0.0, 2.0, 4.0, 
6.0, 8.0]]])\n', (561, 592), True, 'import numpy as np\n'), ((614, 653), 'numpy.array', 'np.array', (['[[[0.0, 1.0, 2.0, 3.0, 4.0]]]'], {}), '([[[0.0, 1.0, 2.0, 3.0, 4.0]]])\n', (622, 653), True, 'import numpy as np\n'), ((831, 864), 'joblib.dump', 'joblib.dump', (['grids', 'f'], {'compress': '(3)'}), '(grids, f, compress=3)\n', (842, 864), False, 'import joblib\n'), ((927, 941), 'joblib.load', 'joblib.load', (['f'], {}), '(f)\n', (938, 941), False, 'import joblib\n'), ((297, 314), 'pymatgen.core.Lattice.cubic', 'Lattice.cubic', (['(10)'], {}), '(10)\n', (310, 314), False, 'from pymatgen.core import Lattice, Structure\n'), ((379, 418), 'numpy.array', 'np.array', (['[[[0.0, 2.0, 4.0, 4.0, 2.0]]]'], {}), '([[[0.0, 2.0, 4.0, 4.0, 2.0]]])\n', (387, 418), True, 'import numpy as np\n'), ((482, 499), 'pymatgen.core.Lattice.cubic', 'Lattice.cubic', (['(10)'], {}), '(10)\n', (495, 499), False, 'from pymatgen.core import Lattice, Structure\n'), ((1587, 1604), 'pymatgen.core.Lattice.cubic', 'Lattice.cubic', (['(10)'], {}), '(10)\n', (1600, 1604), False, 'from pymatgen.core import Lattice, Structure\n'), ((1671, 1737), 'numpy.array', 'np.array', (['[[[0.0, 5.0], [5.0, 7.07]], [[5.0, 7.07], [7.07, 8.66]]]'], {}), '([[[0.0, 5.0], [5.0, 7.07]], [[5.0, 7.07], [7.07, 8.66]]])\n', (1679, 1737), True, 'import numpy as np\n'), ((2140, 2179), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 0.0, 0.0, 1.0]]]'], {}), '([[[0.0, 0.0, 0.0, 0.0, 1.0]]])\n', (2148, 2179), True, 'import numpy as np\n'), ((2281, 2306), 'numpy.array', 'np.array', (['[0.0, 2.5, 5.0]'], {}), '([0.0, 2.5, 5.0])\n', (2289, 2306), True, 'import numpy as np\n')] |
import torch
import random
import numpy as np
import torch.nn.functional as F
from .min_norm_solvers import MinNormSolver, gradient_normalizers
from torch.autograd import Variable
class backprop_scheduler(object):
def __init__(self, model, mode=None):
self.model = model
self.mode = mode
self.num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
self.Q = torch.zeros(self.num_worker).detach()
self.last_loss = torch.zeros(self.num_worker).detach()
self.pi = torch.ones(self.num_worker).detach()
    def __call__(self, preds, label, cls_optim, regr_optim, frontend_optim, device, h=None, dropout_rate=None, delta=None, temperture=None, alpha=None, batch=None):
        """Dispatch one backprop step to the strategy selected by ``self.mode``.

        Args:
            preds: dict of per-worker predictions, keyed by worker name.
            label: dict of per-worker targets, keyed by worker name.
            cls_optim: dict of optimizers for the classification workers.
            regr_optim: dict of optimizers for the regression workers.
            frontend_optim: optimizer for the shared front-end.
            device: device the per-worker loss buffers are placed on.
            h, dropout_rate, delta, temperture, alpha, batch: extra
                hyper-parameters, each forwarded only to the strategies
                that use it (``temperture`` is the original spelling).

        Returns:
            Whatever the selected strategy returns — a ``(losses, weights)``
            pair for the strategies visible in this file.

        Raises:
            NotImplementedError: if ``self.mode`` is not a known strategy.
        """
        if self.mode == "base":
            return self._base_scheduler(preds, label, cls_optim, regr_optim, frontend_optim, device)
        elif self.mode == "adversarial":
            return self._adversarial(preds, label, cls_optim, regr_optim, frontend_optim, device)
        elif self.mode == "select_one":
            return self._select_one(preds, label, cls_optim, regr_optim, frontend_optim, device)
        elif self.mode == "select_half":
            return self._select_half(preds, label, cls_optim, regr_optim, frontend_optim, device)
        elif self.mode == "dropout":
            return self._drop_out(preds, label, cls_optim, regr_optim, frontend_optim, device=device, dropout_rate=dropout_rate)
        elif self.mode == "hyper_volume":
            return self._hyper_volume(preds, label, cls_optim, regr_optim, frontend_optim, device=device, delta=delta)
        elif self.mode == "softmax":
            return self._softmax(preds, label, cls_optim, regr_optim, frontend_optim, temperture=temperture, device=device)
        elif self.mode == "adaptive":
            return self._online_adaptive(preds, label, cls_optim, regr_optim, frontend_optim, temperture=temperture, alpha=alpha, device=device)
        elif self.mode == "MGD":
            return self._MGDA(preds, label, cls_optim, regr_optim, frontend_optim, batch=batch, device=device)
        else:
            raise NotImplementedError
def _base_scheduler(self, preds, label, cls_optim, regr_optim, frontend_optim, device):
frontend_optim.zero_grad()
tot_loss = 0
losses = {}
for worker in self.model.classification_workers:
cls_optim[worker.name].zero_grad()
loss = worker.loss_weight * worker.loss(preds[worker.name], label[worker.name])
losses[worker.name] = loss
tot_loss += loss
for worker in self.model.regression_workers:
regr_optim[worker.name].zero_grad()
loss = worker.loss_weight * worker.loss(preds[worker.name], label[worker.name])
losses[worker.name] = loss
tot_loss += loss
for worker in self.model.regularizer_workers:
loss = worker.loss_weight * worker.loss(preds[worker.name], label[worker.name])
losses[worker.name] = loss
tot_loss += loss
tot_loss.backward()
for _, optim in cls_optim.items():
optim.step()
for _, optim in regr_optim.items():
optim.step()
frontend_optim.step()
losses["total"] = tot_loss
return losses, 1
def _select_one(self, preds, label, cls_optim, regr_optim, frontend_optim, device):
self.count += 1
loss_lst = []
num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
frontend_optim.zero_grad()
losses = {}
selected = self.count % num_worker
# select one
if selected > 3:
worker = self.model.classification_workers[selected - 4]
loss = worker.loss(preds[worker.name], label[worker.name])
else:
worker = self.model.classification_workers[selected]
loss = worker.loss(preds[worker.name], label[worker.name])
tot_loss = loss
tot_loss.backward()
for _, optim in cls_optim.items():
optim.step()
for _, optim in regr_optim.items():
optim.step()
frontend_optim.step()
losses["total"] = tot_loss
return losses, 1
def _select_half(self, preds, label, cls_optim, regr_optim, frontend_optim, device):
num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
loss_tmp = torch.zeros(num_worker).to(device)
idx = 0
frontend_optim.zero_grad()
losses = {}
for worker in self.model.classification_workers:
cls_optim[worker.name].zero_grad()
loss = worker.loss(preds[worker.name], label[worker.name])
losses[worker.name] = loss
loss_tmp[idx] = loss
idx += 1
for worker in self.model.regression_workers:
regr_optim[worker.name].zero_grad()
loss = worker.loss(preds[worker.name], label[worker.name])
losses[worker.name] = loss
loss_tmp[idx] = loss
idx += 1
# generate mask
mask = np.random.randint(2, size=num_worker)
while np.sum(mask) > 4 or np.sum(mask) < 3:
mask = np.random.randint(2, size=num_worker)
mask = torch.from_numpy(mask).type(torch.FloatTensor).to(device)
#sum up losses
tot_loss = torch.sum(mask * loss_tmp, dim=0)
tot_loss.backward()
for _, optim in cls_optim.items():
optim.step()
for _, optim in regr_optim.items():
optim.step()
frontend_optim.step()
losses["total"] = tot_loss
return losses, 1
def _drop_out(self, preds, label, cls_optim, regr_optim, frontend_optim, dropout_rate, device):
loss_tmp = torch.zeros(7, requires_grad=True).to(device)
idx = 0
assert dropout_rate is not None
re_mask = np.random.binomial(1, dropout_rate, size=len(self.model.regression_workers))
cls_mask = np.random.binomial(1, dropout_rate, size=len(self.model.classification_workers))
frontend_optim.zero_grad()
losses = {}
for i, worker in enumerate(self.model.classification_workers):
cls_optim[worker.name].zero_grad()
if cls_mask[i] == 1:
loss = worker.loss(preds[worker.name], label[worker.name])
else:
loss = 0
losses[worker.name] = loss
loss_tmp[idx] = loss
idx += 1
for worker in self.model.regression_workers:
regr_optim[worker.name].zero_grad()
if re_mask[i] == 1:
loss = worker.loss(preds[worker.name], label[worker.name])
else:
loss = 0
losses[worker.name] = loss
loss_tmp[idx] = loss
idx += 1
#sum up losses
tot_loss = torch.sum(loss_tmp, dim=0)
tot_loss.backward()
for _, optim in cls_optim.items():
optim.step()
for _, optim in regr_optim.items():
optim.step()
frontend_optim.step()
losses["total"] = tot_loss
return losses, 1
    def _hyper_volume(self, preds, label, cls_optim, regr_optim, frontend_optim, delta ,device):
        """Hyper-volume style step: backprop the loss sum, return per-task weights.

        ``delta`` (> 1) scales the current maximum loss into the reference
        point ``eta``; ``alpha`` grows for losses close to ``eta``.

        NOTE(review): ``alpha`` is computed but the backward pass uses the
        plain (unweighted) sum of losses — confirm whether the caller is
        expected to apply the returned ``alpha`` weights.

        Returns:
            ``(losses, alpha)`` where ``losses`` maps worker names to their
            losses plus a ``"total"`` entry, and ``alpha`` is a tensor of
            per-worker weights.
        """
        assert delta > 1
        num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
        loss_tmp = torch.zeros(num_worker).to(device)
        idx = 0
        frontend_optim.zero_grad()
        losses = {}
        for worker in self.model.classification_workers:
            cls_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        for worker in self.model.regression_workers:
            regr_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        #sum up losses
        # Reference point: delta times the current (detached) maximum loss.
        eta = delta * torch.max(loss_tmp.detach()).item()
        hyper_votolume = torch.sum(loss_tmp)
        # Per-task weights; 1e-6 guards against division by zero.
        alpha = 1 / (eta - loss_tmp + 1e-6)
        hyper_votolume.backward()
        for _, optim in cls_optim.items():
            optim.step()
        for _, optim in regr_optim.items():
            optim.step()
        frontend_optim.step()
        losses["total"] = hyper_votolume
        return losses, alpha
def _softmax(self, preds, label, cls_optim, regr_optim, frontend_optim, temperture, device):
assert temperture > 0
num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
loss_tmp = []
idx = 0
frontend_optim.zero_grad()
losses = {}
for worker in self.model.classification_workers:
cls_optim[worker.name].zero_grad()
loss = worker.loss(preds[worker.name], label[worker.name])
losses[worker.name] = loss
loss_tmp.append(loss.item() * temperture)
# idx += 1
for worker in self.model.regression_workers:
regr_optim[worker.name].zero_grad()
loss = worker.loss(preds[worker.name], label[worker.name])
losses[worker.name] = loss
loss_tmp.append(loss.item() * temperture)
# idx += 1
alpha = self._stable_softmax(loss_tmp)
tot_loss = 0
for worker in self.model.classification_workers:
# tot_loss += alpha[idx] * losses[worker.name]
tot_loss += losses[worker.name]
idx += 1
for worker in self.model.regression_workers:
# tot_loss += alpha[idx] * losses[worker.name]
tot_loss += losses[worker.name]
idx += 1
# tot_loss = torch.sum(alpha.detach() * loss_vec)
tot_loss.backward()
for _, optim in cls_optim.items():
optim.step()
for _, optim in regr_optim.items():
optim.step()
frontend_optim.step()
losses["total"] = tot_loss
return losses, alpha
    def _online_adaptive(self, preds, label, cls_optim, regr_optim, frontend_optim, temperture, alpha, device):
        """One training step with online-adaptive task weighting.

        Accumulates per-task losses, updates the reward estimate ``self.Q``
        with an exponential moving average of the per-task loss decrease and
        derives task weights ``self.pi`` via a temperature-scaled softmax.
        NOTE(review): the backward pass uses the plain sum of the losses;
        ``self.pi`` is only stored and returned, not applied to the loss.

        Args:
            preds: dict of predictions keyed by worker name.
            label: dict of targets keyed by worker name.
            cls_optim: dict of optimizers for classification workers.
            regr_optim: dict of optimizers for regression workers.
            frontend_optim: optimizer of the shared frontend.
            temperture (float): softmax temperature (> 0).
            alpha (float): EMA coefficient (> 0) for the reward estimate.
            device: device on which the loss buffer is allocated.

        Returns:
            tuple: (dict of per-task losses plus "total", weights ``self.pi``).
        """
        assert temperture > 0 and alpha > 0
        # device = preds['chunk'].device
        num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
        loss_tmp = torch.zeros(num_worker).to(device)
        idx = 0
        frontend_optim.zero_grad()
        losses = {}
        for worker in self.model.classification_workers:
            cls_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        for worker in self.model.regression_workers:
            regr_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        # Reward: how much each task's loss decreased since the last call.
        R_t = self.last_loss.to(device) - loss_tmp
        with torch.no_grad():
            # EMA of the rewards, then softmax to get task weights.
            Q_t = alpha * R_t.detach() + (1 - alpha) * self.Q.to(device)
            self.pi = F.softmax(temperture * Q_t, dim=0)
        tot_loss = torch.sum(loss_tmp)
        tot_loss.backward()
        # Persist state for the next call.
        self.last_loss = loss_tmp.detach()
        self.Q = Q_t.detach()
        for _, optim in cls_optim.items():
            optim.step()
        for _, optim in regr_optim.items():
            optim.step()
        frontend_optim.step()
        losses["total"] = tot_loss
        return losses, self.pi
    def _MGDA(self, preds, label, cls_optim, regr_optim, frontend_optim, batch, device):
        """One MGDA-style (multiple-gradient descent) training step.

        For every worker, re-runs the model forward pass and collects the
        normalized frontend gradient of that worker's loss, then solves the
        min-norm problem over these gradients (``MinNormSolver``) to obtain
        task weights ``sol``.  NOTE(review): the final backward pass sums the
        losses unweighted — the weighted combination is commented out and
        ``sol`` is only returned as ``alpha``.
        """
        frontend_optim.zero_grad()
        losses = {}
        grads = {}
        # One forward + per-task backward (inside _get_gen_grads) per worker.
        for worker in self.model.classification_workers:
            self.model.zero_grad()
            h, chunk, preds, labels = self.model.forward(batch, 1, device)
            # print(worker.name)
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            grads[worker.name] = self._get_gen_grads(loss)
        for worker in self.model.regression_workers:
            self.model.zero_grad()
            h, chunk, preds, labels = self.model.forward(batch, 1, device)
            # print(worker.name)
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            grads[worker.name] = self._get_gen_grads(loss)
        # Min-norm element in the convex hull of the per-task gradients.
        sol, min_norm = MinNormSolver.find_min_norm_element([grads[worker].unsqueeze(0) for worker, _ in grads.items()])
        alpha = sol
        tot_loss = 0
        # idx = 0
        # Fresh forward pass: the earlier graphs were consumed by the
        # per-task backward calls in _get_gen_grads.
        self.model.zero_grad()
        h, chunk, preds, labels = self.model.forward(batch, 1, device)
        for worker in self.model.classification_workers:
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            tot_loss += loss
            # tot_loss += sol[idx] * loss
        for worker in self.model.regression_workers:
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            tot_loss += loss
            # tot_loss += sol[idx] * loss
        tot_loss.backward()
        for _, optim in cls_optim.items():
            optim.step()
        for _, optim in regr_optim.items():
            optim.step()
        frontend_optim.step()
        losses["total"] = tot_loss
        return losses, alpha
def _get_gen_grads(self, loss_):
# grads = torch.autograd.grad(outputs=loss_, inputs=self.model.frontend.parameters())
self.model.frontend.zero_grad()
loss_.backward()
# grads = self.model.frontend.grad()
for params in self.model.frontend.parameters():
try:
grads_ = torch.cat([grads_, params.grad.view(-1)], 0)
except:
grads_ = params.grad.view(-1)
return grads_ / grads_.norm()
def _stable_softmax(self, x):
z = np.asarray(x, np.float) - np.max(x)
numerator = np.exp(z)
denominator = np.sum(numerator)
softmax = numerator / denominator
return softmax
| [
"numpy.asarray",
"torch.from_numpy",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.random.randint",
"torch.zeros",
"torch.sum",
"torch.no_grad",
"torch.nn.functional.softmax",
"torch.ones"
] | [((5181, 5218), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'num_worker'}), '(2, size=num_worker)\n', (5198, 5218), True, 'import numpy as np\n'), ((5444, 5477), 'torch.sum', 'torch.sum', (['(mask * loss_tmp)'], {'dim': '(0)'}), '(mask * loss_tmp, dim=0)\n', (5453, 5477), False, 'import torch\n'), ((6967, 6993), 'torch.sum', 'torch.sum', (['loss_tmp'], {'dim': '(0)'}), '(loss_tmp, dim=0)\n', (6976, 6993), False, 'import torch\n'), ((8243, 8262), 'torch.sum', 'torch.sum', (['loss_tmp'], {}), '(loss_tmp)\n', (8252, 8262), False, 'import torch\n'), ((11427, 11446), 'torch.sum', 'torch.sum', (['loss_tmp'], {}), '(loss_tmp)\n', (11436, 11446), False, 'import torch\n'), ((14309, 14318), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (14315, 14318), True, 'import numpy as np\n'), ((14341, 14358), 'numpy.sum', 'np.sum', (['numerator'], {}), '(numerator)\n', (14347, 14358), True, 'import numpy as np\n'), ((5290, 5327), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'num_worker'}), '(2, size=num_worker)\n', (5307, 5327), True, 'import numpy as np\n'), ((11258, 11273), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11271, 11273), False, 'import torch\n'), ((11371, 11405), 'torch.nn.functional.softmax', 'F.softmax', (['(temperture * Q_t)'], {'dim': '(0)'}), '(temperture * Q_t, dim=0)\n', (11380, 11405), True, 'import torch.nn.functional as F\n'), ((14253, 14276), 'numpy.asarray', 'np.asarray', (['x', 'np.float'], {}), '(x, np.float)\n', (14263, 14276), True, 'import numpy as np\n'), ((14279, 14288), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (14285, 14288), True, 'import numpy as np\n'), ((430, 458), 'torch.zeros', 'torch.zeros', (['self.num_worker'], {}), '(self.num_worker)\n', (441, 458), False, 'import torch\n'), ((493, 521), 'torch.zeros', 'torch.zeros', (['self.num_worker'], {}), '(self.num_worker)\n', (504, 521), False, 'import torch\n'), ((549, 576), 'torch.ones', 'torch.ones', (['self.num_worker'], {}), 
'(self.num_worker)\n', (559, 576), False, 'import torch\n'), ((4500, 4523), 'torch.zeros', 'torch.zeros', (['num_worker'], {}), '(num_worker)\n', (4511, 4523), False, 'import torch\n'), ((5233, 5245), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (5239, 5245), True, 'import numpy as np\n'), ((5253, 5265), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (5259, 5265), True, 'import numpy as np\n'), ((5858, 5892), 'torch.zeros', 'torch.zeros', (['(7)'], {'requires_grad': '(True)'}), '(7, requires_grad=True)\n', (5869, 5892), False, 'import torch\n'), ((7494, 7517), 'torch.zeros', 'torch.zeros', (['num_worker'], {}), '(num_worker)\n', (7505, 7517), False, 'import torch\n'), ((10551, 10574), 'torch.zeros', 'torch.zeros', (['num_worker'], {}), '(num_worker)\n', (10562, 10574), False, 'import torch\n'), ((5343, 5365), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (5359, 5365), False, 'import torch\n')] |
import numpy as np
class DualTask:
    """Mutable container for the data of a dual LP task.

    Holds the constraint matrix ``A``, right-hand side ``b``, objective
    coefficients ``c`` and box bounds ``d_lo``/``d_hi`` together with the
    current dimensions ``m`` (rows) and ``n`` (columns), and supports
    removing or appending a constraint/variable pair.
    """

    def __init__(self, A: list, b: list, c: list, d_lo: list, d_hi: list):
        """Store the task data; array-like inputs are converted to ndarrays."""
        self.A: np.array = np.array(A)
        self.b: list = b
        self.c: list = c
        self.d_lo: np.array = np.array(d_lo)
        self.d_hi: np.array = np.array(d_hi)
        self.m, self.n = self.A.shape

    def remove(self, row, col):
        """Drop constraint ``row`` and variable ``col`` from the task."""
        self.A = np.delete(np.delete(self.A, row, axis=0), col, axis=1)
        self.b.pop(row)
        self.c.pop(col)
        self.d_lo = np.delete(self.d_lo, col)
        self.d_hi = np.delete(self.d_hi, col)
        self.m -= 1
        self.n -= 1

    def extend(self, a, b, J):
        """Append one constraint row and one slack variable.

        Entries of ``a`` at columns outside the basis ``J`` enter the new row
        negated; the new slack column has coefficient 1 in the new row only.
        The new variable is bounded by [0, 1e9], the new RHS entry is ``-b``.
        """
        outside_base = [j for j in range(self.n) if j not in J]
        new_row = np.zeros(self.n)
        new_row[outside_base] = -a[outside_base]
        self.n += 1
        self.m += 1
        self.A = np.vstack((self.A, new_row))
        self.A = np.hstack((self.A, np.zeros((self.m, 1))))
        self.A[-1, -1] = 1
        self.b.append(-b)
        self.c.append(0)
        self.d_lo = np.append(self.d_lo, 0)
        self.d_hi = np.append(self.d_hi, 1e9)
| [
"numpy.delete",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
] | [((139, 150), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (147, 150), True, 'import numpy as np\n'), ((231, 245), 'numpy.array', 'np.array', (['d_lo'], {}), '(d_lo)\n', (239, 245), True, 'import numpy as np\n'), ((276, 290), 'numpy.array', 'np.array', (['d_hi'], {}), '(d_hi)\n', (284, 290), True, 'import numpy as np\n'), ((383, 413), 'numpy.delete', 'np.delete', (['self.A', 'row'], {'axis': '(0)'}), '(self.A, row, axis=0)\n', (392, 413), True, 'import numpy as np\n'), ((431, 461), 'numpy.delete', 'np.delete', (['self.A', 'col'], {'axis': '(1)'}), '(self.A, col, axis=1)\n', (440, 461), True, 'import numpy as np\n'), ((532, 557), 'numpy.delete', 'np.delete', (['self.d_lo', 'col'], {}), '(self.d_lo, col)\n', (541, 557), True, 'import numpy as np\n'), ((578, 603), 'numpy.delete', 'np.delete', (['self.d_hi', 'col'], {}), '(self.d_hi, col)\n', (587, 603), True, 'import numpy as np\n'), ((762, 778), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (770, 778), True, 'import numpy as np\n'), ((895, 927), 'numpy.vstack', 'np.vstack', (['(self.A, restriction)'], {}), '((self.A, restriction))\n', (904, 927), True, 'import numpy as np\n'), ((1088, 1111), 'numpy.append', 'np.append', (['self.d_lo', '(0)'], {}), '(self.d_lo, 0)\n', (1097, 1111), True, 'import numpy as np\n'), ((1132, 1166), 'numpy.append', 'np.append', (['self.d_hi', '(1000000000.0)'], {}), '(self.d_hi, 1000000000.0)\n', (1141, 1166), True, 'import numpy as np\n'), ((964, 985), 'numpy.zeros', 'np.zeros', (['(self.m, 1)'], {}), '((self.m, 1))\n', (972, 985), True, 'import numpy as np\n')] |
import logging
import random
from abc import abstractmethod
from typing import List, Callable, Tuple
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
from matplotlib.colors import LinearSegmentedColormap
logger = logging.getLogger(__name__)
class SAModel:
    """
    Base class for use-case specific simulated-annealing models consumed by
    SARouteOptimizer. Subclasses must implement ``cost``; the remaining hooks
    provide the cooling schedule, the acceptance probability and the route
    mutation used during annealing.
    """

    @abstractmethod
    def cost(self, route: List[int]) -> float:
        """
        Return the cost of ``route``. Must be implemented by subclasses.
        """
        raise NotImplementedError

    @staticmethod
    def schedule(t: int, max_temperature: float = 1.0, decay_constant: float = 0.005) -> float:
        """
        Exponentially decaying temperature at iteration round ``t``.
        """
        return max_temperature * np.exp(-decay_constant * t)

    @staticmethod
    def probability(delta_cost: float, temperature: float, k: float = 1) -> float:
        """
        Metropolis acceptance probability for a candidate with cost change
        ``delta_cost`` at the given ``temperature``: improvements are always
        accepted, deteriorations with probability exp(-delta/(k*T)).
        """
        return 1 if delta_cost < 0 else np.exp(-delta_cost / (k * temperature))

    def mutate(self, input_route: List[int], mutation_probability: float = 0.2) -> List[int]:
        """
        Return a mutated copy of ``input_route``. The result is guaranteed to
        differ from the input: if no random swap fired, one is forced.
        """
        candidate = input_route.copy()
        for _ in range(len(candidate)):
            if random.random() < mutation_probability:
                self._swap(candidate)
        if candidate == input_route:
            # Ensure at least one change is made to the input route.
            self._swap(candidate)
        return candidate

    def _swap(self, route):
        """
        Swap two distinct interior positions of ``route`` in place
        (the first and last elements are never moved).
        """
        i, j = random.sample([k + 1 for k in range(len(route) - 2)], 2)
        route[i], route[j] = route[j], route[i]
class SARouteOptimizer:
    """
    Simulated annealing route optimizer. With a given model and termination criteria, finds an optimal route that
    minimizes the cost function defined by the model.
    """
    def __init__(self,
                 model: SAModel,
                 max_iter: int = 10000,
                 max_iter_without_improvement: int = 2000,
                 min_temperature: float = 1e-12,
                 cost_threshold: float = -np.inf,
                 ):
        """
        :param model: SAModel providing cost, schedule, probability and mutate.
        :param max_iter: hard cap on annealing iterations.
        :param max_iter_without_improvement: stop after this many rounds without a new best.
        :param min_temperature: stop once the schedule drops below this temperature.
        :param cost_threshold: stop as soon as the best cost falls below this value.
        """
        self.model = model
        self.max_iter = max_iter
        self.max_iter_without_improvement = max_iter_without_improvement
        self.min_temperature = min_temperature
        self.cost_threshold = cost_threshold
        # Per-iteration history, filled by run() and visualized by plot_solution().
        self.temperatures = []
        self.costs = []
        self.probabilities = []
        self.delta_costs = []
        self.is_accepted = []
    def run(self, init_route: List[int]) -> Tuple[List[int], float]:
        """
        Find optimal route.
        :param init_route: Init guess for route.
        :return: optimal route, route cost
        """
        current_route = init_route.copy()
        best_route = current_route.copy()
        current_cost = self.model.cost(current_route)
        best_cost = self.model.cost(best_route)
        # State of the previous round, recorded into the history lists below.
        probability, delta_cost = 1, 0
        is_accepted = True
        no_improvement_counter = 0
        for t in range(self.max_iter):
            no_improvement_counter += 1
            temperature = self.model.schedule(t)
            if temperature < self.min_temperature:
                logger.info("Minimum temperature reached. Return solution")
                return best_route, best_cost
            # Record the previous round's outcome for plotting.
            self.temperatures.append(temperature)
            self.costs.append(current_cost)
            self.probabilities.append(probability)
            self.delta_costs.append(delta_cost)
            self.is_accepted.append(is_accepted)
            # Propose a candidate by mutating a copy of the current route.
            mutated_route = self.model.mutate(current_route.copy())
            mutated_route_cost = self.model.cost(mutated_route)
            logger.debug(f"Mutated route: {mutated_route}; cost {mutated_route_cost}")
            delta_cost = mutated_route_cost - current_cost
            is_accepted = False
            probability = self.model.probability(delta_cost, temperature)
            # Metropolis acceptance: always on improvement, otherwise by chance.
            if probability >= random.uniform(0.0, 1.0):
                is_accepted = True
                current_route = mutated_route.copy()
                current_cost = mutated_route_cost
                if current_cost < best_cost:
                    best_route = current_route.copy()
                    best_cost = current_cost
                    logger.info(f"Found better solution; round {t}; cost {best_cost}")
                    no_improvement_counter = 0
                    if best_cost < self.cost_threshold:
                        logger.info("Cost reached required threshold value. Return solution.")
                        return best_route, best_cost
            logger.debug(f"Round {t}: temperature {temperature}; cost {current_cost}")
            if no_improvement_counter > self.max_iter_without_improvement:
                logger.info("Max iteration number without improvement reached. Return solution.")
                return best_route, best_cost
        logger.info("Max iteration number reached. Return solution.")
        return best_route, best_cost
    def plot_solution(self):
        """
        Visualize the recorded run: cost/temperature traces (left) and the
        acceptance probability over (temperature, cost change) pairs (right);
        accepted candidates are marked with black crosses.
        """
        plt.figure(1)
        ax1 = plt.subplot(1, 2, 1)
        color = 'tab:blue'
        ax1.plot(self.costs, color=color)
        ax1.set_ylabel('Cost', color=color)
        color = 'tab:red'
        ax2 = ax1.twinx()
        ax2.plot(self.temperatures, color=color)
        ax2.set_ylabel('Temperature', color=color)
        ax1.set_xlabel("Iteration")
        plt.title("Cost & temperature")
        plt.subplot(1, 2, 2)
        # + 0.001 keeps zero probabilities representable on the log color scale.
        sc = plt.scatter(self.temperatures,
                         self.delta_costs,
                         c=np.array(self.probabilities) + 0.001,
                         norm=colors.LogNorm(),
                         edgecolors="k",
                         cmap=LinearSegmentedColormap.from_list("MyCmapName", ["b", "r"]))
        plt.colorbar(sc)
        plt.gca().invert_xaxis()
        # Overlay the accepted candidates (boolean-mask indexing).
        plt.plot(np.array(self.temperatures)[self.is_accepted],
                 np.array(self.delta_costs)[self.is_accepted],
                 "kx",
                 markersize=3)
        plt.xlabel("Temperature")
        plt.ylabel("Cost change")
        plt.title("Probability")
        plt.tight_layout()
        plt.show()
| [
"logging.getLogger",
"random.uniform",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
... | [((247, 274), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (264, 274), False, 'import logging\n'), ((5581, 5594), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5591, 5594), True, 'import matplotlib.pyplot as plt\n'), ((5610, 5630), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (5621, 5630), True, 'import matplotlib.pyplot as plt\n'), ((5943, 5974), 'matplotlib.pyplot.title', 'plt.title', (['"""Cost & temperature"""'], {}), "('Cost & temperature')\n", (5952, 5974), True, 'import matplotlib.pyplot as plt\n'), ((5984, 6004), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (5995, 6004), True, 'import matplotlib.pyplot as plt\n'), ((6345, 6361), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sc'], {}), '(sc)\n', (6357, 6361), True, 'import matplotlib.pyplot as plt\n'), ((6585, 6610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature"""'], {}), "('Temperature')\n", (6595, 6610), True, 'import matplotlib.pyplot as plt\n'), ((6619, 6644), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost change"""'], {}), "('Cost change')\n", (6629, 6644), True, 'import matplotlib.pyplot as plt\n'), ((6653, 6677), 'matplotlib.pyplot.title', 'plt.title', (['"""Probability"""'], {}), "('Probability')\n", (6662, 6677), True, 'import matplotlib.pyplot as plt\n'), ((6687, 6705), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6703, 6705), True, 'import matplotlib.pyplot as plt\n'), ((6714, 6724), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6722, 6724), True, 'import matplotlib.pyplot as plt\n'), ((978, 1005), 'numpy.exp', 'np.exp', (['(-decay_constant * t)'], {}), '(-decay_constant * t)\n', (984, 1005), True, 'import numpy as np\n'), ((1334, 1373), 'numpy.exp', 'np.exp', (['(-delta_cost / (k * temperature))'], {}), '(-delta_cost / (k * temperature))\n', (1340, 1373), True, 'import numpy as np\n'), 
((1733, 1748), 'random.random', 'random.random', ([], {}), '()\n', (1746, 1748), False, 'import random\n'), ((4482, 4506), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (4496, 4506), False, 'import random\n'), ((6187, 6203), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (6201, 6203), False, 'from matplotlib import colors\n'), ((6276, 6335), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""MyCmapName"""', "['b', 'r']"], {}), "('MyCmapName', ['b', 'r'])\n", (6309, 6335), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((6370, 6379), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6377, 6379), True, 'import matplotlib.pyplot as plt\n'), ((6412, 6439), 'numpy.array', 'np.array', (['self.temperatures'], {}), '(self.temperatures)\n', (6420, 6439), True, 'import numpy as np\n'), ((6476, 6502), 'numpy.array', 'np.array', (['self.delta_costs'], {}), '(self.delta_costs)\n', (6484, 6502), True, 'import numpy as np\n'), ((6119, 6147), 'numpy.array', 'np.array', (['self.probabilities'], {}), '(self.probabilities)\n', (6127, 6147), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import numpy as np
from numba import jit
from paddle import fluid
import paddle.fluid.layers as F
import paddle.fluid.dygraph as dg
def masked_mean(inputs, mask):
    """Mean of ``inputs`` over the positions where ``mask`` is set.

    Args:
        inputs (Variable): shape(B, T, C), dtype float32, the input.
        mask (Variable): shape(B, T), dtype float32, a mask.

    Returns:
        Variable: shape(1, ), dtype float32, masked mean.
    """
    num_channels = inputs.shape[-1]
    masked = F.elementwise_mul(inputs, mask, axis=0)
    # Denominator counts valid (B, T) positions times the channel count.
    return F.reduce_sum(masked) / (num_channels * F.reduce_sum(mask))
@jit(nopython=True)
def guided_attention(N, max_N, T, max_T, g):
    """Build one diagonal attention-guide matrix.

    Entries near the diagonal ``n/N == t/T`` are close to 0 (no penalty) and
    grow towards 1 away from it; ``g`` controls the width of the band.

    Args:
        N (int): valid length of encoder.
        max_N (int): max length of encoder.
        T (int): valid length of decoder.
        max_T (int): max length of decoder.
        g (float): sigma to adjust the degree of diagonal guide.

    Returns:
        np.ndarray: shape(max_N, max_T), dtype float32, the diagonal guide.
    """
    guide = np.zeros((max_N, max_T), dtype=np.float32)
    # Explicit loops (rather than vectorization) keep this numba-friendly.
    for enc_pos in range(N):
        for dec_pos in range(T):
            guide[enc_pos, dec_pos] = 1 - np.exp(
                -(enc_pos / N - dec_pos / T)**2 / (2 * g * g))
    return guide
def guided_attentions(encoder_lengths, decoder_lengths, max_decoder_len,
                      g=0.2):
    """Build a batch of diagonal attention guides.

    Args:
        encoder_lengths (np.ndarray): shape(B, ), dtype: int64, encoder valid lengths.
        decoder_lengths (np.ndarray): shape(B, ), dtype: int64, decoder valid lengths.
        max_decoder_len (int): max length of decoder.
        g (float, optional): sigma to adjust the degree of diagonal guide. Defaults to 0.2.

    Returns:
        np.ndarray: shape(B, max_T, max_N), dtype float32, the diagonal guides
        (max_N: max encoder length, max_T: max decoder length).
    """
    batch_size = len(encoder_lengths)
    max_enc_len = encoder_lengths.max()
    guides = np.zeros((batch_size, max_decoder_len, max_enc_len), dtype=np.float32)
    for b in range(batch_size):
        # guided_attention yields (enc, dec); transpose to (dec, enc).
        guides[b] = guided_attention(encoder_lengths[b], max_enc_len,
                                     decoder_lengths[b], max_decoder_len, g).T
    return guides
class TTSLoss(object):
    """Aggregate training loss for a Deep Voice 3 style TTS model.

    Combines masked/priority L1 and binary-divergence losses on the linear
    and mel spectrograms, a guided-attention loss and a done-flag loss into
    a single total; see ``__call__`` for the exact composition.
    """
    def __init__(self,
                 masked_weight=0.0,
                 priority_bin=None,
                 priority_weight=0.0,
                 binary_divergence_weight=0.0,
                 guided_attention_sigma=0.2,
                 downsample_factor=4,
                 r=1):
        """Compute loss for Deep Voice 3 model.
        Args:
            masked_weight (float, optional): the weight of masked loss. Defaults to 0.0.
            priority_bin ([type], optional): frequency bands for linear spectrogram loss to be prioritized. Defaults to None.
            priority_weight (float, optional): weight for the prioritized frequency bands. Defaults to 0.0.
            binary_divergence_weight (float, optional): weight for binary cross entropy (used for spectrogram loss). Defaults to 0.0.
            guided_attention_sigma (float, optional): `sigma` for attention guide. Defaults to 0.2.
            downsample_factor (int, optional): the downsample factor for mel spectrogram. Defaults to 4.
            r (int, optional): frames per decoder step. Defaults to 1.
        """
        self.masked_weight = masked_weight
        self.priority_bin = priority_bin  # only used for lin-spec loss
        self.priority_weight = priority_weight  # only used for lin-spec loss
        self.binary_divergence_weight = binary_divergence_weight
        self.guided_attention_sigma = guided_attention_sigma
        # Predictions are compared against targets shifted by r frames.
        self.time_shift = r
        self.r = r
        self.downsample_factor = downsample_factor
    def l1_loss(self, prediction, target, mask, priority_bin=None):
        """L1 loss for spectrogram.
        Args:
            prediction (Variable): shape(B, T, C), dtype float32, predicted spectrogram.
            target (Variable): shape(B, T, C), dtype float32, target spectrogram.
            mask (Variable): shape(B, T), mask.
            priority_bin (int, optional): frequency bands for linear spectrogram loss to be prioritized. Defaults to None.
        Returns:
            Variable: shape(1,), dtype float32, l1 loss(with mask and possibly priority bin applied.)
        """
        abs_diff = F.abs(prediction - target)
        # basic mask-weighted l1 loss: blend masked mean (valid frames only)
        # with the plain mean, weighted by masked_weight
        w = self.masked_weight
        if w > 0 and mask is not None:
            base_l1_loss = w * masked_mean(abs_diff, mask) \
                + (1 - w) * F.reduce_mean(abs_diff)
        else:
            base_l1_loss = F.reduce_mean(abs_diff)
        if self.priority_weight > 0 and priority_bin is not None:
            # mask-weighted l1-loss over the first `priority_bin` channels
            priority_abs_diff = abs_diff[:, :, :priority_bin]
            if w > 0 and mask is not None:
                priority_loss = w * masked_mean(priority_abs_diff, mask) \
                    + (1 - w) * F.reduce_mean(priority_abs_diff)
            else:
                priority_loss = F.reduce_mean(priority_abs_diff)
            # priority weighted sum
            p = self.priority_weight
            loss = p * priority_loss + (1 - p) * base_l1_loss
        else:
            loss = base_l1_loss
        return loss
    def binary_divergence(self, prediction, target, mask):
        """Binary cross entropy loss for spectrogram. All the values in the spectrogram are treated as logits in a logistic regression.
        Args:
            prediction (Variable): shape(B, T, C), dtype float32, predicted spectrogram.
            target (Variable): shape(B, T, C), dtype float32, target spectrogram.
            mask (Variable): shape(B, T), mask.
        Returns:
            Variable: shape(1,), dtype float32, binary cross entropy loss.
        """
        flattened_prediction = F.reshape(prediction, [-1, 1])
        flattened_target = F.reshape(target, [-1, 1])
        flattened_loss = F.log_loss(
            flattened_prediction, flattened_target, epsilon=1e-8)
        # Restore the original (B, T, C) layout so the mask can be applied.
        bin_div = fluid.layers.reshape(flattened_loss, prediction.shape)
        w = self.masked_weight
        if w > 0 and mask is not None:
            loss = w * masked_mean(bin_div, mask) \
                + (1 - w) * F.reduce_mean(bin_div)
        else:
            loss = F.reduce_mean(bin_div)
        return loss
    @staticmethod
    def done_loss(done_hat, done):
        """Compute done loss
        Args:
            done_hat (Variable): shape(B, T), dtype float32, predicted done probability(the probability that the final frame has been generated.)
            done (Variable): shape(B, T), dtype float32, ground truth done probability(the probability that the final frame has been generated.)
        Returns:
            Variable: shape(1, ), dtype float32, done loss.
        """
        flat_done_hat = F.reshape(done_hat, [-1, 1])
        flat_done = F.reshape(done, [-1, 1])
        loss = F.log_loss(flat_done_hat, flat_done, epsilon=1e-8)
        loss = F.reduce_mean(loss)
        return loss
    def attention_loss(self, predicted_attention, input_lengths,
                       target_lengths):
        """
        Given valid encoder_lengths and decoder_lengths, compute a diagonal guide, and compute loss from the predicted attention and the guide.
        Args:
            predicted_attention (Variable): shape(*, B, T_dec, T_enc), dtype float32, the alignment tensor, where B means batch size, T_dec means number of time steps of the decoder, T_enc means the number of time steps of the encoder, * means other possible dimensions.
            input_lengths (numpy.ndarray): shape(B,), dtype:int64, valid lengths (time steps) of encoder outputs.
            target_lengths (numpy.ndarray): shape(batch_size,), dtype:int64, valid lengths (time steps) of decoder outputs.
        Returns:
            loss (Variable): shape(1, ), dtype float32, attention loss.
        """
        n_attention, batch_size, max_target_len, max_input_len = (
            predicted_attention.shape)
        soft_mask = guided_attentions(input_lengths, target_lengths,
                                      max_target_len,
                                      self.guided_attention_sigma)
        soft_mask_ = dg.to_variable(soft_mask)
        # Penalize attention mass that falls outside the diagonal band.
        loss = fluid.layers.reduce_mean(predicted_attention * soft_mask_)
        return loss
    def __call__(self, outputs, inputs):
        """Total loss
        Args:
            outpus is a tuple of (mel_hyp, lin_hyp, attn_hyp, done_hyp).
                mel_hyp (Variable): shape(B, T, C_mel), dtype float32, predicted mel spectrogram.
                lin_hyp (Variable): shape(B, T, C_lin), dtype float32, predicted linear spectrogram.
                done_hyp (Variable): shape(B, T), dtype float32, predicted done probability.
                attn_hyp (Variable): shape(N, B, T_dec, T_enc), dtype float32, predicted attention.
            inputs is a tuple of (mel_ref, lin_ref, done_ref, input_lengths, n_frames)
                mel_ref (Variable): shape(B, T, C_mel), dtype float32, ground truth mel spectrogram.
                lin_ref (Variable): shape(B, T, C_lin), dtype float32, ground truth linear spectrogram.
                done_ref (Variable): shape(B, T), dtype float32, ground truth done flag.
                input_lengths (Variable): shape(B, ), dtype: int, encoder valid lengths.
                n_frames (Variable): shape(B, ), dtype: int, decoder valid lengths.
        Returns:
            Dict(str, Variable): details of loss.
        """
        total_loss = 0.
        mel_hyp, lin_hyp, attn_hyp, done_hyp = outputs
        mel_ref, lin_ref, done_ref, input_lengths, n_frames = inputs
        # n_frames # mel_lengths # decoder_lengths
        max_frames = lin_hyp.shape[1]
        max_mel_steps = max_frames // self.downsample_factor
        # max_decoder_steps = max_mel_steps // self.r
        # decoder_mask = F.sequence_mask(n_frames // self.downsample_factor //
        #                                self.r,
        #                                max_decoder_steps,
        #                                dtype="float32")
        mel_mask = F.sequence_mask(
            n_frames // self.downsample_factor, max_mel_steps, dtype="float32")
        lin_mask = F.sequence_mask(n_frames, max_frames, dtype="float32")
        # Align prediction at step t with target at step t + time_shift.
        lin_hyp = lin_hyp[:, :-self.time_shift, :]
        lin_ref = lin_ref[:, self.time_shift:, :]
        lin_mask = lin_mask[:, self.time_shift:]
        lin_l1_loss = self.l1_loss(
            lin_hyp, lin_ref, lin_mask, priority_bin=self.priority_bin)
        lin_bce_loss = self.binary_divergence(lin_hyp, lin_ref, lin_mask)
        lin_loss = self.binary_divergence_weight * lin_bce_loss \
            + (1 - self.binary_divergence_weight) * lin_l1_loss
        total_loss += lin_loss
        # Same shift for the mel branch (no priority bin there).
        mel_hyp = mel_hyp[:, :-self.time_shift, :]
        mel_ref = mel_ref[:, self.time_shift:, :]
        mel_mask = mel_mask[:, self.time_shift:]
        mel_l1_loss = self.l1_loss(mel_hyp, mel_ref, mel_mask)
        mel_bce_loss = self.binary_divergence(mel_hyp, mel_ref, mel_mask)
        # print("=====>", mel_l1_loss.numpy()[0], mel_bce_loss.numpy()[0])
        mel_loss = self.binary_divergence_weight * mel_bce_loss \
            + (1 - self.binary_divergence_weight) * mel_l1_loss
        total_loss += mel_loss
        attn_loss = self.attention_loss(attn_hyp,
                                        input_lengths.numpy(),
                                        n_frames.numpy() //
                                        (self.downsample_factor * self.r))
        total_loss += attn_loss
        done_loss = self.done_loss(done_hyp, done_ref)
        total_loss += done_loss
        losses = {
            "loss": total_loss,
            "mel/mel_loss": mel_loss,
            "mel/l1_loss": mel_l1_loss,
            "mel/bce_loss": mel_bce_loss,
            "lin/lin_loss": lin_loss,
            "lin/l1_loss": lin_l1_loss,
            "lin/bce_loss": lin_bce_loss,
            "done": done_loss,
            "attn": attn_loss,
        }
        return losses
| [
"paddle.fluid.layers.reduce_mean",
"paddle.fluid.dygraph.to_variable",
"paddle.fluid.layers.reduce_sum",
"numpy.exp",
"numpy.zeros",
"numba.jit",
"paddle.fluid.layers.abs",
"paddle.fluid.layers.sequence_mask",
"paddle.fluid.layers.elementwise_mul",
"paddle.fluid.layers.log_loss",
"paddle.fluid.l... | [((1228, 1246), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1231, 1246), False, 'from numba import jit\n'), ((1096, 1135), 'paddle.fluid.layers.elementwise_mul', 'F.elementwise_mul', (['inputs', 'mask'], {'axis': '(0)'}), '(inputs, mask, axis=0)\n', (1113, 1135), True, 'import paddle.fluid.layers as F\n'), ((1695, 1737), 'numpy.zeros', 'np.zeros', (['(max_N, max_T)'], {'dtype': 'np.float32'}), '((max_N, max_T), dtype=np.float32)\n', (1703, 1737), True, 'import numpy as np\n'), ((2598, 2661), 'numpy.zeros', 'np.zeros', (['(B, max_decoder_len, max_input_len)'], {'dtype': 'np.float32'}), '((B, max_decoder_len, max_input_len), dtype=np.float32)\n', (2606, 2661), True, 'import numpy as np\n'), ((1147, 1174), 'paddle.fluid.layers.reduce_sum', 'F.reduce_sum', (['masked_inputs'], {}), '(masked_inputs)\n', (1159, 1174), True, 'import paddle.fluid.layers as F\n'), ((4989, 5015), 'paddle.fluid.layers.abs', 'F.abs', (['(prediction - target)'], {}), '(prediction - target)\n', (4994, 5015), True, 'import paddle.fluid.layers as F\n'), ((6540, 6570), 'paddle.fluid.layers.reshape', 'F.reshape', (['prediction', '[-1, 1]'], {}), '(prediction, [-1, 1])\n', (6549, 6570), True, 'import paddle.fluid.layers as F\n'), ((6598, 6624), 'paddle.fluid.layers.reshape', 'F.reshape', (['target', '[-1, 1]'], {}), '(target, [-1, 1])\n', (6607, 6624), True, 'import paddle.fluid.layers as F\n'), ((6650, 6715), 'paddle.fluid.layers.log_loss', 'F.log_loss', (['flattened_prediction', 'flattened_target'], {'epsilon': '(1e-08)'}), '(flattened_prediction, flattened_target, epsilon=1e-08)\n', (6660, 6715), True, 'import paddle.fluid.layers as F\n'), ((6746, 6800), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['flattened_loss', 'prediction.shape'], {}), '(flattened_loss, prediction.shape)\n', (6766, 6800), False, 'from paddle import fluid\n'), ((7555, 7583), 'paddle.fluid.layers.reshape', 'F.reshape', (['done_hat', '[-1, 1]'], {}), '(done_hat, [-1, 
1])\n', (7564, 7583), True, 'import paddle.fluid.layers as F\n'), ((7604, 7628), 'paddle.fluid.layers.reshape', 'F.reshape', (['done', '[-1, 1]'], {}), '(done, [-1, 1])\n', (7613, 7628), True, 'import paddle.fluid.layers as F\n'), ((7644, 7695), 'paddle.fluid.layers.log_loss', 'F.log_loss', (['flat_done_hat', 'flat_done'], {'epsilon': '(1e-08)'}), '(flat_done_hat, flat_done, epsilon=1e-08)\n', (7654, 7695), True, 'import paddle.fluid.layers as F\n'), ((7710, 7729), 'paddle.fluid.layers.reduce_mean', 'F.reduce_mean', (['loss'], {}), '(loss)\n', (7723, 7729), True, 'import paddle.fluid.layers as F\n'), ((8971, 8996), 'paddle.fluid.dygraph.to_variable', 'dg.to_variable', (['soft_mask'], {}), '(soft_mask)\n', (8985, 8996), True, 'import paddle.fluid.dygraph as dg\n'), ((9012, 9070), 'paddle.fluid.layers.reduce_mean', 'fluid.layers.reduce_mean', (['(predicted_attention * soft_mask_)'], {}), '(predicted_attention * soft_mask_)\n', (9036, 9070), False, 'from paddle import fluid\n'), ((10853, 10941), 'paddle.fluid.layers.sequence_mask', 'F.sequence_mask', (['(n_frames // self.downsample_factor)', 'max_mel_steps'], {'dtype': '"""float32"""'}), "(n_frames // self.downsample_factor, max_mel_steps, dtype=\n 'float32')\n", (10868, 10941), True, 'import paddle.fluid.layers as F\n'), ((10969, 11023), 'paddle.fluid.layers.sequence_mask', 'F.sequence_mask', (['n_frames', 'max_frames'], {'dtype': '"""float32"""'}), "(n_frames, max_frames, dtype='float32')\n", (10984, 11023), True, 'import paddle.fluid.layers as F\n'), ((1189, 1207), 'paddle.fluid.layers.reduce_sum', 'F.reduce_sum', (['mask'], {}), '(mask)\n', (1201, 1207), True, 'import paddle.fluid.layers as F\n'), ((5288, 5311), 'paddle.fluid.layers.reduce_mean', 'F.reduce_mean', (['abs_diff'], {}), '(abs_diff)\n', (5301, 5311), True, 'import paddle.fluid.layers as F\n'), ((7009, 7031), 'paddle.fluid.layers.reduce_mean', 'F.reduce_mean', (['bin_div'], {}), '(bin_div)\n', (7022, 7031), True, 'import paddle.fluid.layers as F\n'), 
((1814, 1857), 'numpy.exp', 'np.exp', (['(-(n / N - t / T) ** 2 / (2 * g * g))'], {}), '(-(n / N - t / T) ** 2 / (2 * g * g))\n', (1820, 1857), True, 'import numpy as np\n'), ((5739, 5771), 'paddle.fluid.layers.reduce_mean', 'F.reduce_mean', (['priority_abs_diff'], {}), '(priority_abs_diff)\n', (5752, 5771), True, 'import paddle.fluid.layers as F\n'), ((5223, 5246), 'paddle.fluid.layers.reduce_mean', 'F.reduce_mean', (['abs_diff'], {}), '(abs_diff)\n', (5236, 5246), True, 'import paddle.fluid.layers as F\n'), ((6953, 6975), 'paddle.fluid.layers.reduce_mean', 'F.reduce_mean', (['bin_div'], {}), '(bin_div)\n', (6966, 6975), True, 'import paddle.fluid.layers as F\n'), ((5656, 5688), 'paddle.fluid.layers.reduce_mean', 'F.reduce_mean', (['priority_abs_diff'], {}), '(priority_abs_diff)\n', (5669, 5688), True, 'import paddle.fluid.layers as F\n')] |
r"""@package motsfinder.metric.discrete.discretize
Helpers to create discrete versions of (\eg analytical) metrics.
These can be used to compare results obtained with analytically implemented
metrics with those of the discrete metric classes.
@b Examples
```
# Use a simple Brill-Lindquist metric as example here.
m1 = 0.2; m2 = 0.8; d = 0.6
gBL = BrillLindquistMetric(d=d, m1=m1, m2=m2)
# This creates the discrete version of it.
res = 128; radius = 2
g = DiscretizedMetric(
patch=DiscretizedMetric.construct_patch(res=res, radius=radius),
metric=gBL,
curv=gBL.get_curv(), # not needed here as gBL is time symmetric
)
# To demonstrate that this metric can be used as usual, we find the four
# MOTSs in it.
h = InitHelper(
metric=g,
out_base="some/output/folder_res%s" % res,
suffix="discrete",
)
curves = h.find_four_MOTSs(m1=m1, m2=m2, d=d, plot=True)
```
"""
import numpy as np
from ...numutils import nan_mat, raise_all_warnings
from .patch import GridPatch, DataPatch, BBox
from .metric import DiscreteMetric
from .tensors import DiscreteScalarField, DiscreteVectorField
from .tensors import DiscreteSym2TensorField
__all__ = [
"DiscretizedMetric",
]
class _ScalarField(DiscreteScalarField):
    r"""Discrete axisymmetric scalar field sampled from a callable.

    An (e.g. analytical) scalar function is evaluated at every grid point
    of the associated metric's patch to produce a discrete field.
    """
    def __init__(self, metric, func):
        r"""Create the field; `metric` supplies the grid (domain/resolution)."""
        super().__init__(metric)
        self.__func = func
    def _load_data(self):
        func = self.__func
        patch = self.metric.patch
        values = self.metric.empty_mat()
        # Points where evaluation fails numerically are recorded as NaN.
        with raise_all_warnings():
            for (i, j, k), point in patch.grid(full_output=True):
                values[i, j, k] = _eval(func, point, np.nan)
        return [DataPatch.from_patch(patch, values, 'even')]
class _VectorField(DiscreteVectorField):
    r"""Discrete axisymmetric vector field sampled from a callable.

    The callable is evaluated on the grid defined by the associated
    metric; it must return three floats (the x-, y-, z-components).
    """
    def __init__(self, metric, func):
        r"""Create the field; `metric` supplies the grid (domain/resolution)."""
        super().__init__(metric)
        self.__func = func
    def _load_data(self):
        func = self.__func
        patch = self.metric.patch
        components = [self.metric.empty_mat() for _ in range(3)]
        invalid = nan_mat(3)
        with raise_all_warnings():
            for (i, j, k), point in patch.grid(full_output=True):
                vec = _eval(func, point, invalid)
                for axis in range(3):
                    components[axis][i, j, k] = vec[axis]
        # Per-component symmetry classes (x, y: odd; z: even).
        symmetries = ('odd', 'odd', 'even')
        return [DataPatch.from_patch(patch, mat, sym)
                for mat, sym in zip(components, symmetries)]
class _Sym2TensorField(DiscreteSym2TensorField):
    r"""Discrete axisymmetric symmetric rank-2 tensor field from a callable.

    The callable is evaluated on the grid defined by the associated
    metric; it must return a symmetric 3x3 matrix at each point.
    """
    def __init__(self, metric, func):
        r"""Create the field; `metric` supplies the grid (domain/resolution)."""
        super().__init__(metric)
        self.__func = func
    def _load_data(self):
        func = self.__func
        patch = self.metric.patch
        # The six independent components (row, col) with their symmetry class.
        layout = (
            (0, 0, 'even'),
            (0, 1, 'even'),
            (0, 2, 'odd'),
            (1, 1, 'even'),
            (1, 2, 'odd'),
            (2, 2, 'even'),
        )
        mats = [self.metric.empty_mat() for _ in layout]
        invalid = nan_mat((3, 3))
        with raise_all_warnings():
            for (i, j, k), point in patch.grid(full_output=True):
                tensor = _eval(func, point, invalid)
                for mat, (row, col, _) in zip(mats, layout):
                    mat[i, j, k] = tensor[row, col]
        return [DataPatch.from_patch(patch, mat, sym)
                for mat, (_, _, sym) in zip(mats, layout)]
class DiscretizedMetric(DiscreteMetric):
    r"""Full discrete slice geometry generated from (analytical) functions.
    This takes a 3-metric (..base._ThreeMetric) and optionally callables to
    evaluate e.g. the extrinsic curvature and builds matrices for all the
    different components. This allows discretization of analytical metrics to
    e.g. compare results at different discrete resolutions.
    """
    def __init__(self, patch, metric, curv=None, lapse=None, shift=None,
                 dtlapse=None, dtshift=None):
        r"""Construct a discrete metric on a given patch.
        @param patch
            Definition of the discretization (i.e. domain, resolution). Use
            the convenience class method construct_patch() to easily generate
            such a patch.
        @param metric
            3-metric tensor field. Should be axisymmetric. A valid example is
            ..analytical.simple.BrillLindquistMetric.
        @param curv
            Callable returning symmetric 3x3 matrices representing the
            extrinsic curvature of the 3-slice embedded in spacetime.
        @param lapse,dtlapse
            Lapse function (scalar) and its time derivative (also scalar),
            respectively. Both are callables returning scalar values.
        @param shift,dtshift
            Shift vector field and its time derivative, respectively. Both
            callables should return 3 floats for the x-, y-, z-components of
            the field at the specified point.
        """
        super(DiscretizedMetric, self).__init__()
        self._patch = patch
        # Wrap each supplied callable in the matching discrete field type.
        # Optional fields stay `None` when not supplied.
        self._metric = _Sym2TensorField(self, metric)
        self._curv = _Sym2TensorField(self, curv) if curv else None
        self._lapse = _ScalarField(self, lapse) if lapse else None
        self._shift = _VectorField(self, shift) if shift else None
        self._dtlapse = _ScalarField(self, dtlapse) if dtlapse else None
        self._dtshift = _VectorField(self, dtshift) if dtshift else None
    @classmethod
    def construct_patch(cls, res, radius, origin=(0., 0., 0.)):
        r"""Class method to create a patch definition from a given resolution.
        This creates a patch to be used for constructing a DiscretizedMetric.
        The domain is specified using an origin and "radius" (the domain is,
        of course, rectangular).
        @param res
            Resolution. There will be `res` grid points per unit per axis.
        @param radius
            Coordinate distance from `origin` to include in the domain.
        @param origin
            Origin of the patch around which the radius defines the full
            domain. Default is ``0,0,0``.
        """
        # Copy to avoid mutating the caller's sequence. `dtype=float` is
        # essential: an integer sequence such as ``(0, 0, 0)`` would
        # otherwise yield an integer array, making ``origin[0] = xmin``
        # truncate silently and ``origin[2] -= radius`` fail to cast for
        # non-integer radii.
        origin = np.array(origin, dtype=float)
        # Exploit axisymmetry: never extend the domain to negative x.
        xmin = max(0., origin[0] - radius)
        xmax = origin[0] + radius
        origin[0] = xmin
        origin[2] -= radius
        deltas = 1./res * np.identity(3)
        box = BBox(
            lower=[0, 0, 0],
            upper=[int(round(res*(xmax-xmin)))+1,
                   1,
                   int(round(2*res*radius))+1],
        )
        return GridPatch(origin=origin, deltas=deltas, box=box)
    @property
    def patch(self):
        r"""Patch property used during construction."""
        return self._patch
    def empty_mat(self):
        r"""Empty (zero) matrix of the correct shape for the full domain."""
        return np.zeros(shape=self.patch.shape)
    def all_field_objects(self):
        r"""Return all (possibly None) discrete field objects of this slice."""
        return [
            self._metric, self._curv, self._lapse, self._shift,
            self._dtlapse, self._dtshift,
        ]
    def _get_metric(self):
        return self._metric
    def get_curv(self):
        return self._curv
    def get_lapse(self):
        return self._lapse
    def get_dtlapse(self):
        return self._dtlapse
    def get_shift(self):
        return self._shift
    def get_dtshift(self):
        return self._dtshift
def _eval(func, arg, default):
r"""Evaluate a function, returning a given default in case of error.
If evaluating the function succeeds, the produced value is returned. If,
however, a `FloatingPointError` is raised, the given default value is
returned instead.
@param func
Callable to evaluate.
@param arg
Argument to call `func` with.
@param default
Value to return in case of `FloatingPointError`.
"""
try:
return func(arg)
except (FloatingPointError, ZeroDivisionError):
return default
| [
"numpy.identity",
"numpy.array",
"numpy.zeros"
] | [((7924, 7940), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (7932, 7940), True, 'import numpy as np\n'), ((8626, 8658), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.patch.shape'}), '(shape=self.patch.shape)\n', (8634, 8658), True, 'import numpy as np\n'), ((8131, 8145), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (8142, 8145), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as pp
omega = 0.1    # driving frequency
omega_n = 5    # natural frequency
def analytic(t):
    """Analytic reference: (cos(omega*t) - cos(omega_n*t)) / (omega_n^2 - omega^2)."""
    denominator = omega_n * omega_n - omega * omega
    return (np.cos(omega * t) - np.cos(omega_n * t)) / denominator
# Phase-space portrait per integrator: overlay the runs for all step counts.
num = [1000, 10000, 100000]
for method in ('euler', 'euler_symplectic', 'rk4'):
    for nsteps in num:
        fname = method + '.' + str(nsteps) + '.out'
        t, x, xprime = np.loadtxt(fname, unpack=True)
        labelstring = 'Nsteps = ' + str(nsteps)
        # Thicker lines for the coarser runs so they remain visible underneath.
        width = {1000: 2, 10000: 2.5}.get(nsteps)
        if width is None:
            pp.plot(x, xprime, label=labelstring)
        else:
            pp.plot(x, xprime, label=labelstring, lw=width)
    pp.xlabel("position")
    pp.ylabel("velocity")
    pp.xlim(-.1, .1)
    pp.ylim(-.3, .3)
    if method == 'euler':
        # wider window for the explicit Euler results
        pp.xlim(-1, 1)
        pp.ylim(-1, 1)
    pp.legend(loc='best')
    pp.title(method)
    pp.savefig('pdf/' + method + '_phase_plot.pdf')
    pp.close()
# For each step count, compare all three integrators in one figure.
for nsteps in num:
    for method in ('euler', 'euler_symplectic', 'rk4'):
        t, x, xprime = np.loadtxt(method + '.' + str(nsteps) + '.out', unpack=True)
        if method == 'euler_symplectic':
            # extra-wide line so this curve stands out
            pp.plot(x, xprime, label=method, lw=4)
        else:
            pp.plot(x, xprime, label=method)
    pp.xlabel("position")
    pp.ylabel("velocity")
    pp.xlim(-.2, .15)
    pp.ylim(-1, 1)
    pp.legend(loc='best')
    pp.title('Nsteps = ' + str(nsteps))
    pp.savefig('pdf/' + str(nsteps) + '_phase_plot.pdf')
    pp.close()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.loadtxt",
"numpy.cos",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend"
] | [((838, 859), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (['"""position"""'], {}), "('position')\n", (847, 859), True, 'import matplotlib.pyplot as pp\n'), ((864, 885), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['"""velocity"""'], {}), "('velocity')\n", (873, 885), True, 'import matplotlib.pyplot as pp\n'), ((890, 908), 'matplotlib.pyplot.xlim', 'pp.xlim', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (897, 908), True, 'import matplotlib.pyplot as pp\n'), ((910, 928), 'matplotlib.pyplot.ylim', 'pp.ylim', (['(-0.3)', '(0.3)'], {}), '(-0.3, 0.3)\n', (917, 928), True, 'import matplotlib.pyplot as pp\n'), ((998, 1019), 'matplotlib.pyplot.legend', 'pp.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1007, 1019), True, 'import matplotlib.pyplot as pp\n'), ((1024, 1038), 'matplotlib.pyplot.title', 'pp.title', (['type'], {}), '(type)\n', (1032, 1038), True, 'import matplotlib.pyplot as pp\n'), ((1085, 1098), 'matplotlib.pyplot.savefig', 'pp.savefig', (['s'], {}), '(s)\n', (1095, 1098), True, 'import matplotlib.pyplot as pp\n'), ((1103, 1113), 'matplotlib.pyplot.close', 'pp.close', ([], {}), '()\n', (1111, 1113), True, 'import matplotlib.pyplot as pp\n'), ((1569, 1590), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (['"""position"""'], {}), "('position')\n", (1578, 1590), True, 'import matplotlib.pyplot as pp\n'), ((1595, 1616), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['"""velocity"""'], {}), "('velocity')\n", (1604, 1616), True, 'import matplotlib.pyplot as pp\n'), ((1621, 1640), 'matplotlib.pyplot.xlim', 'pp.xlim', (['(-0.2)', '(0.15)'], {}), '(-0.2, 0.15)\n', (1628, 1640), True, 'import matplotlib.pyplot as pp\n'), ((1642, 1656), 'matplotlib.pyplot.ylim', 'pp.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1649, 1656), True, 'import matplotlib.pyplot as pp\n'), ((1660, 1681), 'matplotlib.pyplot.legend', 'pp.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1669, 1681), True, 'import matplotlib.pyplot as pp\n'), ((1729, 1750), 'matplotlib.pyplot.title', 
'pp.title', (['titlestring'], {}), '(titlestring)\n', (1737, 1750), True, 'import matplotlib.pyplot as pp\n'), ((1803, 1816), 'matplotlib.pyplot.savefig', 'pp.savefig', (['s'], {}), '(s)\n', (1813, 1816), True, 'import matplotlib.pyplot as pp\n'), ((1821, 1831), 'matplotlib.pyplot.close', 'pp.close', ([], {}), '()\n', (1829, 1831), True, 'import matplotlib.pyplot as pp\n'), ((343, 369), 'numpy.loadtxt', 'np.loadtxt', (['s'], {'unpack': '(True)'}), '(s, unpack=True)\n', (353, 369), True, 'import numpy as np\n'), ((958, 972), 'matplotlib.pyplot.xlim', 'pp.xlim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (965, 972), True, 'import matplotlib.pyplot as pp\n'), ((980, 994), 'matplotlib.pyplot.ylim', 'pp.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (987, 994), True, 'import matplotlib.pyplot as pp\n'), ((1305, 1331), 'numpy.loadtxt', 'np.loadtxt', (['s'], {'unpack': '(True)'}), '(s, unpack=True)\n', (1315, 1331), True, 'import numpy as np\n'), ((103, 120), 'numpy.cos', 'np.cos', (['(omega * t)'], {}), '(omega * t)\n', (109, 120), True, 'import numpy as np\n'), ((121, 140), 'numpy.cos', 'np.cos', (['(omega_n * t)'], {}), '(omega_n * t)\n', (127, 140), True, 'import numpy as np\n'), ((544, 589), 'matplotlib.pyplot.plot', 'pp.plot', (['x', 'xprime'], {'label': 'labelstring', 'lw': '(2.5)'}), '(x, xprime, label=labelstring, lw=2.5)\n', (551, 589), True, 'import matplotlib.pyplot as pp\n'), ((1382, 1418), 'matplotlib.pyplot.plot', 'pp.plot', (['x', 'xprime'], {'label': 'type', 'lw': '(4)'}), '(x, xprime, label=type, lw=4)\n', (1389, 1418), True, 'import matplotlib.pyplot as pp\n'), ((1442, 1472), 'matplotlib.pyplot.plot', 'pp.plot', (['x', 'xprime'], {'label': 'type'}), '(x, xprime, label=type)\n', (1449, 1472), True, 'import matplotlib.pyplot as pp\n'), ((627, 670), 'matplotlib.pyplot.plot', 'pp.plot', (['x', 'xprime'], {'label': 'labelstring', 'lw': '(2)'}), '(x, xprime, label=labelstring, lw=2)\n', (634, 670), True, 'import matplotlib.pyplot as pp\n'), ((694, 731), 
'matplotlib.pyplot.plot', 'pp.plot', (['x', 'xprime'], {'label': 'labelstring'}), '(x, xprime, label=labelstring)\n', (701, 731), True, 'import matplotlib.pyplot as pp\n')] |
import numpy as np
import properscoring as ps
from scipy.stats import norm
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
EPS = 1e-6  # numerical floor used before taking logarithms
def rmse(predictions, targets):
    """
    Root Mean Squared Error
    Args:
        predictions (np.ndarray): Point Predictions of the model
        targets (np.ndarray): Point Targets of the model
    Returns:
        float: RMSE
    """
    squared_errors = (predictions - targets) ** 2
    return np.sqrt(squared_errors.mean())
def mape(predictions, targets):
    """
    Mean Absolute Percentage Error
    Args:
        predictions (np.ndarray): Predictions of the model
        targets (np.ndarray): Targets of the model (must be non-zero)
    Returns:
        float: MAPE
    """
    relative_errors = np.abs((predictions - targets) / targets)
    return np.mean(relative_errors) * 100
# `targets` holds the ground truth; `mean`/`std` parameterize the Gaussian forecast.
def crps(mean, std, targets):
    """
    Gaussian CRPS averaged over all points.
    Args:
        mean (np.ndarray): Mean of the distribution (N)
        std (np.ndarray): Standard Deviation of the distribution (N)
        targets (np.ndarray): Targets of the model (N)
    Returns:
        float: CRPS
    """
    pointwise = ps.crps_gaussian(targets, mean, std)
    return pointwise.mean()
# Sample-based (ensemble) variant of the CRPS.
def crps_samples(samples, targets):
    """
    Ensemble CRPS averaged over all points.
    Args:
        samples (np.ndarray): Samples of the distribution (N, samples)
        targets (np.ndarray): Targets of the model (N)
    Returns:
        float: CRPS
    """
    pointwise = ps.crps_ensemble(targets, samples)
    return pointwise.mean()
def log_score(mean, std, targets, window=0.1):
    """
    Windowed log score of a Gaussian forecast.
    Args:
        mean (np.ndarray): Mean of the distribution (N)
        std (np.ndarray): Standard Deviation of the distribution (N)
        targets (np.ndarray): Targets of the model (N)
        window (float): width of the probability window around each target
    Returns:
        float: Log Score, clamped to [-10, 10]

    NOTE(review): `targets` are min-max rescaled here while `mean`/`std`
    are not — confirm callers pass forecasts on the rescaled axis.
    """
    # rescale targets (sklearn expects a 2-D array here)
    targets = MinMaxScaler().fit_transform(targets)
    # probability mass the forecast assigns to a window around each target
    below = norm.cdf(targets - window / 2.0, mean, std)
    above = norm.cdf(targets + window / 2.0, mean, std)
    raw = np.log(np.clip(above - below, EPS, 1.0)).mean()
    # clamp so a single extreme value cannot dominate
    return np.clip(raw, -10, 10)
# Interval-based variant of the windowed log score.
def interval_score(mean, std, targets, window=1.0):
    """
    Interval Score
    Args:
        mean (np.ndarray): Mean of the distribution (N)
        std (np.ndarray): Standard Deviation of the distribution (N)
        targets (np.ndarray): Targets of the model (N)
        window (float): width of the interval around each rounded target
    Returns:
        float: Interval Score

    NOTE(review): the upper bound is clipped at 13 — looks dataset-specific;
    confirm against the target scale.
    """
    # rescale targets (sklearn expects a 2-D array here)
    targets = MinMaxScaler().fit_transform(targets)
    anchor = np.round(targets, decimals=1)
    low_val = np.clip(anchor - window / 2, a_min=0.0, a_max=None)
    high_val = np.clip(anchor + window / 2, a_min=None, a_max=13)
    mass = norm.cdf(high_val, loc=mean, scale=std) - norm.cdf(low_val, loc=mean, scale=std)
    return np.log(np.clip(mass, a_min=EPS, a_max=1.0)).mean()
def conf_interval(mean, var, conf):
    """
    Two-sided confidence interval of a Gaussian forecast.
    Args:
        mean (np.ndarray): Mean of the distribution (N)
        var (np.ndarray): Variance of the distribution (N)
        conf (float): Confidence level, e.g. 0.95
    Returns:
        tuple: (low, high) interval
    """
    tail = (1.0 - conf) / 2
    std = var ** 0.5
    high = norm.ppf(1.0 - tail, loc=mean, scale=std)
    low = norm.ppf(tail, loc=mean, scale=std)
    return low, high
def pres_recall(mean, var, target, conf):
    """
    Fraction of GT points strictly inside the confidence interval.
    Args:
        mean (np.ndarray): Mean of the distribution (N)
        var (np.ndarray): Variance of the distribution (N)
        target (np.ndarray): Target of the model (N)
        conf (float): Confidence level
    Returns:
        np.ndarray: Fraction of GT points within the confidence interval
    """
    low, high = conf_interval(mean, var, conf)
    inside = ((target > low) & (target < high)).astype("float32")
    return inside.mean(-1)
# Calibration-curve helper (the actual plotting call is commented out below).
def get_pr(pred, var, target, color="blue", label="FluFNP"):
    """
    Compute calibration statistics over a sweep of confidence levels.
    Args:
        pred (np.ndarray): Predictions of the model (N)
        var (np.ndarray): Variance of the distribution (N)
        target (np.ndarray): Target of the model (N)
        color (str): Color of the line (unused while plotting is disabled)
        label (str): Label of the model (unused while plotting is disabled)
    Returns:
        tuple: (AUC, Confidence score, fraction values) — note the order
        matches the code below, not (conf, auc).
    """
    # confidence levels 0.05 .. 0.99 as a (95, 1) column vector
    x = np.arange(0.05, 1.0, 0.01).reshape((95, 1))
    # empirical coverage at each confidence level
    y = np.array([pres_recall(pred, var, target, c) for c in x])
    # plt.plot(list(x) + [1.0], list(y) + [1.0], label=label, color=color)
    # mean absolute calibration error (Riemann sum with step 0.01)
    conf_score = np.abs(y - x).sum() * 0.01
    auc = y.sum() * 0.01
    return auc, conf_score, list(y) + [1.0] | [
"numpy.clip",
"numpy.abs",
"numpy.arange",
"scipy.stats.norm.ppf",
"properscoring.crps_ensemble",
"properscoring.crps_gaussian",
"scipy.stats.norm.cdf",
"sklearn.preprocessing.MinMaxScaler",
"numpy.round"
] | [((1820, 1834), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1832, 1834), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1893, 1936), 'scipy.stats.norm.cdf', 'norm.cdf', (['(targets - window / 2.0)', 'mean', 'std'], {}), '(targets - window / 2.0, mean, std)\n', (1901, 1936), False, 'from scipy.stats import norm\n'), ((1946, 1989), 'scipy.stats.norm.cdf', 'norm.cdf', (['(targets + window / 2.0)', 'mean', 'std'], {}), '(targets + window / 2.0, mean, std)\n', (1954, 1989), False, 'from scipy.stats import norm\n'), ((2052, 2071), 'numpy.clip', 'np.clip', (['a', '(-10)', '(10)'], {}), '(a, -10, 10)\n', (2059, 2071), True, 'import numpy as np\n'), ((2457, 2471), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2469, 2471), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2530, 2559), 'numpy.round', 'np.round', (['targets'], {'decimals': '(1)'}), '(targets, decimals=1)\n', (2538, 2559), True, 'import numpy as np\n'), ((2574, 2625), 'numpy.clip', 'np.clip', (['(rd_val - window / 2)'], {'a_min': '(0.0)', 'a_max': 'None'}), '(rd_val - window / 2, a_min=0.0, a_max=None)\n', (2581, 2625), True, 'import numpy as np\n'), ((2641, 2691), 'numpy.clip', 'np.clip', (['(rd_val + window / 2)'], {'a_min': 'None', 'a_max': '(13)'}), '(rd_val + window / 2, a_min=None, a_max=13)\n', (2648, 2691), True, 'import numpy as np\n'), ((2701, 2739), 'scipy.stats.norm.cdf', 'norm.cdf', (['low_val'], {'loc': 'mean', 'scale': 'std'}), '(low_val, loc=mean, scale=std)\n', (2709, 2739), False, 'from scipy.stats import norm\n'), ((2749, 2788), 'scipy.stats.norm.cdf', 'norm.cdf', (['high_val'], {'loc': 'mean', 'scale': 'std'}), '(high_val, loc=mean, scale=std)\n', (2757, 2788), False, 'from scipy.stats import norm\n'), ((3215, 3271), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1.0 - out_prob / 2)'], {'loc': 'mean', 'scale': '(var ** 0.5)'}), '(1.0 - out_prob / 2, loc=mean, scale=var ** 0.5)\n', (3223, 3271), False, 'from 
scipy.stats import norm\n'), ((3282, 3336), 'scipy.stats.norm.ppf', 'norm.ppf', (['((1.0 - conf) / 2)'], {'loc': 'mean', 'scale': '(var ** 0.5)'}), '((1.0 - conf) / 2, loc=mean, scale=var ** 0.5)\n', (3290, 3336), False, 'from scipy.stats import norm\n'), ((729, 770), 'numpy.abs', 'np.abs', (['((predictions - targets) / targets)'], {}), '((predictions - targets) / targets)\n', (735, 770), True, 'import numpy as np\n'), ((1116, 1152), 'properscoring.crps_gaussian', 'ps.crps_gaussian', (['targets', 'mean', 'std'], {}), '(targets, mean, std)\n', (1132, 1152), True, 'import properscoring as ps\n'), ((1422, 1456), 'properscoring.crps_ensemble', 'ps.crps_ensemble', (['targets', 'samples'], {}), '(targets, samples)\n', (1438, 1456), True, 'import properscoring as ps\n'), ((4399, 4425), 'numpy.arange', 'np.arange', (['(0.05)', '(1.0)', '(0.01)'], {}), '(0.05, 1.0, 0.01)\n', (4408, 4425), True, 'import numpy as np\n'), ((2006, 2032), 'numpy.clip', 'np.clip', (['(t2 - t1)', 'EPS', '(1.0)'], {}), '(t2 - t1, EPS, 1.0)\n', (2013, 2032), True, 'import numpy as np\n'), ((2807, 2845), 'numpy.clip', 'np.clip', (['(t2 - t1)'], {'a_min': 'EPS', 'a_max': '(1.0)'}), '(t2 - t1, a_min=EPS, a_max=1.0)\n', (2814, 2845), True, 'import numpy as np\n'), ((4600, 4613), 'numpy.abs', 'np.abs', (['(y - x)'], {}), '(y - x)\n', (4606, 4613), True, 'import numpy as np\n')] |
"""
Train and test the key identification CNN
"""
import database
import generator
import eda
from glob import glob
import os
import numpy as np
import keras
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, add
from keras.layers import Dropout, Flatten, Dense
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score
def build_model():
    """Builds the key identification CNN model.

    Returns a compiled Keras ``Model`` mapping a (15, 155, 1) input window
    to 88 independent sigmoid outputs (multi-label — presumably one per
    piano key; confirm against the label encoding), trained with binary
    cross-entropy and the Adam optimizer.
    """
    # o = 0.0388
    inputs = Input(shape=(15,155,1))
    nb_filters = 32
    # 15 x 155 x 1
    # Stage 1: two convolutions, then pool the frequency axis only.
    x = Conv2D(filters=nb_filters, kernel_size=(3,3), padding='same', activation='relu')(inputs)
    x = Conv2D(filters=2*nb_filters, kernel_size=(3,3), padding='same', activation='relu')(x)
    x = Dropout(0.2)(x)
    pool = MaxPooling2D(pool_size=(1,2), padding='same')(x)
    # 15 x 78 x 32
    # Residual-style stage: conv block output added back to the pooled input.
    x = Conv2D(filters=nb_filters, kernel_size=(3,3), padding='same', activation='relu')(pool)
    x = Conv2D(filters=2*nb_filters, kernel_size=(3,3), padding='same', activation='relu')(x)
    x = Dropout(0.2)(x)
    x = add([pool,x])
    pool = MaxPooling2D(pool_size=(1,2), padding='same')(x)
    # 15 x 39 x 32
    x = Conv2D(filters=nb_filters, kernel_size=(3,3), padding='same', activation='relu')(pool)
    x = Conv2D(filters=2*nb_filters, kernel_size=(3,3), padding='same', activation='relu')(x)
    x = Dropout(0.2)(x)
    x = add([pool,x])
    pool = MaxPooling2D(pool_size=(1,2), padding='same')(x)
    # 15 x 20 x 32
    # From here pooling shrinks both the time and frequency axes.
    x = Conv2D(filters=nb_filters, kernel_size=(3,3), padding='same', activation='relu')(pool)
    x = Conv2D(filters=2*nb_filters, kernel_size=(3,3), padding='same', activation='relu')(x)
    x = Dropout(0.2)(x)
    x = add([pool,x])
    pool = MaxPooling2D(pool_size=(2,2), padding='same')(x)
    # 8 x 10 x 32
    x = Conv2D(filters=nb_filters, kernel_size=(3,3), padding='same', activation='relu')(pool)
    x = Conv2D(filters=2*nb_filters, kernel_size=(3,3), padding='same', activation='relu')(x)
    x = Dropout(0.2)(x)
    x = add([pool,x])
    #pool = MaxPooling2D(pool_size=(2,2), padding='same')(x)
    pool = x
    # 8 x 10 x 64
    # Widening stages: channel count doubles, pooling halves both axes.
    x = Conv2D(filters=2*nb_filters, kernel_size=(3,3), padding='same', activation='relu')(pool)
    x = add([pool,x])
    x = Conv2D(filters=4*nb_filters, kernel_size=(3,3), padding='same', activation='relu')(x)
    x = Dropout(0.2)(x)
    pool = MaxPooling2D(pool_size=(2,2), padding='same')(x)
    # 4 x 5 x 128
    x = Conv2D(filters=4*nb_filters, kernel_size=(2,2), padding='same', activation='relu')(pool)
    x = add([pool,x])
    x = Conv2D(filters=8*nb_filters, kernel_size=(2,2), padding='same', activation='relu')(x)
    x = Dropout(0.2)(x)
    pool = MaxPooling2D(pool_size=(2,2), padding='same')(x)
    # 2 x 3 x 256
    x = Dropout(0.2)(pool)
    x = Flatten()(x)
##    x = Dense(512, activation='relu')(x)
##    x = Dropout(0.2)(x)
##    x = Dense(256, activation='relu')(x)
##    x = Dropout(0.2)(x)
    # 88 independent sigmoids: multi-label note activation output.
    predictions = Dense(88, activation='sigmoid')(x)
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer=Adam(),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
def train_model(model, db, output=None, epochs=300):
    """
    Train a key identification model on a database
    :param model: model to train
    :param db: database to train on; its 'train' group must contain
        'cqt' inputs and 'note_labels' targets
    :param output: output filename of the best trained model; defaults to
        the database name with a '_note.hdf5' suffix
    :param epochs: number of iterations
    :return: train history (dict of per-epoch metric lists)
    """
    if output==None:
        dbname = db.name()
        output = dbname[:dbname.rindex('.')] + '_note.hdf5'
    #model.summary()
    # Keep only the weights with the best validation loss.
    checkpointer = ModelCheckpoint(filepath=output,
                                    verbose=1, save_best_only=True)
    train_group = ['train','cqt']
    xgroup = db.get_subgroup(['train','cqt'])
    ygroup = db.get_subgroup(['train','note_labels'])
    # Chunk size per generator step, capped by the dataset size.
    step = min(500, db.get_total_points(train_group))
    # Last 20% of each array is held out for validation (shift='end').
    frac_val = 0.2
    frac = 1.-frac_val
    nb_steps, tmp = generator.get_nb_steps(xgroup, step, frac, dict_arrays=True)
    nb_steps_val, tmp = generator.get_nb_steps(xgroup, step, frac_val, shift='end', dict_arrays=True)
    print('step: ',step)
    print('nb_steps: ',nb_steps, nb_steps_val)
    # NOTE(review): steps_per_epoch is hard-coded to 100 (the computed
    # max(4, nb_steps) is commented out) — confirm this is intentional.
    hist = model.fit_generator(generator.generator( (xgroup, ygroup),
                                                    nb=step,
                                                    frac=frac,
                                                    dict_arrays=True),
                               steps_per_epoch= 100,#max(4,nb_steps),
                               max_queue_size=1,
                               validation_data= generator.generator( (xgroup, ygroup),
                                                    nb=step,
                                                    frac=frac_val,
                                                    shift='end',
                                                    dict_arrays=True),
                               validation_steps= nb_steps_val,
                               epochs=epochs,
                               callbacks=[checkpointer],
                               verbose=2
                               )
    return hist.history
def compute_scores(model, set_name, subset, log, categories=False):
    """
    Do predictions on a dataset and compute scores
    :param model: model to test
    :param set_name: string in the form of set_1 (must be in data folder)
    :param subset: string in the form of 'test', 'train' (must be in the set_name folder)
    :param log: opened log file to write results into
    :param categories: set to True to compute scores per category
    :return: None
    """
    filenames = glob('data/{}/{}/*.mid'.format(set_name, subset))
    basenames = []
    for f in filenames:
        basenames.append(os.path.splitext(os.path.basename(f))[0])
    db = database.DatabaseReader('data_{}.hdf5'.format(set_name))
    print("compute scores for : {} / {}".format(set_name, subset))
    dgroup = db.get_subgroup([subset, 'cqt'])
    lgroup = db.get_subgroup([subset, 'note_labels'])
    res = {}  # per-song (score, stats) pairs; currently collected but unused
    # Per-note (88-long) confusion-matrix accumulators over all songs.
    true_positive = np.zeros(88)
    true_negative = np.zeros(88)
    false_positive = np.zeros(88)
    false_negative = np.zeros(88)
    nb_class = np.zeros(88)
    nb_total = 0
    tp_total = 0
    tn_total = 0
    fp_total = 0
    fn_total = 0
    for name in dgroup:
        # Binarize the sigmoid outputs at 0.5.
        test_preds = 1. * (model.predict(dgroup[name]) >= 0.5)
        y_test = lgroup[name]
        x = test_preds
        y = y_test[:]
        # Encode (truth, prediction) pairs: 0=TN, 1=FN, 2=FP, 3=TP.
        z = y + 2*x
        # foreach note (88 arrays):
        true_positive += np.sum(z==3, axis=0)
        true_negative += np.sum(z==0, axis=0)
        false_positive += np.sum(z==2, axis=0)
        false_negative += np.sum(z==1, axis=0)
        nb_class += np.sum(y, axis=0)
        nb_total += x.shape[0]
        # NOTE(review): these add the *running* totals each iteration, so
        # the micro-average quantities derived from them below are not true
        # micro averages (they are marked "unused" there) — confirm intent.
        tp_total += np.sum(true_positive)
        tn_total += np.sum(true_negative)
        fp_total += np.sum(false_positive)
        fn_total += np.sum(false_negative)
        # Per-song sklearn scores on the raw label matrices.
        score = (accuracy_score(y_test, test_preds),
                 precision_score(y_test, test_preds, average='weighted'),
                 recall_score(y_test, test_preds, average='weighted'),
                 f1_score(y_test, test_preds, average='weighted'))
        # Recover the MIDI file whose basename appears in this dataset name.
        for f, b in zip(filenames, basenames):
            if b in name:
                break
        stats = eda.analyze(f)
        # Keep the most frequent value of each analyzed characteristic.
        stats_simp = []
        for s in stats:
            stats_simp.append(s[0][s[1].argmax()])
        res[name] = (score, stats_simp)
        print(f'--- {name} ---')
        print("accuracy {:.4f}".format(score[0]))
        print("precision {:.4f}".format(score[1]))
        print("recall {:.4f}".format(score[2]))
        print("F1-score {:.4f}".format(score[3]))
        log.write(name)
        for r in score:
            log.write('\t{:.4f}'.format(r))
        for s in stats_simp:
            log.write('\t{}'.format(s))
        log.write('\n')
        log.flush()
    log.write(f'TOTAL_{set_name}_{subset}')
    # score per class
    accu = (true_positive+true_negative)/nb_total
    # Zero denominators yield NaN here (numpy warning); nanmean/nansum below
    # skip those classes deliberately.
    prec = np.divide(true_positive, (true_positive+false_positive))
    reca = np.divide(true_positive, (true_positive+false_negative))
    f1 = np.divide(2*prec*reca, (prec+reca))
    # macro score (unused)
    macro_f1 = np.nanmean(f1)
    # micro score (unused)
    micro_prec = tp_total/(tp_total+fp_total)
    micro_reca = tp_total/(tp_total+fn_total)
    micro_f1 = 2*micro_prec*micro_reca/(micro_reca+micro_prec)
    # weighted score
    w = nb_class/np.sum(nb_class)
    w_accu = np.nansum(accu*w)
    w_prec = np.nansum(prec*w)
    w_reca = np.nansum(reca*w)
    w_f1 = np.nansum(f1*w)
    for r in (w_accu, w_prec, w_reca, w_f1):
        log.write('\t{:.4f}'.format(r))
    # NOTE(review): stats_simp here is left over from the *last* song of the
    # loop (and is undefined if the group was empty) — confirm intent.
    for s in stats_simp:
        log.write('\t-')
    log.write('\n')
    # category score
    if categories:
        for i in range(len(accu)):
            log.write(f'carac_note\t{i}\t{accu[i]}\t{prec[i]}\t{reca[i]}\t{f1[i]}\n')
    log.flush()
def train_sets(sets, epochs):
    """
    Train one model per dataset and dump its training history.
    :param sets: list of dataset names (each needs a data_<name>.hdf5 file)
    :param epochs: number of iterations
    :return: None (writes best_note_<name>.hdf5 and a training log per set)
    """
    for set_name in sets:
        print(f'-------- Training on {set_name} --------')
        reader = database.DatabaseReader(f'data_{set_name}.hdf5')
        net = build_model()
        #net.summary()
        history = train_model(net, reader, output=f'best_note_{set_name}.hdf5',
                              epochs=epochs)
        # Dump the history as a tab-separated table: one column per metric.
        with open(f'best_note_{set_name}_training.log', 'w') as log:
            log.write('epoch\t')
            for key in history:
                log.write(key + '\t')
                nb_rows = len(history[key])
            log.write('\n')
            for row in range(nb_rows):
                log.write(f'{row+1}\t')
                for key in history:
                    log.write(f'{history[key][row]}\t')
                log.write('\n')
def test_sets(sets, doTrain=False, categories=False):
    """
    Test models on datasets. Each trained model is evaluated on every set.
    :param sets: list of dataset names
    :param doTrain: set to True to also score the training subset
    :param categories: set to True to also compute scores per category
    :return: None (writes one prediction log per trained model)
    """
    for trained_on in sets:
        with open(f'best_note_{trained_on}_predictions.log', "w") as log:
            print(f'-------- Predictions for model trained on {trained_on} --------')
            log.write('song\tacc\tprec\trecall\tF1\tstroke\tnote\tvolume\tnb\ttempo\n')
            model = keras.models.load_model(f'best_note_{trained_on}.hdf5')
            for evaluated_on in sets:
                print(f' -> computing predictions on {evaluated_on}')
                if doTrain:
                    compute_scores(model, evaluated_on, 'train', log, categories)
                compute_scores(model, evaluated_on, 'test', log, categories)
if __name__ == '__main__':
    # Datasets to process; each must exist as data_<name>.hdf5 plus a
    # matching data/<name>/ folder of MIDI files.
    sets = ['set_1',
            'set_2',
            'set_3',
            'set_4'
            ]
    # Uncomment to (re)train the per-set models before testing.
    # train_sets(sets, epochs=50)
    test_sets(sets, doTrain=True, categories=False)
    # test_sets(sets, doTrain=False, categories=True)
| [
"keras.layers.Conv2D",
"eda.analyze",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.nanmean",
"keras.layers.Dense",
"numpy.divide",
"generator.generator",
"database.DatabaseReader",
"keras.models.Model",
"generator.get_nb_steps",
"keras.optimizers.Adam",
"keras.la... | [((563, 588), 'keras.layers.Input', 'Input', ([], {'shape': '(15, 155, 1)'}), '(shape=(15, 155, 1))\n', (568, 588), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1146, 1160), 'keras.layers.add', 'add', (['[pool, x]'], {}), '([pool, x])\n', (1149, 1160), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1462, 1476), 'keras.layers.add', 'add', (['[pool, x]'], {}), '([pool, x])\n', (1465, 1476), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1778, 1792), 'keras.layers.add', 'add', (['[pool, x]'], {}), '([pool, x])\n', (1781, 1792), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((2093, 2107), 'keras.layers.add', 'add', (['[pool, x]'], {}), '([pool, x])\n', (2096, 2107), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((2306, 2320), 'keras.layers.add', 'add', (['[pool, x]'], {}), '([pool, x])\n', (2309, 2320), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((2623, 2637), 'keras.layers.add', 'add', (['[pool, x]'], {}), '([pool, x])\n', (2626, 2637), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((3095, 3136), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'predictions'}), '(inputs=inputs, outputs=predictions)\n', (3100, 3136), False, 'from keras.models import Model\n'), ((3759, 3823), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'output', 'verbose': '(1)', 'save_best_only': '(True)'}), '(filepath=output, verbose=1, save_best_only=True)\n', (3774, 3823), False, 'from keras.callbacks import ModelCheckpoint\n'), ((4118, 4178), 'generator.get_nb_steps', 'generator.get_nb_steps', (['xgroup', 'step', 'frac'], {'dict_arrays': '(True)'}), '(xgroup, step, frac, dict_arrays=True)\n', (4140, 4178), False, 'import generator\n'), ((4203, 4280), 'generator.get_nb_steps', 'generator.get_nb_steps', (['xgroup', 'step', 'frac_val'], {'shift': '"""end"""', 
'dict_arrays': '(True)'}), "(xgroup, step, frac_val, shift='end', dict_arrays=True)\n", (4225, 4280), False, 'import generator\n'), ((6254, 6266), 'numpy.zeros', 'np.zeros', (['(88)'], {}), '(88)\n', (6262, 6266), True, 'import numpy as np\n'), ((6287, 6299), 'numpy.zeros', 'np.zeros', (['(88)'], {}), '(88)\n', (6295, 6299), True, 'import numpy as np\n'), ((6321, 6333), 'numpy.zeros', 'np.zeros', (['(88)'], {}), '(88)\n', (6329, 6333), True, 'import numpy as np\n'), ((6355, 6367), 'numpy.zeros', 'np.zeros', (['(88)'], {}), '(88)\n', (6363, 6367), True, 'import numpy as np\n'), ((6383, 6395), 'numpy.zeros', 'np.zeros', (['(88)'], {}), '(88)\n', (6391, 6395), True, 'import numpy as np\n'), ((8263, 8319), 'numpy.divide', 'np.divide', (['true_positive', '(true_positive + false_positive)'], {}), '(true_positive, true_positive + false_positive)\n', (8272, 8319), True, 'import numpy as np\n'), ((8331, 8387), 'numpy.divide', 'np.divide', (['true_positive', '(true_positive + false_negative)'], {}), '(true_positive, true_positive + false_negative)\n', (8340, 8387), True, 'import numpy as np\n'), ((8397, 8436), 'numpy.divide', 'np.divide', (['(2 * prec * reca)', '(prec + reca)'], {}), '(2 * prec * reca, prec + reca)\n', (8406, 8436), True, 'import numpy as np\n'), ((8476, 8490), 'numpy.nanmean', 'np.nanmean', (['f1'], {}), '(f1)\n', (8486, 8490), True, 'import numpy as np\n'), ((8743, 8762), 'numpy.nansum', 'np.nansum', (['(accu * w)'], {}), '(accu * w)\n', (8752, 8762), True, 'import numpy as np\n'), ((8774, 8793), 'numpy.nansum', 'np.nansum', (['(prec * w)'], {}), '(prec * w)\n', (8783, 8793), True, 'import numpy as np\n'), ((8805, 8824), 'numpy.nansum', 'np.nansum', (['(reca * w)'], {}), '(reca * w)\n', (8814, 8824), True, 'import numpy as np\n'), ((8834, 8851), 'numpy.nansum', 'np.nansum', (['(f1 * w)'], {}), '(f1 * w)\n', (8843, 8851), True, 'import numpy as np\n'), ((637, 723), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'nb_filters', 'kernel_size': '(3, 3)', 
'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=nb_filters, kernel_size=(3, 3), padding='same', activation=\n 'relu')\n", (643, 723), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((734, 823), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(2 * nb_filters)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=2 * nb_filters, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (740, 823), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((828, 840), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (835, 840), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((855, 901), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(1, 2)', 'padding': '"""same"""'}), "(pool_size=(1, 2), padding='same')\n", (867, 901), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((933, 1019), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'nb_filters', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=nb_filters, kernel_size=(3, 3), padding='same', activation=\n 'relu')\n", (939, 1019), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1028, 1117), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(2 * nb_filters)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=2 * nb_filters, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (1034, 1117), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1122, 1134), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1129, 1134), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((1171, 1217), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(1, 2)', 'padding': '"""same"""'}), "(pool_size=(1, 2), padding='same')\n", (1183, 1217), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), 
((1249, 1335), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'nb_filters', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=nb_filters, kernel_size=(3, 3), padding='same', activation=\n 'relu')\n", (1255, 1335), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1344, 1433), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(2 * nb_filters)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=2 * nb_filters, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (1350, 1433), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1438, 1450), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1445, 1450), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((1487, 1533), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(1, 2)', 'padding': '"""same"""'}), "(pool_size=(1, 2), padding='same')\n", (1499, 1533), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1565, 1651), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'nb_filters', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=nb_filters, kernel_size=(3, 3), padding='same', activation=\n 'relu')\n", (1571, 1651), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1660, 1749), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(2 * nb_filters)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=2 * nb_filters, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (1666, 1749), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1754, 1766), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1761, 1766), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((1803, 1849), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), 
"(pool_size=(2, 2), padding='same')\n", (1815, 1849), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1880, 1966), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'nb_filters', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=nb_filters, kernel_size=(3, 3), padding='same', activation=\n 'relu')\n", (1886, 1966), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((1975, 2064), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(2 * nb_filters)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=2 * nb_filters, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (1981, 2064), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((2069, 2081), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2076, 2081), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((2209, 2298), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(2 * nb_filters)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=2 * nb_filters, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (2215, 2298), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((2328, 2417), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(4 * nb_filters)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=4 * nb_filters, kernel_size=(3, 3), padding='same',\n activation='relu')\n", (2334, 2417), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((2422, 2434), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2429, 2434), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((2449, 2495), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (2461, 2495), False, 'from keras.layers import Input, Conv2D, 
MaxPooling2D, add\n'), ((2526, 2615), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(4 * nb_filters)', 'kernel_size': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=4 * nb_filters, kernel_size=(2, 2), padding='same',\n activation='relu')\n", (2532, 2615), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((2645, 2734), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(8 * nb_filters)', 'kernel_size': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=8 * nb_filters, kernel_size=(2, 2), padding='same',\n activation='relu')\n", (2651, 2734), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((2739, 2751), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2746, 2751), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((2766, 2812), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (2778, 2812), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, add\n'), ((2851, 2863), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2858, 2863), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((2878, 2887), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2885, 2887), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((3047, 3078), 'keras.layers.Dense', 'Dense', (['(88)'], {'activation': '"""sigmoid"""'}), "(88, activation='sigmoid')\n", (3052, 3078), False, 'from keras.layers import Dropout, Flatten, Dense\n'), ((4390, 4465), 'generator.generator', 'generator.generator', (['(xgroup, ygroup)'], {'nb': 'step', 'frac': 'frac', 'dict_arrays': '(True)'}), '((xgroup, ygroup), nb=step, frac=frac, dict_arrays=True)\n', (4409, 4465), False, 'import generator\n'), ((6737, 6759), 'numpy.sum', 'np.sum', (['(z == 3)'], {'axis': '(0)'}), '(z == 3, axis=0)\n', (6743, 6759), True, 'import numpy as np\n'), ((6783, 6805), 
'numpy.sum', 'np.sum', (['(z == 0)'], {'axis': '(0)'}), '(z == 0, axis=0)\n', (6789, 6805), True, 'import numpy as np\n'), ((6830, 6852), 'numpy.sum', 'np.sum', (['(z == 2)'], {'axis': '(0)'}), '(z == 2, axis=0)\n', (6836, 6852), True, 'import numpy as np\n'), ((6877, 6899), 'numpy.sum', 'np.sum', (['(z == 1)'], {'axis': '(0)'}), '(z == 1, axis=0)\n', (6883, 6899), True, 'import numpy as np\n'), ((6918, 6935), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (6924, 6935), True, 'import numpy as np\n'), ((6997, 7018), 'numpy.sum', 'np.sum', (['true_positive'], {}), '(true_positive)\n', (7003, 7018), True, 'import numpy as np\n'), ((7039, 7060), 'numpy.sum', 'np.sum', (['true_negative'], {}), '(true_negative)\n', (7045, 7060), True, 'import numpy as np\n'), ((7081, 7103), 'numpy.sum', 'np.sum', (['false_positive'], {}), '(false_positive)\n', (7087, 7103), True, 'import numpy as np\n'), ((7124, 7146), 'numpy.sum', 'np.sum', (['false_negative'], {}), '(false_negative)\n', (7130, 7146), True, 'import numpy as np\n'), ((7526, 7540), 'eda.analyze', 'eda.analyze', (['f'], {}), '(f)\n', (7537, 7540), False, 'import eda\n'), ((8713, 8729), 'numpy.sum', 'np.sum', (['nb_class'], {}), '(nb_class)\n', (8719, 8729), True, 'import numpy as np\n'), ((9488, 9529), 'database.DatabaseReader', 'database.DatabaseReader', (['f"""data_{s}.hdf5"""'], {}), "(f'data_{s}.hdf5')\n", (9511, 9529), False, 'import database\n'), ((3166, 3172), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (3170, 3172), False, 'from keras.optimizers import Adam\n'), ((4735, 4831), 'generator.generator', 'generator.generator', (['(xgroup, ygroup)'], {'nb': 'step', 'frac': 'frac_val', 'shift': '"""end"""', 'dict_arrays': '(True)'}), "((xgroup, ygroup), nb=step, frac=frac_val, shift='end',\n dict_arrays=True)\n", (4754, 4831), False, 'import generator\n'), ((7165, 7199), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'test_preds'], {}), '(y_test, test_preds)\n', (7179, 7199), 
False, 'from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score\n'), ((7218, 7273), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'test_preds'], {'average': '"""weighted"""'}), "(y_test, test_preds, average='weighted')\n", (7233, 7273), False, 'from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score\n'), ((7292, 7344), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'test_preds'], {'average': '"""weighted"""'}), "(y_test, test_preds, average='weighted')\n", (7304, 7344), False, 'from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score\n'), ((7363, 7411), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'test_preds'], {'average': '"""weighted"""'}), "(y_test, test_preds, average='weighted')\n", (7371, 7411), False, 'from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score\n'), ((10693, 10739), 'keras.models.load_model', 'keras.models.load_model', (['f"""best_note_{s}.hdf5"""'], {}), "(f'best_note_{s}.hdf5')\n", (10716, 10739), False, 'import keras\n'), ((5959, 5978), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (5975, 5978), False, 'import os\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import logging
import os
import time
import math
import reader
import google.protobuf.text_format as text_format
import numpy as np
import six
import paddle
import paddle.fluid as fluid
import paddle.fluid.proto.framework_pb2 as framework_pb2
import paddle.fluid.core as core
from multiprocessing import cpu_count
# disable gpu training for this example
os.environ["CUDA_VISIBLE_DEVICES"] = ""
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("fluid")
logger.setLevel(logging.INFO)
# Number of dense (continuous) input features fed to the model; used as the
# shape of the "dense_input" data layer in ctr_dnn_model().
dense_feature_dim = 13
def parse_args():
    """Parse and return the command-line options for the CTR example.

    Returns:
        argparse.Namespace with train_data_path, batch_size,
        embedding_size, sparse_feature_dim and model_output_dir.
    """
    parser = argparse.ArgumentParser(description="PaddlePaddle CTR example")
    # (flag, type, default, help) — declared as data, registered in one loop.
    option_specs = [
        ("--train_data_path", str, "./data/raw/train.txt",
         "The path of training dataset"),
        ("--batch_size", int, 1000,
         "The size of mini-batch (default:1000)"),
        ("--embedding_size", int, 10,
         "The size for embedding layer (default:10)"),
        ("--sparse_feature_dim", int, 1000001,
         "sparse feature hashing space for index processing"),
        ("--model_output_dir", str, "models",
         "The path for model to store (default: models)"),
    ]
    for flag, arg_type, default, help_text in option_specs:
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)
    return parser.parse_args()
def ctr_dnn_model(embedding_size, sparse_feature_dim, use_py_reader=True):
    """Build the CTR DNN graph inside fluid's default program.

    Args:
        embedding_size: width of each sparse-slot embedding vector.
        sparse_feature_dim: hashing space of the sparse id features.
        use_py_reader: accepted for API compatibility; not used in this body.

    Returns:
        (avg_cost, auc_var, batch_auc_var, train_feed_vars,
         inference_feed_vars, fetch_vars)
    """

    def embedding_layer(sparse_var):
        # Embed one sparse id slot (shared "SparseFeatFactors" table) and
        # average-pool it over the sequence dimension.
        emb = fluid.layers.embedding(
            input=sparse_var,
            is_sparse=True,
            # you need to patch https://github.com/PaddlePaddle/Paddle/pull/14190
            # if you want to set is_distributed to True
            is_distributed=False,
            size=[sparse_feature_dim, embedding_size],
            param_attr=fluid.ParamAttr(
                name="SparseFeatFactors",
                initializer=fluid.initializer.Uniform()))
        pooled = fluid.layers.sequence_pool(input=emb, pool_type='average')
        return emb, pooled

    # Input layers: one dense slot, 26 sparse id slots (C1..C26), one label.
    dense_input = fluid.layers.data(
        name="dense_input", shape=[dense_feature_dim], dtype='float32')
    sparse_input_ids = [
        fluid.layers.data(
            name="C" + str(i), shape=[1], lod_level=1, dtype='int64')
        for i in range(1, 27)
    ]
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    words = [dense_input] + sparse_input_ids + [label]

    embedded = [embedding_layer(w) for w in words[1:-1]]
    emb_list = [pair[0] for pair in embedded]
    sparse_embed_seq = [pair[1] for pair in embedded]
    concated = fluid.layers.concat(sparse_embed_seq + words[0:1], axis=1)

    train_feed_vars = words
    # Inference feeds raw embeddings + the dense slot (lookup is pruned later).
    inference_feed_vars = emb_list + words[0:1]

    # Three 400-wide relu layers; each init scale depends on its fan-in.
    hidden = concated
    for _ in range(3):
        hidden = fluid.layers.fc(
            input=hidden,
            size=400,
            act='relu',
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Normal(
                    scale=1 / math.sqrt(hidden.shape[1]))))
    predict = fluid.layers.fc(
        input=hidden,
        size=2,
        act='softmax',
        param_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Normal(
                scale=1 / math.sqrt(hidden.shape[1]))))

    cost = fluid.layers.cross_entropy(input=predict, label=words[-1])
    avg_cost = fluid.layers.reduce_sum(cost)
    # accuracy/auc add evaluation ops to the default program as a side effect,
    # so the calls are kept even though `accuracy` itself is not returned.
    accuracy = fluid.layers.accuracy(input=predict, label=words[-1])
    auc_var, batch_auc_var, auc_states = fluid.layers.auc(
        input=predict, label=words[-1], num_thresholds=2 ** 12, slide_steps=20)
    fetch_vars = [predict]
    return avg_cost, auc_var, batch_auc_var, train_feed_vars, inference_feed_vars, fetch_vars
def train_loop(args, train_program, feed_vars, loss, auc_var, batch_auc_var,
               trainer_num, trainer_id):
    """Run a single training mini-batch and log its loss/AUC.

    Args:
        args: parsed command-line options (batch_size, train_data_path, ...).
        train_program: training Program handed in by the caller.
            NOTE(review): it is not executed here — fluid.default_main_program()
            is run instead; presumably they are the same program. Confirm.
        feed_vars: variables to feed, in the order produced by ctr_dnn_model().
        loss, auc_var, batch_auc_var: variables fetched from the run and logged.
        trainer_num, trainer_id: data-sharding arguments forwarded to the
            Criteo dataset reader.
    """
    dataset = reader.CriteoDataset(args.sparse_feature_dim)
    # Shuffled, batched reader over the raw training file(s).
    train_reader = paddle.batch(
        paddle.reader.shuffle(
            dataset.train([args.train_data_path], trainer_num, trainer_id),
            buf_size=args.batch_size * 100),
        batch_size=args.batch_size)
    feed_var_names = [var.name for var in feed_vars]
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    total_time = 0  # kept for parity with the original example; unused here
    pass_id = 0
    batch_id = 0
    feeder = fluid.DataFeeder(feed_var_names, place)
    for data in train_reader():
        loss_val, auc_val, batch_auc_val = exe.run(
            fluid.default_main_program(),
            feed=feeder.feed(data),
            fetch_list=[loss.name, auc_var.name, batch_auc_var.name])
        # Deliberately stop after ONE batch: this "training" pass only exists
        # to materialize parameters before save_program() exports the model.
        break
    loss_val = np.mean(loss_val)
    auc_val = np.mean(auc_val)
    batch_auc_val = np.mean(batch_auc_val)
    logger.info("TRAIN --> pass: {} batch: {} loss: {} auc: {}, batch_auc: {}"
                .format(pass_id, batch_id, loss_val / args.batch_size, auc_val,
                        batch_auc_val))
def save_program():
    """Build the CTR model, run one batch of training, and export it.

    Writes an inference-only model (feeds: raw embeddings + dense input,
    fetch: predict) under ``<model_output_dir>/inference_only``.
    """
    args = parse_args()
    if not os.path.isdir(args.model_output_dir):
        os.mkdir(args.model_output_dir)
    (loss, auc_var, batch_auc_var, train_feed_vars, inference_feed_vars,
     fetch_vars) = ctr_dnn_model(
        args.embedding_size, args.sparse_feature_dim, use_py_reader=False)
    adam = fluid.optimizer.Adam(learning_rate=1e-4)
    adam.minimize(loss)
    main_program = fluid.default_main_program()
    exe = fluid.Executor(fluid.CPUPlace())
    # One batch is enough to initialize every parameter before export.
    train_loop(args, main_program, train_feed_vars, loss, auc_var,
               batch_auc_var, 1, 0)
    model_dir = args.model_output_dir + "/inference_only"
    feed_var_names = [var.name for var in inference_feed_vars]
    fluid.io.save_inference_model(model_dir, feed_var_names, fetch_vars, exe,
                                  fluid.default_main_program())
def prune_program():
    """Strip the sparse embedding lookup from the saved inference model.

    Loads the serialized ProgramDesc written by save_program(), removes every
    ``lookup_table`` op and the ``SparseFeatFactors`` variable (the large
    embedding table), then writes the pruned proto back in place plus a
    human-readable ``.prototxt.pruned`` copy.

    Fix: the original called ``f.close()`` inside each ``with open(...)``
    block — redundant, since the context manager already closes the file.
    """
    args = parse_args()
    model_dir = args.model_output_dir + "/inference_only"
    model_file = model_dir + "/__model__"
    with open(model_file, "rb") as f:
        protostr = f.read()
    proto = framework_pb2.ProgramDesc.FromString(six.binary_type(protostr))
    block = proto.blocks[0]
    # Drop all embedding-lookup ops ...
    kept_ops = [op for op in block.ops if op.type != "lookup_table"]
    del block.ops[:]
    block.ops.extend(kept_ops)
    # ... and the embedding parameter variable they referenced.
    kept_vars = [var for var in block.vars if var.name != "SparseFeatFactors"]
    del block.vars[:]
    block.vars.extend(kept_vars)
    with open(model_file, "wb") as f:
        f.write(proto.SerializePartialToString())
    with open(model_file + ".prototxt.pruned", "w") as f:
        f.write(text_format.MessageToString(proto))
def remove_embedding_param_file():
    """Delete the on-disk SparseFeatFactors parameter file.

    After prune_program() the inference program no longer references the
    sparse embedding table, so its saved parameter file can be removed.
    """
    args = parse_args()
    embedding_file = (args.model_output_dir + "/inference_only"
                      + "/SparseFeatFactors")
    os.remove(embedding_file)
if __name__ == '__main__':
    # Pipeline: build + briefly train and export the model, prune the sparse
    # embedding lookup from the exported program, then delete the now-unused
    # embedding parameter file.
    save_program()
    prune_program()
    remove_embedding_param_file()
| [
"logging.getLogger",
"paddle.fluid.DataFeeder",
"paddle.fluid.layers.auc",
"paddle.fluid.layers.data",
"paddle.fluid.layers.cross_entropy",
"math.sqrt",
"paddle.fluid.Executor",
"paddle.fluid.optimizer.Adam",
"os.remove",
"numpy.mean",
"argparse.ArgumentParser",
"paddle.fluid.default_startup_p... | [((1065, 1136), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(format='%(asctime)s - %(levelname)s - %(message)s')\n", (1084, 1136), False, 'import logging\n'), ((1146, 1172), 'logging.getLogger', 'logging.getLogger', (['"""fluid"""'], {}), "('fluid')\n", (1163, 1172), False, 'import logging\n'), ((1260, 1323), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PaddlePaddle CTR example"""'}), "(description='PaddlePaddle CTR example')\n", (1283, 1323), False, 'import argparse\n'), ((2814, 2900), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""dense_input"""', 'shape': '[dense_feature_dim]', 'dtype': '"""float32"""'}), "(name='dense_input', shape=[dense_feature_dim], dtype=\n 'float32')\n", (2831, 2900), True, 'import paddle.fluid as fluid\n'), ((3077, 3134), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""label"""', 'shape': '[1]', 'dtype': '"""int64"""'}), "(name='label', shape=[1], dtype='int64')\n", (3094, 3134), True, 'import paddle.fluid as fluid\n'), ((3387, 3445), 'paddle.fluid.layers.concat', 'fluid.layers.concat', (['(sparse_embed_seq + words[0:1])'], {'axis': '(1)'}), '(sparse_embed_seq + words[0:1], axis=1)\n', (3406, 3445), True, 'import paddle.fluid as fluid\n'), ((4787, 4845), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'predict', 'label': 'words[-1]'}), '(input=predict, label=words[-1])\n', (4813, 4845), True, 'import paddle.fluid as fluid\n'), ((4861, 4890), 'paddle.fluid.layers.reduce_sum', 'fluid.layers.reduce_sum', (['cost'], {}), '(cost)\n', (4884, 4890), True, 'import paddle.fluid as fluid\n'), ((4906, 4959), 'paddle.fluid.layers.accuracy', 'fluid.layers.accuracy', ([], {'input': 'predict', 'label': 'words[-1]'}), '(input=predict, label=words[-1])\n', (4927, 4959), True, 'import paddle.fluid as fluid\n'), ((5011, 5103), 
'paddle.fluid.layers.auc', 'fluid.layers.auc', ([], {'input': 'predict', 'label': 'words[-1]', 'num_thresholds': '(2 ** 12)', 'slide_steps': '(20)'}), '(input=predict, label=words[-1], num_thresholds=2 ** 12,\n slide_steps=20)\n', (5027, 5103), True, 'import paddle.fluid as fluid\n'), ((5356, 5401), 'reader.CriteoDataset', 'reader.CriteoDataset', (['args.sparse_feature_dim'], {}), '(args.sparse_feature_dim)\n', (5376, 5401), False, 'import reader\n'), ((5689, 5705), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (5703, 5705), True, 'import paddle.fluid as fluid\n'), ((5716, 5737), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (5730, 5737), True, 'import paddle.fluid as fluid\n'), ((5851, 5890), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', (['feed_var_names', 'place'], {}), '(feed_var_names, place)\n', (5867, 5890), True, 'import paddle.fluid as fluid\n'), ((6153, 6170), 'numpy.mean', 'np.mean', (['loss_val'], {}), '(loss_val)\n', (6160, 6170), True, 'import numpy as np\n'), ((6185, 6201), 'numpy.mean', 'np.mean', (['auc_val'], {}), '(auc_val)\n', (6192, 6201), True, 'import numpy as np\n'), ((6222, 6244), 'numpy.mean', 'np.mean', (['batch_auc_val'], {}), '(batch_auc_val)\n', (6229, 6244), True, 'import numpy as np\n'), ((6774, 6816), 'paddle.fluid.optimizer.Adam', 'fluid.optimizer.Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (6794, 6816), True, 'import paddle.fluid as fluid\n'), ((6863, 6891), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (6889, 6891), True, 'import paddle.fluid as fluid\n'), ((6905, 6921), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (6919, 6921), True, 'import paddle.fluid as fluid\n'), ((6932, 6953), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (6946, 6953), True, 'import paddle.fluid as fluid\n'), ((8312, 8337), 'os.remove', 'os.remove', (['embedding_file'], {}), '(embedding_file)\n', (8321, 
8337), False, 'import os\n'), ((2712, 2770), 'paddle.fluid.layers.sequence_pool', 'fluid.layers.sequence_pool', ([], {'input': 'emb', 'pool_type': '"""average"""'}), "(input=emb, pool_type='average')\n", (2738, 2770), True, 'import paddle.fluid as fluid\n'), ((5751, 5782), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (5780, 5782), True, 'import paddle.fluid as fluid\n'), ((6503, 6539), 'os.path.isdir', 'os.path.isdir', (['args.model_output_dir'], {}), '(args.model_output_dir)\n', (6516, 6539), False, 'import os\n'), ((6549, 6580), 'os.mkdir', 'os.mkdir', (['args.model_output_dir'], {}), '(args.model_output_dir)\n', (6557, 6580), False, 'import os\n'), ((7291, 7319), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (7317, 7319), True, 'import paddle.fluid as fluid\n'), ((7597, 7622), 'six.binary_type', 'six.binary_type', (['protostr'], {}), '(protostr)\n', (7612, 7622), False, 'import six\n'), ((5987, 6015), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (6013, 6015), True, 'import paddle.fluid as fluid\n'), ((8085, 8119), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['proto'], {}), '(proto)\n', (8112, 8119), True, 'import google.protobuf.text_format as text_format\n'), ((2668, 2695), 'paddle.fluid.initializer.Uniform', 'fluid.initializer.Uniform', ([], {}), '()\n', (2693, 2695), True, 'import paddle.fluid as fluid\n'), ((3806, 3834), 'math.sqrt', 'math.sqrt', (['concated.shape[1]'], {}), '(concated.shape[1])\n', (3815, 3834), False, 'import math\n'), ((4115, 4138), 'math.sqrt', 'math.sqrt', (['fc1.shape[1]'], {}), '(fc1.shape[1])\n', (4124, 4138), False, 'import math\n'), ((4419, 4442), 'math.sqrt', 'math.sqrt', (['fc2.shape[1]'], {}), '(fc2.shape[1])\n', (4428, 4442), False, 'import math\n'), ((4748, 4771), 'math.sqrt', 'math.sqrt', (['fc3.shape[1]'], {}), '(fc3.shape[1])\n', (4757, 4771), False, 'import 
math\n')] |
# -*- coding: utf-8 -*-
# test_calculateTF.py
# This module provides the tests for the calculateTF() function.
# Copyright 2014 <NAME> & <NAME>
# This file is part of python-deltasigma.
#
# python-deltasigma is a 1:1 Python replacement of Richard Schreier's
# MATLAB delta sigma toolbox (aka "delsigma"), upon which it is heavily based.
# The delta sigma toolbox is (c) 2009, <NAME>.
#
# python-deltasigma is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# LICENSE file for the licensing terms.
import unittest
import numpy as np
import deltasigma as ds
from deltasigma._utils import cplxpair
class TestCalculateTF(unittest.TestCase):
"""Test function for calculateTF()"""
def setUp(self):
ABCD = [[1.0, 0.0, 0.0, 0.044408783846879, -0.044408783846879],
[0.999036450096481, 0.997109907515262, -0.005777399147297,
0.0, 0.499759089304780],
[0.499759089304780, 0.999036450096481, 0.997109907515262,
0.0, -0.260002096136488],
[0.0, 0.0, 1.0, 0.0, 0.0]]
ABCD = np.array(ABCD)
ntf, stf = ds.calculateTF(ABCD)
ntf_zeros, ntf_poles, _ = ntf
stf_zeros, stf_poles, _ = stf
mntf_poles = np.array((1.498975311463384, 1.102565142679772,
0.132677264750882))
mntf_zeros = np.array((0.997109907515262 + 0.075972576202904j,
0.997109907515262 - 0.075972576202904j,
1.000000000000000 + 0.000000000000000j))
mstf_zeros = np.array((-0.999999999999996,))
mstf_poles = np.array((1.498975311463384, 1.102565142679772,
0.132677264750882))
# for some reason, sometimes the zeros are in different order.
self.ntf_zeros, self.mntf_zeros = (cplxpair(ntf_zeros),
cplxpair(mntf_zeros))
self.stf_zeros, self.mstf_zeros = (cplxpair(stf_zeros),
cplxpair(mstf_zeros))
self.ntf_poles, self.mntf_poles = (cplxpair(ntf_poles),
cplxpair(mntf_poles))
self.stf_poles, self.mstf_poles = (cplxpair(stf_poles),
cplxpair(mstf_poles))
def test_calculateTF1(self):
"""Test function for calculateTF() 1/4"""
# ntf zeros
self.assertTrue(np.allclose(self.ntf_zeros, self.mntf_zeros, rtol=1e-5,
atol=1e-8))
# ntf poles
self.assertTrue(np.allclose(self.ntf_poles, self.mntf_poles, rtol=1e-5,
atol=1e-8))
# stf zeros
self.assertTrue(np.allclose(self.stf_zeros, self.mstf_zeros, rtol=1e-5,
atol=1e-8))
# stf poles
self.assertTrue(np.allclose(self.stf_poles, self.mstf_poles, rtol=1e-5,
atol=1e-8))
def test_calculateTF2(self):
"""Test function for calculateTF() 2/4"""
# test an easy TF
ABCD = np.array([[1., 1., -1.],
[1., 0., 0.]])
k = 1.
ntf, stf = ds.calculateTF(ABCD, k)
ntf_zeros, ntf_poles, ntf_gain = ntf
stf_zeros, stf_poles, stf_gain = stf
self.assertTrue(np.allclose(stf_poles, [0.], rtol=1e-5, atol=1e-8))
self.assertTrue(not len(stf_zeros))
self.assertTrue(np.allclose(stf_gain, 1., rtol=1e-5, atol=1e-8))
self.assertTrue(np.allclose(ntf_poles, [0.], rtol=1e-5, atol=1e-8))
self.assertTrue(np.allclose(ntf_zeros, [1.], rtol=1e-5, atol=1e-8))
self.assertTrue(np.allclose(ntf_gain, 1., rtol=1e-5, atol=1e-8))
def test_calculateTF3(self):
"""Test function for calculateTF() 3/4"""
# test for the default k value
ABCD = np.array([[1., 1., -1.],
[1., 0., 0.]])
ntf, stf = ds.calculateTF(ABCD)
ntf_zeros, ntf_poles, ntf_gain = ntf
stf_zeros, stf_poles, stf_gain = stf
self.assertTrue(np.allclose(stf_poles, [0.], rtol=1e-5, atol=1e-8))
self.assertTrue(not len(stf_zeros))
self.assertTrue(np.allclose(stf_gain, 1., rtol=1e-5, atol=1e-8))
self.assertTrue(np.allclose(ntf_poles, [0.], rtol=1e-5, atol=1e-8))
self.assertTrue(np.allclose(ntf_zeros, [1.], rtol=1e-5, atol=1e-8))
self.assertTrue(np.allclose(ntf_gain, 1., rtol=1e-5, atol=1e-8))
def test_calculateTF4(self):
"""Test function for calculateTF() 4/4"""
# Easy test for a 2-quantizers system
# MASH 1-0 cascade
ABCD = [[1, 1, -1, 0],
[1, 0, 0, 0],
[1, 0, -1, 0]]
ABCD = np.array(ABCD, dtype=np.float_)
k = [1., 1.]
# here we get back arrays of transfer functions
ntfs, stfs = ds.calculateTF(ABCD, k=k)
# stfs
self.assertTrue(np.allclose(stfs[0][1], [0.], rtol=1e-5, atol=1e-8))
self.assertTrue(not len(stfs[0][0]))
self.assertTrue(np.allclose(stfs[0][2], 1., rtol=1e-5, atol=1e-8))
self.assertTrue(np.allclose(stfs[1][1], [0.], rtol=1e-5, atol=1e-8))
self.assertTrue(not len(stfs[1][0]))
self.assertTrue(np.allclose(stfs[1][2], 1., rtol=1e-5, atol=1e-8))
# e1 to V1
self.assertTrue(np.allclose(ntfs[0, 0][1], [0.], rtol=1e-5, atol=1e-8))
self.assertTrue(np.allclose(ntfs[0, 0][0], [1.], rtol=1e-5, atol=1e-8))
self.assertTrue(np.allclose(ntfs[0, 0][2], 1., rtol=1e-5, atol=1e-8))
# e1 to V2
self.assertTrue(np.allclose(ntfs[1, 0][1], [0.], rtol=1e-5, atol=1e-8))
self.assertTrue(not len(ntfs[1, 0][0]))
self.assertTrue(np.allclose(ntfs[1, 0][2], -1., rtol=1e-5, atol=1e-8))
# e2 to V2
self.assertTrue(not len(ntfs[1, 1][0]))
self.assertTrue(not len(ntfs[1, 1][1]))
self.assertTrue(np.allclose(ntfs[1, 1][2], 1., rtol=1e-5, atol=1e-8))
# e2 to V1
self.assertTrue(np.allclose(ntfs[0, 1][2], 0., rtol=1e-5, atol=1e-8))
| [
"numpy.array",
"numpy.allclose",
"deltasigma.calculateTF",
"deltasigma._utils.cplxpair"
] | [((1198, 1212), 'numpy.array', 'np.array', (['ABCD'], {}), '(ABCD)\n', (1206, 1212), True, 'import numpy as np\n'), ((1232, 1252), 'deltasigma.calculateTF', 'ds.calculateTF', (['ABCD'], {}), '(ABCD)\n', (1246, 1252), True, 'import deltasigma as ds\n'), ((1350, 1417), 'numpy.array', 'np.array', (['(1.498975311463384, 1.102565142679772, 0.132677264750882)'], {}), '((1.498975311463384, 1.102565142679772, 0.132677264750882))\n', (1358, 1417), True, 'import numpy as np\n'), ((1470, 1577), 'numpy.array', 'np.array', (['(0.997109907515262 + 0.075972576202904j, 0.997109907515262 - \n 0.075972576202904j, 1.0 + 0.0j)'], {}), '((0.997109907515262 + 0.075972576202904j, 0.997109907515262 - \n 0.075972576202904j, 1.0 + 0.0j))\n', (1478, 1577), True, 'import numpy as np\n'), ((1684, 1715), 'numpy.array', 'np.array', (['(-0.999999999999996,)'], {}), '((-0.999999999999996,))\n', (1692, 1715), True, 'import numpy as np\n'), ((1737, 1804), 'numpy.array', 'np.array', (['(1.498975311463384, 1.102565142679772, 0.132677264750882)'], {}), '((1.498975311463384, 1.102565142679772, 0.132677264750882))\n', (1745, 1804), True, 'import numpy as np\n'), ((3177, 3222), 'numpy.array', 'np.array', (['[[1.0, 1.0, -1.0], [1.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, -1.0], [1.0, 0.0, 0.0]])\n', (3185, 3222), True, 'import numpy as np\n'), ((3275, 3298), 'deltasigma.calculateTF', 'ds.calculateTF', (['ABCD', 'k'], {}), '(ABCD, k)\n', (3289, 3298), True, 'import deltasigma as ds\n'), ((3946, 3991), 'numpy.array', 'np.array', (['[[1.0, 1.0, -1.0], [1.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, -1.0], [1.0, 0.0, 0.0]])\n', (3954, 3991), True, 'import numpy as np\n'), ((4029, 4049), 'deltasigma.calculateTF', 'ds.calculateTF', (['ABCD'], {}), '(ABCD)\n', (4043, 4049), True, 'import deltasigma as ds\n'), ((4822, 4853), 'numpy.array', 'np.array', (['ABCD'], {'dtype': 'np.float_'}), '(ABCD, dtype=np.float_)\n', (4830, 4853), True, 'import numpy as np\n'), ((4952, 4977), 'deltasigma.calculateTF', 'ds.calculateTF', 
(['ABCD'], {'k': 'k'}), '(ABCD, k=k)\n', (4966, 4977), True, 'import deltasigma as ds\n'), ((1951, 1970), 'deltasigma._utils.cplxpair', 'cplxpair', (['ntf_zeros'], {}), '(ntf_zeros)\n', (1959, 1970), False, 'from deltasigma._utils import cplxpair\n'), ((2015, 2035), 'deltasigma._utils.cplxpair', 'cplxpair', (['mntf_zeros'], {}), '(mntf_zeros)\n', (2023, 2035), False, 'from deltasigma._utils import cplxpair\n'), ((2080, 2099), 'deltasigma._utils.cplxpair', 'cplxpair', (['stf_zeros'], {}), '(stf_zeros)\n', (2088, 2099), False, 'from deltasigma._utils import cplxpair\n'), ((2144, 2164), 'deltasigma._utils.cplxpair', 'cplxpair', (['mstf_zeros'], {}), '(mstf_zeros)\n', (2152, 2164), False, 'from deltasigma._utils import cplxpair\n'), ((2209, 2228), 'deltasigma._utils.cplxpair', 'cplxpair', (['ntf_poles'], {}), '(ntf_poles)\n', (2217, 2228), False, 'from deltasigma._utils import cplxpair\n'), ((2273, 2293), 'deltasigma._utils.cplxpair', 'cplxpair', (['mntf_poles'], {}), '(mntf_poles)\n', (2281, 2293), False, 'from deltasigma._utils import cplxpair\n'), ((2338, 2357), 'deltasigma._utils.cplxpair', 'cplxpair', (['stf_poles'], {}), '(stf_poles)\n', (2346, 2357), False, 'from deltasigma._utils import cplxpair\n'), ((2402, 2422), 'deltasigma._utils.cplxpair', 'cplxpair', (['mstf_poles'], {}), '(mstf_poles)\n', (2410, 2422), False, 'from deltasigma._utils import cplxpair\n'), ((2552, 2620), 'numpy.allclose', 'np.allclose', (['self.ntf_zeros', 'self.mntf_zeros'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(self.ntf_zeros, self.mntf_zeros, rtol=1e-05, atol=1e-08)\n', (2563, 2620), True, 'import numpy as np\n'), ((2688, 2756), 'numpy.allclose', 'np.allclose', (['self.ntf_poles', 'self.mntf_poles'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(self.ntf_poles, self.mntf_poles, rtol=1e-05, atol=1e-08)\n', (2699, 2756), True, 'import numpy as np\n'), ((2824, 2892), 'numpy.allclose', 'np.allclose', (['self.stf_zeros', 'self.mstf_zeros'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), 
'(self.stf_zeros, self.mstf_zeros, rtol=1e-05, atol=1e-08)\n', (2835, 2892), True, 'import numpy as np\n'), ((2960, 3028), 'numpy.allclose', 'np.allclose', (['self.stf_poles', 'self.mstf_poles'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(self.stf_poles, self.mstf_poles, rtol=1e-05, atol=1e-08)\n', (2971, 3028), True, 'import numpy as np\n'), ((3414, 3467), 'numpy.allclose', 'np.allclose', (['stf_poles', '[0.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(stf_poles, [0.0], rtol=1e-05, atol=1e-08)\n', (3425, 3467), True, 'import numpy as np\n'), ((3534, 3584), 'numpy.allclose', 'np.allclose', (['stf_gain', '(1.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(stf_gain, 1.0, rtol=1e-05, atol=1e-08)\n', (3545, 3584), True, 'import numpy as np\n'), ((3607, 3660), 'numpy.allclose', 'np.allclose', (['ntf_poles', '[0.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntf_poles, [0.0], rtol=1e-05, atol=1e-08)\n', (3618, 3660), True, 'import numpy as np\n'), ((3683, 3736), 'numpy.allclose', 'np.allclose', (['ntf_zeros', '[1.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntf_zeros, [1.0], rtol=1e-05, atol=1e-08)\n', (3694, 3736), True, 'import numpy as np\n'), ((3759, 3809), 'numpy.allclose', 'np.allclose', (['ntf_gain', '(1.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntf_gain, 1.0, rtol=1e-05, atol=1e-08)\n', (3770, 3809), True, 'import numpy as np\n'), ((4164, 4217), 'numpy.allclose', 'np.allclose', (['stf_poles', '[0.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(stf_poles, [0.0], rtol=1e-05, atol=1e-08)\n', (4175, 4217), True, 'import numpy as np\n'), ((4284, 4334), 'numpy.allclose', 'np.allclose', (['stf_gain', '(1.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(stf_gain, 1.0, rtol=1e-05, atol=1e-08)\n', (4295, 4334), True, 'import numpy as np\n'), ((4357, 4410), 'numpy.allclose', 'np.allclose', (['ntf_poles', '[0.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntf_poles, [0.0], rtol=1e-05, atol=1e-08)\n', (4368, 4410), True, 'import numpy as np\n'), ((4433, 
4486), 'numpy.allclose', 'np.allclose', (['ntf_zeros', '[1.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntf_zeros, [1.0], rtol=1e-05, atol=1e-08)\n', (4444, 4486), True, 'import numpy as np\n'), ((4509, 4559), 'numpy.allclose', 'np.allclose', (['ntf_gain', '(1.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntf_gain, 1.0, rtol=1e-05, atol=1e-08)\n', (4520, 4559), True, 'import numpy as np\n'), ((5017, 5071), 'numpy.allclose', 'np.allclose', (['stfs[0][1]', '[0.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(stfs[0][1], [0.0], rtol=1e-05, atol=1e-08)\n', (5028, 5071), True, 'import numpy as np\n'), ((5139, 5191), 'numpy.allclose', 'np.allclose', (['stfs[0][2]', '(1.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(stfs[0][2], 1.0, rtol=1e-05, atol=1e-08)\n', (5150, 5191), True, 'import numpy as np\n'), ((5214, 5268), 'numpy.allclose', 'np.allclose', (['stfs[1][1]', '[0.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(stfs[1][1], [0.0], rtol=1e-05, atol=1e-08)\n', (5225, 5268), True, 'import numpy as np\n'), ((5336, 5388), 'numpy.allclose', 'np.allclose', (['stfs[1][2]', '(1.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(stfs[1][2], 1.0, rtol=1e-05, atol=1e-08)\n', (5347, 5388), True, 'import numpy as np\n'), ((5430, 5487), 'numpy.allclose', 'np.allclose', (['ntfs[0, 0][1]', '[0.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntfs[0, 0][1], [0.0], rtol=1e-05, atol=1e-08)\n', (5441, 5487), True, 'import numpy as np\n'), ((5510, 5567), 'numpy.allclose', 'np.allclose', (['ntfs[0, 0][0]', '[1.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntfs[0, 0][0], [1.0], rtol=1e-05, atol=1e-08)\n', (5521, 5567), True, 'import numpy as np\n'), ((5590, 5645), 'numpy.allclose', 'np.allclose', (['ntfs[0, 0][2]', '(1.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntfs[0, 0][2], 1.0, rtol=1e-05, atol=1e-08)\n', (5601, 5645), True, 'import numpy as np\n'), ((5687, 5744), 'numpy.allclose', 'np.allclose', (['ntfs[1, 0][1]', '[0.0]'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), 
'(ntfs[1, 0][1], [0.0], rtol=1e-05, atol=1e-08)\n', (5698, 5744), True, 'import numpy as np\n'), ((5815, 5871), 'numpy.allclose', 'np.allclose', (['ntfs[1, 0][2]', '(-1.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntfs[1, 0][2], -1.0, rtol=1e-05, atol=1e-08)\n', (5826, 5871), True, 'import numpy as np\n'), ((6009, 6064), 'numpy.allclose', 'np.allclose', (['ntfs[1, 1][2]', '(1.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntfs[1, 1][2], 1.0, rtol=1e-05, atol=1e-08)\n', (6020, 6064), True, 'import numpy as np\n'), ((6106, 6161), 'numpy.allclose', 'np.allclose', (['ntfs[0, 1][2]', '(0.0)'], {'rtol': '(1e-05)', 'atol': '(1e-08)'}), '(ntfs[0, 1][2], 0.0, rtol=1e-05, atol=1e-08)\n', (6117, 6161), True, 'import numpy as np\n')] |
import pytest
from pymbar import testsystems
from reproducibility_project.src.analysis.equilibration import is_equilibrated
from reproducibility_project.src.analysis.sampler import (
_decorr_sampling,
write_subsampled_values,
)
from reproducibility_project.tests.base_test import BaseTest
class TestSampler(BaseTest):
    """Exercise ``_decorr_sampling`` and ``write_subsampled_values``."""

    def test_not_equilibrated(self, correlated_data_tau100_n10000):
        # Overly strict thresholds on highly correlated data must be rejected.
        with pytest.raises(ValueError):
            _decorr_sampling(
                correlated_data_tau100_n10000,
                threshold_fraction=0.80,
                threshold_neff=100,
            )

    def test_equilibrated(self, correlated_data_tau100_n10000):
        series = correlated_data_tau100_n10000
        is_equil, prod_start, ineff, Neff = is_equilibrated(
            series,
            threshold_fraction=0.10,
            threshold_neff=10,
            nskip=1,
        )
        start, stop, step, neff_samples = _decorr_sampling(
            series, threshold_fraction=0.10, threshold_neff=10
        )
        # Subsampling must begin inside the production region and stride by
        # at least one statistical inefficiency.
        assert start >= prod_start
        assert step >= ineff
        assert neff_samples > 1

    def test_write_subsampled_incorrect_params(self, tmp_job):
        # A non-Job first argument is rejected.
        with pytest.raises(
            TypeError,
            match=r"Expected input \'job\' of type signac\.contrib\.project\.Job",
        ):
            write_subsampled_values("foo", property="density")
        # An empty property name is rejected.
        with pytest.raises(
            ValueError,
            match=r"Expected \'property\' to be a name of a property",
        ):
            write_subsampled_values(tmp_job, property="")
        # Existing subsample data must not be silently clobbered.
        with pytest.raises(
            ValueError,
            match=r"Attempting to overwrite already existing data for property",
        ):
            import numpy as np

            rows = ["foo"] + [str(v) for v in range(1, 7)]
            with open(tmp_job.fn("log.txt"), "w") as fh:
                fh.write("\n".join(rows) + "\n")
            tmp_job.data["subsamples/foo"] = np.asarray([1, 2, 3, 4])
            tmp_job.doc["sampling_results"] = {
                "foo": {"start": 1, "stop": 4, "step": 2, "Neff": 2}
            }
            write_subsampled_values(tmp_job, property="foo", overwrite=False)

    def test_file_missing(self, tmp_job):
        # By default the tmp job does not ship the requested log file.
        with pytest.raises(
            FileNotFoundError, match=r"File missing\.txt does not exist"
        ):
            tmp_job.doc["sampling_results"] = {
                "foo": {"start": 1, "stop": 4, "step": 2, "Neff": 2}
            }
            write_subsampled_values(
                tmp_job,
                property="foo",
                property_filename="missing.txt",
                overwrite=False,
            )

    def test_correct_samples(self, tmp_job):
        import numpy as np

        rows = ["foo"] + [str(v) for v in range(1, 7)]
        with open(tmp_job.fn("log.txt"), "w") as fh:
            fh.write("\n".join(rows) + "\n")
        tmp_job.doc["sampling_results"] = {
            "foo": {"start": 1, "stop": 4, "step": 1, "Neff": 4}
        }
        write_subsampled_values(tmp_job, property="foo", overwrite=False)
        with tmp_job.data:
            stored = tmp_job.data["subsamples/foo"]
            assert len(stored) == 3
            np.testing.assert_array_equal(stored, [2, 3, 4])
| [
"reproducibility_project.src.analysis.sampler.write_subsampled_values",
"numpy.asarray",
"pytest.raises",
"reproducibility_project.src.analysis.sampler._decorr_sampling",
"numpy.testing.assert_array_equal",
"reproducibility_project.src.analysis.equilibration.is_equilibrated"
] | [((766, 839), 'reproducibility_project.src.analysis.equilibration.is_equilibrated', 'is_equilibrated', (['data'], {'threshold_fraction': '(0.1)', 'threshold_neff': '(10)', 'nskip': '(1)'}), '(data, threshold_fraction=0.1, threshold_neff=10, nskip=1)\n', (781, 839), False, 'from reproducibility_project.src.analysis.equilibration import is_equilibrated\n'), ((942, 1007), 'reproducibility_project.src.analysis.sampler._decorr_sampling', '_decorr_sampling', (['data'], {'threshold_fraction': '(0.1)', 'threshold_neff': '(10)'}), '(data, threshold_fraction=0.1, threshold_neff=10)\n', (958, 1007), False, 'from reproducibility_project.src.analysis.sampler import _decorr_sampling, write_subsampled_values\n'), ((3308, 3373), 'reproducibility_project.src.analysis.sampler.write_subsampled_values', 'write_subsampled_values', (['tmp_job'], {'property': '"""foo"""', 'overwrite': '(False)'}), "(tmp_job, property='foo', overwrite=False)\n", (3331, 3373), False, 'from reproducibility_project.src.analysis.sampler import _decorr_sampling, write_subsampled_values\n'), ((455, 480), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (468, 480), False, 'import pytest\n'), ((514, 580), 'reproducibility_project.src.analysis.sampler._decorr_sampling', '_decorr_sampling', (['data'], {'threshold_fraction': '(0.8)', 'threshold_neff': '(100)'}), '(data, threshold_fraction=0.8, threshold_neff=100)\n', (530, 580), False, 'from reproducibility_project.src.analysis.sampler import _decorr_sampling, write_subsampled_values\n'), ((1204, 1308), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Expected input \\\\\'job\\\\\' of type signac\\\\.contrib\\\\.project\\\\.Job"""'}), '(TypeError, match=\n "Expected input \\\\\'job\\\\\' of type signac\\\\.contrib\\\\.project\\\\.Job")\n', (1217, 1308), False, 'import pytest\n'), ((1348, 1398), 'reproducibility_project.src.analysis.sampler.write_subsampled_values', 'write_subsampled_values', (['"""foo"""'], {'property': 
'"""density"""'}), "('foo', property='density')\n", (1371, 1398), False, 'from reproducibility_project.src.analysis.sampler import _decorr_sampling, write_subsampled_values\n'), ((1460, 1550), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Expected \\\\\'property\\\\\' to be a name of a property"""'}), '(ValueError, match=\n "Expected \\\\\'property\\\\\' to be a name of a property")\n', (1473, 1550), False, 'import pytest\n'), ((1593, 1638), 'reproducibility_project.src.analysis.sampler.write_subsampled_values', 'write_subsampled_values', (['tmp_job'], {'property': '""""""'}), "(tmp_job, property='')\n", (1616, 1638), False, 'from reproducibility_project.src.analysis.sampler import _decorr_sampling, write_subsampled_values\n'), ((1700, 1798), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Attempting to overwrite already existing data for property"""'}), "(ValueError, match=\n 'Attempting to overwrite already existing data for property')\n", (1713, 1798), False, 'import pytest\n'), ((2137, 2161), 'numpy.asarray', 'np.asarray', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2147, 2161), True, 'import numpy as np\n'), ((2306, 2371), 'reproducibility_project.src.analysis.sampler.write_subsampled_values', 'write_subsampled_values', (['tmp_job'], {'property': '"""foo"""', 'overwrite': '(False)'}), "(tmp_job, property='foo', overwrite=False)\n", (2329, 2371), False, 'from reproducibility_project.src.analysis.sampler import _decorr_sampling, write_subsampled_values\n'), ((2482, 2557), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {'match': '"""File missing\\\\.txt does not exist"""'}), "(FileNotFoundError, match='File missing\\\\.txt does not exist')\n", (2495, 2557), False, 'import pytest\n'), ((2724, 2827), 'reproducibility_project.src.analysis.sampler.write_subsampled_values', 'write_subsampled_values', (['tmp_job'], {'property': '"""foo"""', 'property_filename': '"""missing.txt"""', 'overwrite': '(False)'}), "(tmp_job, 
property='foo', property_filename=\n 'missing.txt', overwrite=False)\n", (2747, 2827), False, 'from reproducibility_project.src.analysis.sampler import _decorr_sampling, write_subsampled_values\n'), ((3473, 3545), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["tmp_job.data['subsamples/foo']", '[2, 3, 4]'], {}), "(tmp_job.data['subsamples/foo'], [2, 3, 4])\n", (3502, 3545), True, 'import numpy as np\n')] |
import logging

# Configure the root logger at import time so the accuracy reports below
# are actually emitted (default root level would suppress INFO).
logging.basicConfig(level=logging.INFO)
import numpy as np
import functions
def model(X_train, Y_train, X_test, Y_test, iterations=2500, learning_rate=0.005):
    """Train a logistic-regression-style model via ``functions.optimize``.

    Args:
        X_train: training features, shape (n_features, m_train).
        Y_train: training labels aligned with ``X_train``.
        X_test: test features, shape (n_features, m_test).
        Y_test: test labels aligned with ``X_test``.
        iterations: number of optimization iterations.
        learning_rate: gradient-descent step size.

    Returns:
        dict with costs, train/test predictions, learned ``w`` and ``b``,
        and the hyper-parameters used.
    """
    # Zero-initialized weights (one per feature) and scalar bias.
    weights = np.zeros((X_train.shape[0], 1))
    bias = 0
    parameters, grads, costs = functions.optimize(
        weights,
        bias,
        X_train,
        Y_train,
        iterations=iterations,
        learning_rate=learning_rate,
    )
    weights = parameters["w"]
    bias = parameters["b"]

    pred_test = functions.predict(weights, bias, X_test)
    pred_train = functions.predict(weights, bias, X_train)

    # Accuracy = 100% minus mean absolute prediction error (labels in {0,1}).
    train_accuracy = 100 - np.mean(np.abs(pred_train - Y_train)) * 100
    logging.info("train accuracy: %s %%", train_accuracy)
    test_accuracy = 100 - np.mean(np.abs(pred_test - Y_test)) * 100
    logging.info("test accuracy: %s %%", test_accuracy)

    return {
        "costs": costs,
        "Y_prediction_test": pred_test,
        "Y_prediction_train": pred_train,
        "w": weights,
        "b": bias,
        "learning_rate": learning_rate,
        "iterations": iterations,
    }
| [
"logging.basicConfig",
"numpy.abs",
"functions.predict",
"numpy.zeros",
"logging.info",
"functions.optimize"
] | [((16, 55), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (35, 55), False, 'import logging\n'), ((186, 217), 'numpy.zeros', 'np.zeros', (['(X_train.shape[0], 1)'], {}), '((X_train.shape[0], 1))\n', (194, 217), True, 'import numpy as np\n'), ((260, 358), 'functions.optimize', 'functions.optimize', (['w', 'b', 'X_train', 'Y_train'], {'iterations': 'iterations', 'learning_rate': 'learning_rate'}), '(w, b, X_train, Y_train, iterations=iterations,\n learning_rate=learning_rate)\n', (278, 358), False, 'import functions\n'), ((484, 515), 'functions.predict', 'functions.predict', (['w', 'b', 'X_test'], {}), '(w, b, X_test)\n', (501, 515), False, 'import functions\n'), ((541, 573), 'functions.predict', 'functions.predict', (['w', 'b', 'X_train'], {}), '(w, b, X_train)\n', (558, 573), False, 'import functions\n'), ((658, 709), 'logging.info', 'logging.info', (['f"""train accuracy: {train_accuracy} %"""'], {}), "(f'train accuracy: {train_accuracy} %')\n", (670, 709), False, 'import logging\n'), ((791, 840), 'logging.info', 'logging.info', (['f"""test accuracy: {test_accuracy} %"""'], {}), "(f'test accuracy: {test_accuracy} %')\n", (803, 840), False, 'import logging\n'), ((610, 646), 'numpy.abs', 'np.abs', (['(Y_prediction_train - Y_train)'], {}), '(Y_prediction_train - Y_train)\n', (616, 646), True, 'import numpy as np\n'), ((745, 779), 'numpy.abs', 'np.abs', (['(Y_prediction_test - Y_test)'], {}), '(Y_prediction_test - Y_test)\n', (751, 779), True, 'import numpy as np\n')] |
'''
This script plots the four test predictions of the predictive
expectation and variance of BAR-DenseED seen in Figure 13 of the paper.
===
Distributed by: <NAME> (MIT Liscense)
- Associated publication:
url: http://www.sciencedirect.com/science/article/pii/S0021999119307612
doi: https://doi.org/10.1016/j.jcp.2019.109056
github: https://github.com/cics-nd/ar-pde-cnn
===
'''
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from args import Parser
from nn.denseEDcirc import DenseED
from nn.bayesNN import BayesNN
from nn.swag import SwagNN
from utils.utils import mkdirs
from utils.burgerLoader import BurgerLoader
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import rc
import matplotlib.gridspec as gridspec
import torch
import numpy as np
import os
import time
def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10):
    '''
    Tests samples of the Bayesian SWAG model on the first mini-batch.
    Args:
        args (argparse): object with program arguments (device, nel, nic)
        swag_nn (PyTorch model): SWAG wrapper to draw model samples from
        test_loader (dataloader): dataloader with test cases
        tstep (int): number of timesteps to auto-regressively predict
        n_samples (int): number of model samples to draw
    Returns:
        u_out (torch.Tensor): [d x nsamples x tstep+1 x nel] predictions per sample
        betas (torch.Tensor): [nsamples] noise precision of each sampled model
        u_target (torch.Tensor): target values of the first mini-batch
    '''
    mb_size = int(len(test_loader.dataset) / len(test_loader))
    u_out = torch.zeros(mb_size, n_samples, tstep + 1, args.nel)
    betas = torch.zeros(n_samples)

    for s_idx in range(n_samples):
        print(f'Executing model sample {s_idx:d}')
        model = swag_nn.sample(diagCov=True)
        model.eval()
        betas[s_idx] = model.model.log_beta.exp()

        # Only the first mini-batch is evaluated.
        state0, u_target = next(iter(test_loader))
        state = state0.to(args.device)
        u_out[:, s_idx, 0, :] = state[:, 0]

        # Auto-regress: feed the latest prediction back as the newest channel.
        for t_idx in range(tstep):
            uPred = model(state[:, -args.nic:, :])
            u_out[:, s_idx, t_idx + 1, :] = uPred[:, 0]
            history = state[:, -int(args.nic - 1):, :].detach()
            newest = uPred[:, 0, :].unsqueeze(1).detach()
            state = torch.cat([history, newest], dim=1)

    return u_out, betas, u_target
def plotContourGrid(t, xT, uPred, betas, uTarget):
    '''
    Draws a 2x2 grid of test cases; each cell stacks target, mean prediction,
    predictive variance and error plots (rendered by plotPred), then saves
    the figure as PNG and PDF and shows it.
    '''
    mpl.rcParams['font.family'] = ['serif']  # default is sans-serif
    rc('text', usetex=False)
    fig = plt.figure(figsize=(15, 13), dpi=150)
    # Outer 2x2 grid, one cell per test case
    outer = gridspec.GridSpec(2, 2, wspace=0.45, hspace=0.2)
    for case in range(4):
        # Inner 4x1 grid: target / mean / variance / error rows
        inner = gridspec.GridSpecFromSubplotSpec(
            4, 1, subplot_spec=outer[case], wspace=0, hspace=0.25
        )
        axes = []
        for row in range(4):
            sub_ax = plt.Subplot(fig, inner[row])
            fig.add_subplot(sub_ax)
            axes.append(sub_ax)
        # Render this test case into its four stacked axes
        plotPred(fig, axes, t, xT, uPred[case], betas, uTarget[case])

    file_dir = '.'
    # Create the output directory if it does not exist
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    file_name = file_dir + "/burger_BAR_pred"
    plt.savefig(file_name + ".png", bbox_inches='tight')
    plt.savefig(file_name + ".pdf", bbox_inches='tight')
    plt.show()
def plotPred(fig, ax, t, xT, uPred, betas, uTarget):
    '''
    Plots one test case: target, predictive mean, predictive variance and
    mean absolute error, each as a space-time contour with its own colorbar.
    Args:
        fig: matplotlib figure
        ax (list): list of four subplot axis (target, mean, variance, error)
        t (np.array): [n] array of time values for x axis
        xT (np.array): [m] array of spatial coordinates for y axis
        uPred (np.array): [s x n x m] model prediction samples (s = model samples)
        betas (np.array): [s] sampled noise precisions; 1/beta enters the variance
        uTarget (np.array): [n x m] target field
    '''
    # Row 0: target field; its min/max fix the color limits shared with row 1
    cmap = "inferno"
    c0 = ax[0].imshow(uTarget.T, interpolation='nearest', cmap=cmap, origin='lower', aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
    c_max = np.max(uTarget.T)
    c_min = np.min(uTarget.T)
    c0.set_clim(vmin=c_min, vmax=c_max)
    # Row 1: predictive mean (average over model samples), same color limits
    uPred_mean = np.mean(uPred, axis=0)
    c0 = ax[1].imshow(uPred_mean.T, interpolation='nearest', cmap=cmap, origin='lower', aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
    c0.set_clim(vmin=c_min, vmax=c_max)
    # One colorbar spanning rows 0-1, placed manually to the right of the axes
    p0 = ax[0].get_position().get_points().flatten()
    p1 = ax[1].get_position().get_points().flatten()
    ax_cbar = fig.add_axes([p1[2]+0.015, p1[1], 0.020, p0[3]-p1[1]])
    # ColorbarBase spans [0, 1]; tick labels are remapped to the data range
    ticks = np.linspace(0, 1, 5)
    tickLabels = np.linspace(c_min, c_max, 5)
    tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
    cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=plt.get_cmap(cmap), orientation='vertical', ticks=ticks)
    cbar.set_ticklabels(tickLabels)
    # Row 2: predictive variance via law of total variance:
    # Var[u] = E[1/beta + u^2] - (E[u])^2 over model samples
    betas = np.expand_dims(betas, axis=1).repeat(uPred.shape[1], axis=1) # Expand noise parameter
    betas = np.expand_dims(betas, axis=2).repeat(uPred.shape[2], axis=2) # Expand noise parameter
    uPred_var = np.mean(1./betas + uPred*uPred, axis=0) - uPred_mean*uPred_mean
    c0 = ax[2].imshow(uPred_var.T, interpolation='nearest', cmap=cmap, origin='lower', aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
    c_max = np.max(uPred_var)
    c0.set_clim(vmin=0, vmax=c_max)
    # Dedicated colorbar for the variance row
    p0 = ax[2].get_position().get_points().flatten()
    ax_cbar = fig.add_axes([p0[2]+0.015, p0[1], 0.020, p0[3]-p0[1]])
    ticks = np.linspace(0, 1, 5)
    tickLabels = np.linspace(0, c_max, 5)
    tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
    cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=plt.get_cmap(cmap), orientation='vertical', ticks=ticks)
    cbar.set_ticklabels(tickLabels)
    # Row 3: absolute error of the predictive mean (different colormap)
    cmap = "viridis"
    c0 = ax[3].imshow(np.abs(uPred_mean.T - uTarget.T), interpolation='nearest', cmap=cmap, origin='lower', aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
    p0 = ax[3].get_position().get_points().flatten()
    ax_cbar = fig.add_axes([p0[2]+0.015, p0[1], 0.020, p0[3]-p0[1]])
    ticks = np.linspace(0, 1, 5)
    # Error colorbar labels use the image's own normalization limits
    tickLabels = np.linspace(c0.norm.vmin, c0.norm.vmax, 5)
    tickLabels = ["{:.2e}".format(t0) for t0 in tickLabels]
    cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=plt.get_cmap(cmap), orientation='vertical', ticks=ticks)
    cbar.set_ticklabels(tickLabels)
    ax[0].set_ylabel('x', fontsize=14)
    ax[1].set_ylabel('x', fontsize=14)
    ax[2].set_ylabel('x', fontsize=14)
    ax[3].set_ylabel('x', fontsize=14)
    ax[3].set_xlabel('t', fontsize=14)
    # Remove some tick labels to help declutter plot
    ax[0].set_xticklabels([])
    ax[1].set_xticklabels([])
    ax[2].set_xticklabels([])
    for ax0 in ax:
        ax0.set_yticks([0,0.5,1])
if __name__ == '__main__':
    # Parse arguments
    args = Parser().parse(dirs=False)
    # Select the compute device once. (The original also built an unused
    # `use_cuda` string and called torch.cuda.is_available() twice.)
    args.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Torch device:{}".format(args.device))

    # Domain settings, matches solver settings
    x0 = 0
    x1 = 1.0
    args.dx = (x1 - x0)/args.nel

    # Create testing loader for the first 5 simulator test cases
    burgerLoader = BurgerLoader(dt=args.dt)
    test_cases = np.arange(0, 5, 1).astype(int)
    testing_loader = burgerLoader.createTestingLoader('../solver/fenics_data_dt0.001_T2.0', test_cases, batch_size=5)

    # Create DenseED model
    denseED = DenseED(in_channels=args.nic, out_channels=args.noc,
                        blocks=args.blocks,
                        growth_rate=args.growth_rate,
                        init_features=args.init_features,
                        bn_size=args.bn_size,
                        drop_rate=args.drop_rate,
                        bottleneck=False,
                        out_activation=None).to(args.device)
    # Bayesian neural network wrapper
    bayes_nn = BayesNN(args, denseED)
    # Stochastic weight averaging (SWAG) posterior
    swag_nn = SwagNN(args, bayes_nn, full_cov=True, max_models=args.swag_max)
    # Load pre-trained network (epoch 200)
    swag_nn.loadModel(200, file_dir="./networks")

    # Draw 30 model samples and auto-regress 400 time-steps each
    with torch.no_grad():
        uPred, betas, uTarget = testSample(args, swag_nn, testing_loader, tstep=400, n_samples=30)

    tTest = np.arange(0, 400*args.dt+1e-8, args.dt)
    xTest = np.linspace(x0, x1, args.nel+1)

    plotContourGrid(tTest, xTest, uPred.cpu().numpy(), betas.cpu().numpy(), uTarget.cpu().numpy())
| [
"torch.cuda.is_available",
"matplotlib.rc",
"nn.denseEDcirc.DenseED",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"sys.path.append",
"numpy.arange",
"numpy.mean",
"os.path.exists",
"numpy.max",
"matplotlib.pyplot.Subplot",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"numpy.min",
"... | [((390, 411), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (405, 411), False, 'import sys\n'), ((1596, 1648), 'torch.zeros', 'torch.zeros', (['mb_size', 'n_samples', '(tstep + 1)', 'args.nel'], {}), '(mb_size, n_samples, tstep + 1, args.nel)\n', (1607, 1648), False, 'import torch\n'), ((1659, 1681), 'torch.zeros', 'torch.zeros', (['n_samples'], {}), '(n_samples)\n', (1670, 1681), False, 'import torch\n'), ((2768, 2792), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (2770, 2792), False, 'from matplotlib import rc\n'), ((2804, 2841), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 13)', 'dpi': '(150)'}), '(figsize=(15, 13), dpi=150)\n', (2814, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2902), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(2)'], {'wspace': '(0.45)', 'hspace': '(0.2)'}), '(2, 2, wspace=0.45, hspace=0.2)\n', (2871, 2902), True, 'import matplotlib.gridspec as gridspec\n'), ((3497, 3549), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(file_name + '.png')"], {'bbox_inches': '"""tight"""'}), "(file_name + '.png', bbox_inches='tight')\n", (3508, 3549), True, 'import matplotlib.pyplot as plt\n'), ((3552, 3604), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(file_name + '.pdf')"], {'bbox_inches': '"""tight"""'}), "(file_name + '.pdf', bbox_inches='tight')\n", (3563, 3604), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3618), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3616, 3618), True, 'import matplotlib.pyplot as plt\n'), ((4232, 4249), 'numpy.max', 'np.max', (['uTarget.T'], {}), '(uTarget.T)\n', (4238, 4249), True, 'import numpy as np\n'), ((4262, 4279), 'numpy.min', 'np.min', (['uTarget.T'], {}), '(uTarget.T)\n', (4268, 4279), True, 'import numpy as np\n'), ((4358, 4380), 'numpy.mean', 'np.mean', (['uPred'], {'axis': '(0)'}), '(uPred, axis=0)\n', (4365, 4380), True, 'import numpy as np\n'), ((4746, 4766), 
'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (4757, 4766), True, 'import numpy as np\n'), ((4784, 4812), 'numpy.linspace', 'np.linspace', (['c_min', 'c_max', '(5)'], {}), '(c_min, c_max, 5)\n', (4795, 4812), True, 'import numpy as np\n'), ((5460, 5477), 'numpy.max', 'np.max', (['uPred_var'], {}), '(uPred_var)\n', (5466, 5477), True, 'import numpy as np\n'), ((5649, 5669), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (5660, 5669), True, 'import numpy as np\n'), ((5687, 5711), 'numpy.linspace', 'np.linspace', (['(0)', 'c_max', '(5)'], {}), '(0, c_max, 5)\n', (5698, 5711), True, 'import numpy as np\n'), ((6248, 6268), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (6259, 6268), True, 'import numpy as np\n'), ((6286, 6328), 'numpy.linspace', 'np.linspace', (['c0.norm.vmin', 'c0.norm.vmax', '(5)'], {}), '(c0.norm.vmin, c0.norm.vmax, 5)\n', (6297, 6328), True, 'import numpy as np\n'), ((7048, 7073), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7071, 7073), False, 'import torch\n'), ((7390, 7414), 'utils.burgerLoader.BurgerLoader', 'BurgerLoader', ([], {'dt': 'args.dt'}), '(dt=args.dt)\n', (7402, 7414), False, 'from utils.burgerLoader import BurgerLoader\n'), ((8105, 8127), 'nn.bayesNN.BayesNN', 'BayesNN', (['args', 'denseED'], {}), '(args, denseED)\n', (8112, 8127), False, 'from nn.bayesNN import BayesNN\n'), ((8177, 8240), 'nn.swag.SwagNN', 'SwagNN', (['args', 'bayes_nn'], {'full_cov': '(True)', 'max_models': 'args.swag_max'}), '(args, bayes_nn, full_cov=True, max_models=args.swag_max)\n', (8183, 8240), False, 'from nn.swag import SwagNN\n'), ((8449, 8493), 'numpy.arange', 'np.arange', (['(0)', '(400 * args.dt + 1e-08)', 'args.dt'], {}), '(0, 400 * args.dt + 1e-08, args.dt)\n', (8458, 8493), True, 'import numpy as np\n'), ((8501, 8534), 'numpy.linspace', 'np.linspace', (['x0', 'x1', '(args.nel + 1)'], {}), '(x0, x1, args.nel + 1)\n', (8512, 8534), 
True, 'import numpy as np\n'), ((2976, 3064), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(4)', '(1)'], {'subplot_spec': 'outer[i]', 'wspace': '(0)', 'hspace': '(0.25)'}), '(4, 1, subplot_spec=outer[i], wspace=0,\n hspace=0.25)\n', (3008, 3064), True, 'import matplotlib.gridspec as gridspec\n'), ((3393, 3417), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (3407, 3417), False, 'import os\n'), ((3427, 3448), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (3438, 3448), False, 'import os\n'), ((5247, 5291), 'numpy.mean', 'np.mean', (['(1.0 / betas + uPred * uPred)'], {'axis': '(0)'}), '(1.0 / betas + uPred * uPred, axis=0)\n', (5254, 5291), True, 'import numpy as np\n'), ((5979, 6011), 'numpy.abs', 'np.abs', (['(uPred_mean.T - uTarget.T)'], {}), '(uPred_mean.T - uTarget.T)\n', (5985, 6011), True, 'import numpy as np\n'), ((8320, 8335), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8333, 8335), False, 'import torch\n'), ((3135, 3161), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'inner[j]'], {}), '(fig, inner[j])\n', (3146, 3161), True, 'import matplotlib.pyplot as plt\n'), ((4926, 4944), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (4938, 4944), True, 'import matplotlib.pyplot as plt\n'), ((5047, 5076), 'numpy.expand_dims', 'np.expand_dims', (['betas'], {'axis': '(1)'}), '(betas, axis=1)\n', (5061, 5076), True, 'import numpy as np\n'), ((5145, 5174), 'numpy.expand_dims', 'np.expand_dims', (['betas'], {'axis': '(2)'}), '(betas, axis=2)\n', (5159, 5174), True, 'import numpy as np\n'), ((5825, 5843), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (5837, 5843), True, 'import matplotlib.pyplot as plt\n'), ((6440, 6458), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (6452, 6458), True, 'import matplotlib.pyplot as plt\n'), ((6993, 7001), 'args.Parser', 'Parser', ([], {}), '()\n', (6999, 
7001), False, 'from args import Parser\n'), ((7145, 7170), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7168, 7170), False, 'import torch\n'), ((7461, 7479), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (7470, 7479), True, 'import numpy as np\n'), ((7650, 7886), 'nn.denseEDcirc.DenseED', 'DenseED', ([], {'in_channels': 'args.nic', 'out_channels': 'args.noc', 'blocks': 'args.blocks', 'growth_rate': 'args.growth_rate', 'init_features': 'args.init_features', 'bn_size': 'args.bn_size', 'drop_rate': 'args.drop_rate', 'bottleneck': '(False)', 'out_activation': 'None'}), '(in_channels=args.nic, out_channels=args.noc, blocks=args.blocks,\n growth_rate=args.growth_rate, init_features=args.init_features, bn_size\n =args.bn_size, drop_rate=args.drop_rate, bottleneck=False,\n out_activation=None)\n', (7657, 7886), False, 'from nn.denseEDcirc import DenseED\n'), ((2386, 2419), 'torch.cat', 'torch.cat', (['[input, input0]'], {'dim': '(1)'}), '([input, input0], dim=1)\n', (2395, 2419), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 16 08:42:47 2021
@author: <NAME>
@name: roi-aselect.py
"""
import argparse
import csv
import cv2
import os
import sys
import numpy as np
from pathlib import Path
# --- Module-level state and defaults --------------------------------------
s_default=20          # default marker square size in pixels — presumably overridden by CLI args; verify
th_default=5.0        # default threshold delta — presumably overridden by CLI args; verify
threshold=-1          # active intensity threshold used by getThresholdCoordinate; -1 until configured
th_under=False        # stop scanning when strip mean drops below mean - threshold
th_over=False         # stop scanning when strip mean rises above mean + threshold
img_sel=[]            # last selected image patch (click_event keeps a local of the same name)
cross_col=(255,0,0)   # drawing colour in OpenCV BGR order — not used in this chunk; verify
decimals=4            # rounding precision for normalized ROI coordinates (roix/roiy)
roi=[-1,-1,-1,-1]     # selected ROI; -1 entries mean "not yet set"
isROI=False           # True once a ROI has been selected
def roix(value):
    """Scale an x coordinate by the module-level ``img_x1`` and round to ``decimals`` places."""
    fraction = value / img_x1
    return round(fraction, decimals)
def roiy(value):
    """Scale a y coordinate by the module-level ``img_y1`` and round to ``decimals`` places."""
    fraction = value / img_y1
    return round(fraction, decimals)
def getMarkerCoordinates(w,h,x,y,s):
    """Return the (left, right, top, bottom) edges of an s-by-s box
    centred on (x, y), clamped to an image of width w and height h.

    Note: right/bottom are derived from the unclamped left/top before
    clamping, matching the original selection behaviour.
    """
    top = int(y - s / 2)
    bottom = top + s
    left = int(x - s / 2)
    right = left + s
    # Clamp each edge independently to the valid pixel range.
    top = max(top, 0)
    bottom = min(bottom, h - 1)
    left = max(left, 0)
    right = min(right, w - 1)
    return (left, right, top, bottom)
def getThresholdCoordinate(direction,mean,x,y,s):
    """Scan outward from (x, y) and return the coordinate where an s-wide
    probe line's mean intensity first crosses the reference mean by more
    than the global threshold.

    Parameters
    ----------
    direction: int
        0 = up, 1 = right, 2 = down, 3 = left
    mean: float
        reference mean intensity sampled around the click point
    x, y: int
        click coordinates (pixels)
    s: int
        side length of the sampling window

    Returns
    -------
    int
        the y coordinate of the detected edge for vertical scans
        (direction 0 or 2), the x coordinate for horizontal scans
        (direction 1 or 3), clamped to the image bounds.

    Relies on module globals: img, w, h, threshold, th_under, th_over.
    """
    global img
    delta_x=0
    delta_y=0
    sadjust=int(s/2)
    # Build a one-pixel-thick probe line (the img[sy:ey, sx:ex] slice) on
    # the edge of the sampling window facing the scan direction, and set
    # the per-step displacement (delta_x, delta_y).
    if direction==0:
        # Up
        sx=x-sadjust
        ex=x+sadjust
        sy=y-sadjust
        ey=y-sadjust+1
        delta_y=-1
    elif direction==1:
        # Right
        sx=x+sadjust-1
        ex=x+sadjust
        sy=y-sadjust
        ey=y+sadjust
        delta_x=1
    elif direction==2:
        # Down
        sx=x-sadjust
        ex=x+sadjust
        sy=y+sadjust-1
        ey=y+sadjust
        delta_y=1
    elif direction==3:
        # Left
        sx=x-sadjust
        ex=x-sadjust+1
        sy=y-sadjust
        ey=y+sadjust
        delta_x=-1
    # Fix values outside ranges
    if sx<0:
        sx=0
    elif sx>w-1:
        sx=w-1
    if ex<0:
        ex=0
    elif ex>w-1:
        ex=w-1
    if sy<0:
        sy=0
    elif sy>h-1:
        sy=h-1
    if ey<0:
        ey=0
    elif ey>h-1:
        ey=h-1
    # Fix slicing values (guarantee a non-empty slice in each axis)
    if ex<=sx:
        ex=sx+1
    if ey<=sy:
        ey=sy+1
    # March the probe line one pixel per step until its mean differs from
    # the reference mean by more than the threshold, or it leaves the image.
    while True:
        line=img[sy:ey,sx:ex]
        line_mean=line.mean(axis=(0,1)).mean() if False else None  # (placeholder removed below)
        mean_line=line.mean(axis=(0,1)).mean()
        if th_under:
            if mean_line<mean-threshold:
                break
        if th_over:
            if mean_line>mean+threshold:
                break
        sx+=delta_x
        ex+=delta_x
        sy+=delta_y
        ey+=delta_y
        if (sx<0 or sx>w-1) or (ex<0 or ex>w-1):
            break
        if (sy<0 or sy>h-1) or (ey<0 or ey>h-1):
            break
    # Clamp the stopping coordinate back into the image.
    if sy<0:
        sy=0
    elif sy>h-1:
        sy=h-1
    if sx<0:
        sx=0
    elif sx>w-1:
        sx=w-1
    # Vertical scans report a y coordinate, horizontal scans an x coordinate.
    if direction==0 or direction==2:
        return sy
    else:
        return sx
def click_event(event, x, y, flags, param):
    """OpenCV mouse callback: a left click auto-detects a ROI around the
    click point via threshold scanning; a right click clears the selection.

    Updates module globals img_clone/img_clone2 (overlay images) and
    roi/isROI (the current selection). Reads img, w, h, s, cross_col.
    """
    global img_clone,img_clone2
    global roi,isROI
    # Ignore events outside the image bounds.
    if x<0 or x>=w:
        return
    if y<0 or y>=h:
        return
    if event == cv2.EVENT_LBUTTONDOWN:
        # Sample an s x s window around the click to get a reference mean.
        (selx1,selx2,sely1,sely2)=getMarkerCoordinates(w,h,x,y,s)
        if sely1==sely2 or selx1==selx2:
            return
        img_sel=img[sely1:sely2,selx1:selx2]
        mean_bgr=img_sel.mean(axis=(0,1))
        neg_bgr=tuple(255-mean_bgr)
        m=mean_bgr.mean()
        # Scan in all four directions for the ROI edges.
        ysel1=getThresholdCoordinate(0,m,x,y,s)
        ysel2=getThresholdCoordinate(2,m,x,y,s)
        xsel1=getThresholdCoordinate(3,m,x,y,s)
        xsel2=getThresholdCoordinate(1,m,x,y,s)
        print("("+str(x)+","+str(y)+")")
        # Draw selection area
        blk=np.zeros(img.shape,np.uint8)
        cv2.rectangle(blk, (xsel1,ysel1), (xsel2,ysel2), neg_bgr,-1)
        img_clone=cv2.addWeighted(img,1,blk,0.25,1)
        # Draw selection cross
        sadjust=int(s/2)
        blk=np.zeros(img.shape,np.uint8)
        cv2.rectangle(blk,(xsel1,y-sadjust),(xsel2,y+sadjust),cross_col,-1)
        cv2.rectangle(blk,(x-sadjust,ysel1),(x+sadjust,ysel2),cross_col,-1)
        img_clone2=cv2.addWeighted(img_clone,1,blk,0.25,1)
        cv2.imshow('Select ROI',img_clone2)
        roi=[xsel1,ysel1,xsel2,ysel2]
        isROI=True
    elif event == cv2.EVENT_RBUTTONDOWN:
        # Clear all markers
        img_clone=img.copy()
        cv2.imshow('Select ROI', img_clone)
        roi=[-1,-1,-1,-1]
        isROI=False
# Command-line interface: required input image plus optional threshold/size tuning.
parser=argparse.ArgumentParser()
parser.add_argument("-f","--file",type=Path, help="specify the image name",required=True)
parser.add_argument('-t',type=float,help="threshold (default: "+str(th_default)+") as float ",required=False)
parser.add_argument('-s',type=float,help="selection size s x s (default: "+str(s_default)+") as integer ",required=False)
parser.add_argument("-u", action="store_true", help="enable under threshold",required=False)
parser.add_argument("-o", action="store_true", help="enable over threshold",required=False)
args = parser.parse_args()
# Resolve the input file (args.file is required, so this branch always runs).
if args.file:
    stem=args.file.stem
    fname=str(args.file.name)
    filename=str(args.file)
if not os.path.isfile(filename):
    print("File "+str(filename)+" not found!")
    sys.exit(0)
# Threshold: fall back to the default when absent or outside 0..255.
if args.t != None:
    threshold=float(args.t)
    if threshold<0 or threshold>255:
        threshold=th_default
else:
    threshold=th_default
# Sampling-window side length.
if args.s != None:
    s=int(args.s)
else:
    s=s_default
if args.u==True:
    th_under=True
if args.o==True:
    th_over=True
# With neither -u nor -o given, detect crossings in both directions.
if not True in [th_under,th_over]:
    th_under=True
    th_over=True
stem+="-"
print("Auto ROI selection")
print("(C) <NAME> 2021")
print("")
print("Image file: "+filename)
print("")
print("Current directory:")
curdir=os.getcwd()
path=Path(curdir)
print(curdir)
print("")
print("1. Select ROI by pressing the mouse left button")
print("2. Remove ROI by pressing the mouse right button")
print("3. Press any key to accept the selection")
# Load the image and record its dimensions (grayscale or color).
img = cv2.imread(filename)
if len(img.shape)==2:
    h,w=img.shape
elif len(img.shape)==3:
    h,w,ch=img.shape
img_clone=img.copy()
img_clone2=img.copy()
# Interactive selection: click_event updates roi/isROI; any key accepts.
cv2.namedWindow("Select ROI", cv2.WINDOW_NORMAL)
cv2.imshow("Select ROI",img_clone)
cv2.setMouseCallback("Select ROI", click_event)
cv2.waitKey(0)
print("")
if isROI:
    print("Saving roi.ini")
    # Build and save a ROI file
    img_x0=0
    img_y0=0
    img_x1=w
    img_y1=h
    crop_x0=roi[0]
    crop_x1=roi[2]
    crop_y0=roi[1]
    crop_y1=roi[3]
    # Normalized (0..1) coordinates, rounded to 'decimals' places.
    roi_x0=roix(crop_x0)
    roi_w=roix(crop_x1-crop_x0+1)
    roi_y0=roiy(crop_y0)
    roi_h=roiy(crop_y1-crop_y0+1)
    roilist=[]
    roilist.append(["scale","coordinate name","value"])
    roilist.append(["original","img_x0",img_x0])
    roilist.append(["original","img_x1",img_x1])
    roilist.append(["original","img_y0",img_y0])
    roilist.append(["original","img_y1",img_y1])
    roilist.append(["original","crop_x0",crop_x0])
    roilist.append(["original","crop_x1",crop_x1])
    roilist.append(["original","crop_y0",crop_y0])
    roilist.append(["original","crop_y1",crop_y1])
    roilist.append(["normalized","roi_x0",roi_x0])
    roilist.append(["normalized","roi_y0",roi_y0])
    roilist.append(["normalized","roi_w",roi_w])
    roilist.append(["normalized","roi_h",roi_h])
    # Write the table as a semicolon-separated file.
    with open("roi.ini","w",newline="") as csvfile:
        csvwriter=csv.writer(csvfile,delimiter=";")
        for s in roilist:
            csvwriter.writerow(s)
    # Save ROI images
    print("Saving ROI image files")
    cv2.imwrite("roi-patch.jpg",img_clone)
    cv2.imwrite("roi-selection.jpg",img_clone2)
    cv2.imwrite("roi.jpg",img[crop_y0:crop_y1,crop_x0:crop_x1])
else:
    print("ROI not selected.")
# All these waitkeys are a hack to get the OpenCV window to close
cv2.waitKey(1)
cv2.destroyAllWindows()
for i in range (1,5):
    cv2.waitKey(1)
| [
"cv2.setMouseCallback",
"cv2.imwrite",
"cv2.rectangle",
"argparse.ArgumentParser",
"pathlib.Path",
"csv.writer",
"os.getcwd",
"cv2.imshow",
"os.path.isfile",
"cv2.waitKey",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.addWeighted",
"sys.exit",
"cv2.imread",
"cv2.namedWindow"
] | [((4034, 4059), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4057, 4059), False, 'import argparse\n'), ((5288, 5299), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5297, 5299), False, 'import os\n'), ((5305, 5317), 'pathlib.Path', 'Path', (['curdir'], {}), '(curdir)\n', (5309, 5317), False, 'from pathlib import Path\n'), ((5515, 5535), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (5525, 5535), False, 'import cv2\n'), ((5665, 5713), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Select ROI"""', 'cv2.WINDOW_NORMAL'], {}), "('Select ROI', cv2.WINDOW_NORMAL)\n", (5680, 5713), False, 'import cv2\n'), ((5714, 5749), 'cv2.imshow', 'cv2.imshow', (['"""Select ROI"""', 'img_clone'], {}), "('Select ROI', img_clone)\n", (5724, 5749), False, 'import cv2\n'), ((5749, 5796), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Select ROI"""', 'click_event'], {}), "('Select ROI', click_event)\n", (5769, 5796), False, 'import cv2\n'), ((5797, 5811), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5808, 5811), False, 'import cv2\n'), ((7325, 7339), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7336, 7339), False, 'import cv2\n'), ((7340, 7363), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7361, 7363), False, 'import cv2\n'), ((4698, 4722), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (4712, 4722), False, 'import os\n'), ((4775, 4786), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4783, 4786), False, 'import sys\n'), ((7057, 7096), 'cv2.imwrite', 'cv2.imwrite', (['"""roi-patch.jpg"""', 'img_clone'], {}), "('roi-patch.jpg', img_clone)\n", (7068, 7096), False, 'import cv2\n'), ((7100, 7144), 'cv2.imwrite', 'cv2.imwrite', (['"""roi-selection.jpg"""', 'img_clone2'], {}), "('roi-selection.jpg', img_clone2)\n", (7111, 7144), False, 'import cv2\n'), ((7148, 7209), 'cv2.imwrite', 'cv2.imwrite', (['"""roi.jpg"""', 'img[crop_y0:crop_y1, crop_x0:crop_x1]'], {}), "('roi.jpg', 
img[crop_y0:crop_y1, crop_x0:crop_x1])\n", (7159, 7209), False, 'import cv2\n'), ((7390, 7404), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7401, 7404), False, 'import cv2\n'), ((3260, 3289), 'numpy.zeros', 'np.zeros', (['img.shape', 'np.uint8'], {}), '(img.shape, np.uint8)\n', (3268, 3289), True, 'import numpy as np\n'), ((3297, 3360), 'cv2.rectangle', 'cv2.rectangle', (['blk', '(xsel1, ysel1)', '(xsel2, ysel2)', 'neg_bgr', '(-1)'], {}), '(blk, (xsel1, ysel1), (xsel2, ysel2), neg_bgr, -1)\n', (3310, 3360), False, 'import cv2\n'), ((3376, 3413), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(1)', 'blk', '(0.25)', '(1)'], {}), '(img, 1, blk, 0.25, 1)\n', (3391, 3413), False, 'import cv2\n'), ((3487, 3516), 'numpy.zeros', 'np.zeros', (['img.shape', 'np.uint8'], {}), '(img.shape, np.uint8)\n', (3495, 3516), True, 'import numpy as np\n'), ((3524, 3601), 'cv2.rectangle', 'cv2.rectangle', (['blk', '(xsel1, y - sadjust)', '(xsel2, y + sadjust)', 'cross_col', '(-1)'], {}), '(blk, (xsel1, y - sadjust), (xsel2, y + sadjust), cross_col, -1)\n', (3537, 3601), False, 'import cv2\n'), ((3600, 3677), 'cv2.rectangle', 'cv2.rectangle', (['blk', '(x - sadjust, ysel1)', '(x + sadjust, ysel2)', 'cross_col', '(-1)'], {}), '(blk, (x - sadjust, ysel1), (x + sadjust, ysel2), cross_col, -1)\n', (3613, 3677), False, 'import cv2\n'), ((3687, 3730), 'cv2.addWeighted', 'cv2.addWeighted', (['img_clone', '(1)', 'blk', '(0.25)', '(1)'], {}), '(img_clone, 1, blk, 0.25, 1)\n', (3702, 3730), False, 'import cv2\n'), ((3744, 3780), 'cv2.imshow', 'cv2.imshow', (['"""Select ROI"""', 'img_clone2'], {}), "('Select ROI', img_clone2)\n", (3754, 3780), False, 'import cv2\n'), ((6896, 6930), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""";"""'}), "(csvfile, delimiter=';')\n", (6906, 6930), False, 'import csv\n'), ((3944, 3979), 'cv2.imshow', 'cv2.imshow', (['"""Select ROI"""', 'img_clone'], {}), "('Select ROI', img_clone)\n", (3954, 3979), False, 'import cv2\n')] |
import os
import json
import re
import numpy as np
from shutil import copyfile
from keras.optimizers import SGD
import keras.backend as K
from AlphaGo.ai import ProbabilisticPolicyPlayer
import AlphaGo.go as go
from AlphaGo.models.policy import CNNPolicy
from AlphaGo.util import flatten_idx
def _make_training_pair(st, mv, preprocessor):
    """Convert a (state, move) pair into (state_tensor, one-hot move tensor)."""
    state_tensor = preprocessor.state_to_tensor(st)
    # One-hot encode the move over the flattened board.
    one_hot = np.zeros((1, st.size * st.size))
    one_hot[0, flatten_idx(mv, st.size)] = 1
    return state_tensor, one_hot
def run_n_games(optimizer, learner, opponent, num_games, mock_states=None):
    """Run num_games games to completion, keeping track of each position and move of the learner.
    (Note: learning cannot happen until all games have completed.)

    Parameters
    ----------
    optimizer: the Keras optimizer whose learning-rate sign is flipped per game
    learner: player being trained (its policy model is updated in place)
    opponent: fixed player to train against
    num_games: number of games to play in this batch
    mock_states: optional list of pre-built state objects (testing hook)

    Returns
    -------
    float: fraction of the num_games games won by the learner
    """
    board_size = learner.policy.model.input_shape[-1]
    states = [go.GameState(size=board_size) for _ in range(num_games)]
    learner_net = learner.policy.model
    # Allowing injection of a mock state object for testing purposes.
    # (Default changed from a mutable [] to None; the argument is only read,
    # so behavior is unchanged.)
    if mock_states:
        states = mock_states
    # Create one list of features (aka state tensors) and one of moves for each game being played.
    state_tensors = [[] for _ in range(num_games)]
    move_tensors = [[] for _ in range(num_games)]
    # List of booleans indicating whether the 'learner' player won.
    learner_won = [None] * num_games
    # Start all odd games with moves by 'opponent'. Even games will have 'learner' black.
    learner_color = [go.BLACK if i % 2 == 0 else go.WHITE for i in range(num_games)]
    odd_states = states[1::2]
    moves = opponent.get_moves(odd_states)
    for st, mv in zip(odd_states, moves):
        st.do_move(mv)
    current = learner
    other = opponent
    idxs_to_unfinished_states = {i: states[i] for i in range(num_games)}
    while len(idxs_to_unfinished_states) > 0:
        # Get next moves by current player for all unfinished states.
        moves = current.get_moves(idxs_to_unfinished_states.values())
        just_finished = []
        # Do each move to each state in order.
        # Bug fix: dict.iteritems() is Python-2 only; items() behaves
        # equivalently on both Python 2 and 3.
        for (idx, state), mv in zip(idxs_to_unfinished_states.items(), moves):
            # Order is important here. We must get the training pair on the unmodified state before
            # updating it with do_move.
            is_learnable = current is learner and mv is not go.PASS_MOVE
            if is_learnable:
                (st_tensor, mv_tensor) = _make_training_pair(state, mv, learner.policy.preprocessor)
                state_tensors[idx].append(st_tensor)
                move_tensors[idx].append(mv_tensor)
            state.do_move(mv)
            if state.is_end_of_game:
                learner_won[idx] = state.get_winner() == learner_color[idx]
                just_finished.append(idx)
        # Remove games that have finished from dict.
        for idx in just_finished:
            del idxs_to_unfinished_states[idx]
        # Swap 'current' and 'other' for next turn.
        current, other = other, current
    # Train on each game's results, setting the learning rate negative to 'unlearn' positions from
    # games where the learner lost (REINFORCE-style sign flip).
    for (st_tensor, mv_tensor, won) in zip(state_tensors, move_tensors, learner_won):
        optimizer.lr = K.abs(optimizer.lr) * (+1 if won else -1)
        learner_net.train_on_batch(np.concatenate(st_tensor, axis=0),
                                   np.concatenate(mv_tensor, axis=0))
    # Return the win ratio.
    wins = sum(state.get_winner() == pc for (state, pc) in zip(states, learner_color))
    return float(wins) / num_games
def log_loss(y_true, y_pred):
    """Keras 'loss' function for the REINFORCE algorithm.

    y_true is the (one-hot) action that was taken; updating with the
    negative gradient makes that action more likely. The sign is negated
    because Keras expects training to minimize a loss function.
    """
    eps = K.epsilon()
    clipped = K.clip(y_pred, eps, 1.0 - eps)
    return -y_true * K.log(clipped)
def run_training(cmd_line_args=None):
    """Reinforcement-learning training loop (pipeline phase two).

    Parses command-line arguments (or cmd_line_args when given), sets up
    the learner and an opponent sampled from previously saved weights,
    then repeatedly plays game batches via run_n_games, recording weights
    and win ratios into out_directory/metadata.json.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Perform reinforcement learning to improve given policy network. Second phase of pipeline.')  # noqa: E501
    parser.add_argument("model_json", help="Path to policy model JSON.")
    parser.add_argument("initial_weights", help="Path to HDF5 file with inital weights (i.e. result of supervised training).")  # noqa: E501
    parser.add_argument("out_directory", help="Path to folder where the model params and metadata will be saved after each epoch.")  # noqa: E501
    parser.add_argument("--learning-rate", help="Keras learning rate (Default: 0.001)", type=float, default=0.001)  # noqa: E501
    parser.add_argument("--policy-temp", help="Distribution temperature of players using policies (Default: 0.67)", type=float, default=0.67)  # noqa: E501
    parser.add_argument("--save-every", help="Save policy as a new opponent every n batches (Default: 500)", type=int, default=500)  # noqa: E501
    parser.add_argument("--record-every", help="Save learner's weights every n batches (Default: 1)", type=int, default=1)  # noqa: E501
    parser.add_argument("--game-batch", help="Number of games per mini-batch (Default: 20)", type=int, default=20)  # noqa: E501
    parser.add_argument("--move-limit", help="Maximum number of moves per game", type=int, default=500)  # noqa: E501
    parser.add_argument("--iterations", help="Number of training batches/iterations (Default: 10000)", type=int, default=10000)  # noqa: E501
    parser.add_argument("--resume", help="Load latest weights in out_directory and resume", default=False, action="store_true")  # noqa: E501
    parser.add_argument("--verbose", "-v", help="Turn on verbose mode", default=False, action="store_true")  # noqa: E501
    # Baseline function (TODO) default lambda state: 0 (receives either file
    # paths to JSON and weights or None, in which case it uses default baseline 0)
    if cmd_line_args is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(cmd_line_args)
    ZEROTH_FILE = "weights.00000.hdf5"
    if args.resume:
        if not os.path.exists(os.path.join(args.out_directory, "metadata.json")):
            raise ValueError("Cannot resume without existing output directory")
    if not os.path.exists(args.out_directory):
        if args.verbose:
            print("creating output directory {}".format(args.out_directory))
        os.makedirs(args.out_directory)
    if not args.resume:
        # make a copy of weights file, "weights.00000.hdf5" in the output directory
        copyfile(args.initial_weights, os.path.join(args.out_directory, ZEROTH_FILE))
        if args.verbose:
            print("copied {} to {}".format(args.initial_weights,
                  os.path.join(args.out_directory, ZEROTH_FILE)))
        player_weights = ZEROTH_FILE
        iter_start = 1
    else:
        # if resuming, we expect initial_weights to be just a
        # "weights.#####.hdf5" file, not a full path
        if not re.match(r"weights\.\d{5}\.hdf5", args.initial_weights):
            raise ValueError("Expected to resume from weights file with name 'weights.#####.hdf5'")
        args.initial_weights = os.path.join(args.out_directory,
                                            os.path.basename(args.initial_weights))
        if not os.path.exists(args.initial_weights):
            raise ValueError("Cannot resume; weights {} do not exist".format(args.initial_weights))
        elif args.verbose:
            print("Resuming with weights {}".format(args.initial_weights))
        player_weights = os.path.basename(args.initial_weights)
        # Resume iteration count from the numeric part of "weights.#####.hdf5".
        iter_start = 1 + int(player_weights[8:13])
    # Set initial conditions
    policy = CNNPolicy.load_model(args.model_json)
    policy.model.load_weights(args.initial_weights)
    player = ProbabilisticPolicyPlayer(policy, temperature=args.policy_temp,
                                       move_limit=args.move_limit)
    # different opponents come from simply changing the weights of 'opponent.policy.model'. That
    # is, only 'opp_policy' needs to be changed, and 'opponent' will change.
    opp_policy = CNNPolicy.load_model(args.model_json)
    opponent = ProbabilisticPolicyPlayer(opp_policy, temperature=args.policy_temp,
                                         move_limit=args.move_limit)
    if args.verbose:
        print("created player and opponent with temperature {}".format(args.policy_temp))
    if not args.resume:
        metadata = {
            "model_file": args.model_json,
            "init_weights": args.initial_weights,
            "learning_rate": args.learning_rate,
            "temperature": args.policy_temp,
            "game_batch": args.game_batch,
            "opponents": [ZEROTH_FILE],  # which weights from which to sample an opponent each batch
            "win_ratio": {}  # map from player to tuple of (opponent, win ratio) Useful for
                             # validating in lieu of 'accuracy/loss'
        }
    else:
        with open(os.path.join(args.out_directory, "metadata.json"), "r") as f:
            metadata = json.load(f)
    # Append args of current run to history of full command args.
    metadata["cmd_line_args"] = metadata.get("cmd_line_args", [])
    metadata["cmd_line_args"].append(vars(args))
    def save_metadata():
        # Persist the (mutated) metadata dict back to out_directory.
        with open(os.path.join(args.out_directory, "metadata.json"), "w") as f:
            json.dump(metadata, f, sort_keys=True, indent=2)
    optimizer = SGD(lr=args.learning_rate)
    player.policy.model.compile(loss=log_loss, optimizer=optimizer)
    for i_iter in range(iter_start, args.iterations + 1):
        # Note that player_weights will only be saved as a file every args.record_every iterations.
        # Regardless, player_weights enters into the metadata to keep track of the win ratio over
        # time.
        player_weights = "weights.%05d.hdf5" % i_iter
        # Randomly choose opponent from pool (possibly self), and playing
        # game_batch games against them.
        opp_weights = np.random.choice(metadata["opponents"])
        opp_path = os.path.join(args.out_directory, opp_weights)
        # Load new weights into opponent's network, but keep the same opponent object.
        opponent.policy.model.load_weights(opp_path)
        if args.verbose:
            print("Batch {}\tsampled opponent is {}".format(i_iter, opp_weights))
        # Run games (and learn from results). Keep track of the win ratio vs each opponent over
        # time.
        win_ratio = run_n_games(optimizer, player, opponent, args.game_batch)
        metadata["win_ratio"][player_weights] = (opp_weights, win_ratio)
        # Save intermediate models.
        if i_iter % args.record_every == 0:
            player.policy.model.save_weights(os.path.join(args.out_directory, player_weights))
        # Add player to batch of oppenents once in a while.
        if i_iter % args.save_every == 0:
            metadata["opponents"].append(player_weights)
        save_metadata()
# Script entry point.
if __name__ == '__main__':
    run_training()
| [
"os.path.exists",
"AlphaGo.models.policy.CNNPolicy.load_model",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.random.choice",
"os.path.join",
"re.match",
"keras.backend.epsilon",
"AlphaGo.go.GameState",
"numpy.zeros",
"keras.optimizers.SGD",
"AlphaGo.util.flatten_idx",
"os.path.basename",... | [((436, 468), 'numpy.zeros', 'np.zeros', (['(1, st.size * st.size)'], {}), '((1, st.size * st.size))\n', (444, 468), True, 'import numpy as np\n'), ((4070, 4208), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform reinforcement learning to improve given policy network. Second phase of pipeline."""'}), "(description=\n 'Perform reinforcement learning to improve given policy network. Second phase of pipeline.'\n )\n", (4093, 4208), False, 'import argparse\n'), ((7785, 7822), 'AlphaGo.models.policy.CNNPolicy.load_model', 'CNNPolicy.load_model', (['args.model_json'], {}), '(args.model_json)\n', (7805, 7822), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((7888, 7984), 'AlphaGo.ai.ProbabilisticPolicyPlayer', 'ProbabilisticPolicyPlayer', (['policy'], {'temperature': 'args.policy_temp', 'move_limit': 'args.move_limit'}), '(policy, temperature=args.policy_temp, move_limit=\n args.move_limit)\n', (7913, 7984), False, 'from AlphaGo.ai import ProbabilisticPolicyPlayer\n'), ((8211, 8248), 'AlphaGo.models.policy.CNNPolicy.load_model', 'CNNPolicy.load_model', (['args.model_json'], {}), '(args.model_json)\n', (8231, 8248), False, 'from AlphaGo.models.policy import CNNPolicy\n'), ((8264, 8363), 'AlphaGo.ai.ProbabilisticPolicyPlayer', 'ProbabilisticPolicyPlayer', (['opp_policy'], {'temperature': 'args.policy_temp', 'move_limit': 'args.move_limit'}), '(opp_policy, temperature=args.policy_temp,\n move_limit=args.move_limit)\n', (8289, 8363), False, 'from AlphaGo.ai import ProbabilisticPolicyPlayer\n'), ((9553, 9579), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'args.learning_rate'}), '(lr=args.learning_rate)\n', (9556, 9579), False, 'from keras.optimizers import SGD\n'), ((870, 899), 'AlphaGo.go.GameState', 'go.GameState', ([], {'size': 'board_size'}), '(size=board_size)\n', (882, 899), True, 'import AlphaGo.go as go\n'), ((6313, 6347), 'os.path.exists', 'os.path.exists', (['args.out_directory'], {}), 
'(args.out_directory)\n', (6327, 6347), False, 'import os\n'), ((6459, 6490), 'os.makedirs', 'os.makedirs', (['args.out_directory'], {}), '(args.out_directory)\n', (6470, 6490), False, 'import os\n'), ((7652, 7690), 'os.path.basename', 'os.path.basename', (['args.initial_weights'], {}), '(args.initial_weights)\n', (7668, 7690), False, 'import os\n'), ((10112, 10151), 'numpy.random.choice', 'np.random.choice', (["metadata['opponents']"], {}), "(metadata['opponents'])\n", (10128, 10151), True, 'import numpy as np\n'), ((10171, 10216), 'os.path.join', 'os.path.join', (['args.out_directory', 'opp_weights'], {}), '(args.out_directory, opp_weights)\n', (10183, 10216), False, 'import os\n'), ((487, 511), 'AlphaGo.util.flatten_idx', 'flatten_idx', (['mv', 'st.size'], {}), '(mv, st.size)\n', (498, 511), False, 'from AlphaGo.util import flatten_idx\n'), ((3272, 3291), 'keras.backend.abs', 'K.abs', (['optimizer.lr'], {}), '(optimizer.lr)\n', (3277, 3291), True, 'import keras.backend as K\n'), ((3349, 3382), 'numpy.concatenate', 'np.concatenate', (['st_tensor'], {'axis': '(0)'}), '(st_tensor, axis=0)\n', (3363, 3382), True, 'import numpy as np\n'), ((3419, 3452), 'numpy.concatenate', 'np.concatenate', (['mv_tensor'], {'axis': '(0)'}), '(mv_tensor, axis=0)\n', (3433, 3452), True, 'import numpy as np\n'), ((6639, 6684), 'os.path.join', 'os.path.join', (['args.out_directory', 'ZEROTH_FILE'], {}), '(args.out_directory, ZEROTH_FILE)\n', (6651, 6684), False, 'import os\n'), ((7067, 7124), 're.match', 're.match', (['"""weights\\\\.\\\\d{5}\\\\.hdf5"""', 'args.initial_weights'], {}), "('weights\\\\.\\\\d{5}\\\\.hdf5', args.initial_weights)\n", (7075, 7124), False, 'import re\n'), ((7332, 7370), 'os.path.basename', 'os.path.basename', (['args.initial_weights'], {}), '(args.initial_weights)\n', (7348, 7370), False, 'import os\n'), ((7387, 7423), 'os.path.exists', 'os.path.exists', (['args.initial_weights'], {}), '(args.initial_weights)\n', (7401, 7423), False, 'import os\n'), ((9174, 
9186), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9183, 9186), False, 'import json\n'), ((9487, 9535), 'json.dump', 'json.dump', (['metadata', 'f'], {'sort_keys': '(True)', 'indent': '(2)'}), '(metadata, f, sort_keys=True, indent=2)\n', (9496, 9535), False, 'import json\n'), ((3964, 3975), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (3973, 3975), True, 'import keras.backend as K\n'), ((6169, 6218), 'os.path.join', 'os.path.join', (['args.out_directory', '"""metadata.json"""'], {}), "(args.out_directory, 'metadata.json')\n", (6181, 6218), False, 'import os\n'), ((9089, 9138), 'os.path.join', 'os.path.join', (['args.out_directory', '"""metadata.json"""'], {}), "(args.out_directory, 'metadata.json')\n", (9101, 9138), False, 'import os\n'), ((9413, 9462), 'os.path.join', 'os.path.join', (['args.out_directory', '"""metadata.json"""'], {}), "(args.out_directory, 'metadata.json')\n", (9425, 9462), False, 'import os\n'), ((10855, 10903), 'os.path.join', 'os.path.join', (['args.out_directory', 'player_weights'], {}), '(args.out_directory, player_weights)\n', (10867, 10903), False, 'import os\n'), ((3983, 3994), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (3992, 3994), True, 'import keras.backend as K\n'), ((6819, 6864), 'os.path.join', 'os.path.join', (['args.out_directory', 'ZEROTH_FILE'], {}), '(args.out_directory, ZEROTH_FILE)\n', (6831, 6864), False, 'import os\n')] |
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import itertools
from matplotlib import cm
def plot_color_table(df,
                     axis=1,
                     title=None,
                     rev_index=None,
                     color='RdYlGn',
                     prec=1,
                     ):
    """
    Creates color coded comparison table from dataframe values (green high, red low)

    Parameters
    ----------
    df: pd.DataFrame
        DataFrame of values
    axis: int, default=1
        axis to normalize data upon
    title: str
        title for table
    rev_index: list(int)
        integer positions of columns whose color coding is reversed
        (the code indexes with iloc, so these are positions, not names)
    color: str, default='RdYlGn'
        color palette for table
    prec: int, default=1
        decimal places to show

    Returns
    -------
    plot of color coded table
    """
    # Annotate cells with the raw values; color by a z-scored copy so the
    # colormap spreads over comparable ranges per column (or per row).
    labels = df.values
    cdf = df.copy()
    if axis == 1:
        cdf -= cdf.mean(axis=0)
        cdf /= cdf.std(axis=0)
    else:
        cdf = cdf.transpose()
        cdf -= cdf.mean(axis=0)
        cdf /= cdf.std(axis=0)
        cdf = cdf.transpose()
    if rev_index:
        for i in rev_index:
            # NOTE(review): on z-scored data this both flips and shifts by 1;
            # confirm whether plain negation (-cdf) was intended.
            cdf.iloc[:, i] = 1 - cdf.iloc[:, i]
    plt.figure()
    if title:
        plt.title(title)
    # Bug fix: honor the 'color' argument instead of hard-coding 'RdYlGn'.
    sns.heatmap(cdf, cmap=color, linewidths=0.5, annot=labels,
                fmt=f'0.{prec}f', cbar=False)
    plt.xticks(rotation=0)
    plt.yticks(rotation=0)
def plot_explained_variance(ev,
                            type='bar',
                            toff=0.02,
                            boff=0.08,
                            yoff=None,
                            **kwargs,
                            ):
    """
    Plot explained variance of PCA

    Parameters
    ----------
    ev: nd.array
        explained variance values (fractions; plotted as percentages)
    type: str, default='bar'
        - 'bar': bar plots for cumulative variance
        - 'line': line plot for cumulative variance
        (NOTE: the parameter name shadows the builtin 'type')
    toff: float
        top x offset for cumulative variance values on plot
    boff: float
        bottom x offset for individual variance values on plot
    yoff: float
        bottom y offset for individual variance values on plot
    **kwargs:
        kwargs for plt.plot() if type==line for cumulative variance

    Returns
    -------
    plot of explained variance
    """
    # Primary bar plot of per-component variance, in percent.
    ax = pd.Series(100*ev).plot(kind='bar', figsize=(10,5),
                              color='steelblue', fontsize=13)
    ax.set_ylabel('Explained Variance')
    ax.set_yticks([0, 20, 40, 60, 80, 100])
    ax.set_xlabel('Principal Component')
    # Create a list to collect the plt.patches data.
    totals = []
    # Find the values and append to list.
    for i in ax.patches:
        totals.append(i.get_height())
    # Sum of bar heights (not used further below).
    total = sum(totals)
    # Default vertical offset for cumulative labels depends on plot type.
    yoff_d = {'bar': 0, 'line': 0.2}
    yoff_d[type] = yoff_d[type] if yoff is None else yoff
    # Set individual bar labels using patch list.
    for j, i in enumerate(ax.patches):
        # Get_x pulls left or right; get_height pushes up or down.
        if j > 0:
            ax.text(i.get_x()+boff, i.get_height()+1,
                    str(round(100*ev[j], 1))+'%', fontsize=11,
                    color='dimgrey')
        ax.text(i.get_x()+toff, 100*np.cumsum(ev)[j]+1+yoff_d[type],
                str(round(100*np.cumsum(ev)[j], 1))+'%', fontsize=12,
                color='dimgrey')
    xlab = [f'PC-{i+1}' for i, _ in enumerate(ax.patches)]
    if type == 'bar':
        # Overlay translucent bars showing cumulative variance.
        ax2 = pd.Series(np.cumsum(100*ev)).plot(kind='bar', figsize=(10,5),
                                           color='steelblue', fontsize=8, alpha=0.25)
        for i in ax2.patches:
            totals.append(i.get_height())
        ax2.set_xticklabels(xlab, rotation='horizontal')
        ax2.xaxis.grid(False)
    elif type == 'line':
        # Cumulative variance as a dotted line instead of bars.
        plt.plot(np.cumsum(100*ev), ':o', ms=8, c='steelblue', alpha=0.75, **kwargs)
        ax.xaxis.grid(False)
    ax.set_xticklabels(xlab, rotation='horizontal')
def correlation(df, sort_col=None, plot=False):
    """
    Find absolute correlations of a DataFrame, optionally plot the
    correlation matrix, and optionally return the correlations of one
    column sorted in descending order.

    Parameters
    ----------
    df: pd.DataFrame
        DataFrame of input values
    sort_col: str, default=None
        column to sort correlations on. If provided,
        the function returns that column's correlations
    plot: bool, default=False
        if True plot the correlation matrix

    Returns
    -------
    pd.Series or None
        absolute correlations of sort_col against all other columns,
        sorted descending with the self-correlation dropped; None when
        sort_col is not given
    """
    corr = df.corr().abs()
    if plot:
        # Plot results
        fig, ax = plt.subplots(figsize=(12, 10))
        ax.matshow(corr)
        cmap = cm.get_cmap('coolwarm', 300)
        cax = ax.imshow(corr, interpolation="nearest", cmap=cmap)
        plt.xticks(range(len(corr.columns)), corr.columns)
        plt.yticks(range(len(corr.columns)), corr.columns)
        for tick in ax.get_xticklabels():
            tick.set_rotation(45)
        cax.set_clim([0, 1])
        fig.colorbar(cax, ticks=[0, .25, .5, .75, 1])
    # Bug fix: the original referenced the undefined names 'ret_col' and
    # 'col' (NameError whenever reached); both should be 'sort_col'.
    if sort_col is not None:
        # Sort by the requested column and drop the self-correlation (1.0).
        return corr.sort_values([sort_col], ascending=False)[sort_col][1:]
def plot_ROC_curve(roc,
                   fpr,
                   tpr,
                   color_palette='muted',
                   colors=None,
                   figsize=(10, 10),
                   title='ROC Curves for Studied Models',
                   ):
    """
    Plot ROC curve with AUC for multiple models.

    Parameters
    ----------
    roc: dict
        dictionary of ROC AUC values
        with model names as keys
    fpr: dict
        dictionary of false positive rate
        values with model names as keys
    tpr: dict
        dictionary of true positive rate
        values with model names as keys
    color_palette: str, default='muted'
        name of seaborn color palette to use
    colors: list(str), default=None
        if given, use provided colors instead
        of a color palette
    figsize: tuple(int), default=(10,10)
        figure size
    title: str
        figure title

    Returns
    -------
    plot of ROC curve
    """
    plt.figure(figsize=figsize)
    # Bug fix: the documented 'colors' argument was previously ignored.
    if colors is not None:
        sns.set_palette(colors, len(roc))
    else:
        sns.set_palette(color_palette, len(roc))
    for m in roc.keys():
        plt.plot(fpr[m], tpr[m], alpha=0.7, label=f'{m} (area = {roc[m]:0.3f})')
    # Chance diagonal for reference.
    plt.plot([0, 1], [0, 1], color='dimgrey', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    if title:
        plt.title(title)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc='lower right')
def plot_confusion_matrix(cm,
                          classes,
                          prec=0,
                          title='Confusion Matrix',
                          cbar=True,
                          cmap=plt.cm.Blues,
                          ):
    """
    Plot a confusion matrix, annotating every cell with both its raw
    count and its row-normalized percentage.

    Parameters
    ----------
    cm: nd.array
        confusion matrix counts (note: the name shadows the module-level
        matplotlib 'cm' import inside this function)
    classes: list(str)
        class labels for the axes
    prec: int, default=0
        precision of the normalized percentages
    title: str, default='Confusion Matrix'
        figure title
    cbar: bool, default=True
        if True include a color bar in the figure
    cmap: matplotlib colormap, default=plt.cm.Blues
        colormap for the matrix

    Returns
    -------
    plot of confusion matrix
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    if title:
        plt.title(title)
    if cbar:
        plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=0)
    plt.yticks(ticks, classes)
    # Row-normalize so each cell shows its share of the true-label row.
    cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # Text-color cutoff so annotations stay visible on dark cells.
    cutoff = cm.max() / 2.0
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            txt_color = "white" if cm[row, col] > cutoff else "black"
            plt.text(col, row,
                     f'{cm[row, col]:.0f}\n({100*cm_norm[row, col]:.{prec}f}%)',
                     horizontalalignment="center",
                     color=txt_color)
    plt.tight_layout()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.grid(False)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.cm.astype",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.xticks",
"seaborn.heatmap",
"matplo... | [((1250, 1262), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1260, 1262), True, 'import matplotlib.pyplot as plt\n'), ((1306, 1402), 'seaborn.heatmap', 'sns.heatmap', (['cdf'], {'cmap': '"""RdYlGn"""', 'linewidths': '(0.5)', 'annot': 'labels', 'fmt': 'f"""0.{prec}f"""', 'cbar': '(False)'}), "(cdf, cmap='RdYlGn', linewidths=0.5, annot=labels, fmt=\n f'0.{prec}f', cbar=False)\n", (1317, 1402), True, 'import seaborn as sns\n'), ((1418, 1440), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(0)'}), '(rotation=0)\n', (1428, 1440), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1467), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'rotation': '(0)'}), '(rotation=0)\n', (1455, 1467), True, 'import matplotlib.pyplot as plt\n'), ((6237, 6264), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6247, 6264), True, 'import matplotlib.pyplot as plt\n'), ((6420, 6483), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""dimgrey"""', 'lw': '(2)', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='dimgrey', lw=2, linestyle='--')\n", (6428, 6483), True, 'import matplotlib.pyplot as plt\n'), ((6488, 6508), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (6496, 6508), True, 'import matplotlib.pyplot as plt\n'), ((6513, 6534), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (6521, 6534), True, 'import matplotlib.pyplot as plt\n'), ((6578, 6611), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (6588, 6611), True, 'import matplotlib.pyplot as plt\n'), ((6616, 6648), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (6626, 6648), True, 'import matplotlib.pyplot as plt\n'), ((6653, 6682), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (6663, 
6682), True, 'import matplotlib.pyplot as plt\n'), ((7519, 7569), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (7529, 7569), True, 'import matplotlib.pyplot as plt\n'), ((7692, 7735), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(0)'}), '(tick_marks, classes, rotation=0)\n', (7702, 7735), True, 'import matplotlib.pyplot as plt\n'), ((7740, 7771), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (7750, 7771), True, 'import matplotlib.pyplot as plt\n'), ((8207, 8225), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8223, 8225), True, 'import matplotlib.pyplot as plt\n'), ((8230, 8254), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Label"""'], {}), "('True Label')\n", (8240, 8254), True, 'import matplotlib.pyplot as plt\n'), ((8259, 8288), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted Label"""'], {}), "('Predicted Label')\n", (8269, 8288), True, 'import matplotlib.pyplot as plt\n'), ((8293, 8308), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (8301, 8308), True, 'import matplotlib.pyplot as plt\n'), ((1285, 1301), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1294, 1301), True, 'import matplotlib.pyplot as plt\n'), ((4732, 4762), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (4744, 4762), True, 'import matplotlib.pyplot as plt\n'), ((4803, 4831), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""coolwarm"""', '(300)'], {}), "('coolwarm', 300)\n", (4814, 4831), False, 'from matplotlib import cm\n'), ((6343, 6415), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr[m]', 'tpr[m]'], {'alpha': '(0.7)', 'label': 'f"""{m} (area = {roc[m]:0.3f})"""'}), "(fpr[m], tpr[m], alpha=0.7, label=f'{m} (area = {roc[m]:0.3f})')\n", (6351, 6415), True, 
'import matplotlib.pyplot as plt\n'), ((6557, 6573), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6566, 6573), True, 'import matplotlib.pyplot as plt\n'), ((7593, 7609), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7602, 7609), True, 'import matplotlib.pyplot as plt\n'), ((7631, 7645), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7643, 7645), True, 'import matplotlib.pyplot as plt\n'), ((7815, 7833), 'matplotlib.cm.astype', 'cm.astype', (['"""float"""'], {}), "('float')\n", (7824, 7833), False, 'from matplotlib import cm\n'), ((7880, 7888), 'matplotlib.cm.max', 'cm.max', ([], {}), '()\n', (7886, 7888), False, 'from matplotlib import cm\n'), ((8022, 8180), 'matplotlib.pyplot.text', 'plt.text', (['j', 'i', 'f"""{cm[i, j]:.0f}\n({100 * cm_norm[i, j]:.{prec}f}%)"""'], {'horizontalalignment': '"""center"""', 'color': "('white' if cm[i, j] > thresh else 'black')"}), '(j, i, f"""{cm[i, j]:.0f}\n({100 * cm_norm[i, j]:.{prec}f}%)""",\n horizontalalignment=\'center\', color=\'white\' if cm[i, j] > thresh else\n \'black\')\n', (8030, 8180), True, 'import matplotlib.pyplot as plt\n'), ((2374, 2393), 'pandas.Series', 'pd.Series', (['(100 * ev)'], {}), '(100 * ev)\n', (2383, 2393), True, 'import pandas as pd\n'), ((7836, 7850), 'matplotlib.cm.sum', 'cm.sum', ([], {'axis': '(1)'}), '(axis=1)\n', (7842, 7850), False, 'from matplotlib import cm\n'), ((3871, 3890), 'numpy.cumsum', 'np.cumsum', (['(100 * ev)'], {}), '(100 * ev)\n', (3880, 3890), True, 'import numpy as np\n'), ((3556, 3575), 'numpy.cumsum', 'np.cumsum', (['(100 * ev)'], {}), '(100 * ev)\n', (3565, 3575), True, 'import numpy as np\n'), ((3314, 3327), 'numpy.cumsum', 'np.cumsum', (['ev'], {}), '(ev)\n', (3323, 3327), True, 'import numpy as np\n'), ((3377, 3390), 'numpy.cumsum', 'np.cumsum', (['ev'], {}), '(ev)\n', (3386, 3390), True, 'import numpy as np\n')] |
# Pre-processed images data collection: append one labelled, flattened photo
# to a cumulative .npy dataset on disk.
import cv2
import numpy as np
import os
# NOTE(review): cv2.imread returns None when the path is wrong, which would
# make photo.flatten() below raise AttributeError — confirm the path first.
photo = cv2.imread("path of image") #photo should be of 640x480 pixels and axis must match.
name = input("Enter your name : ")
frames = []
outputs = []
# One sample: the flattened 1-D pixel vector plus its label.
frames.append(photo.flatten())
outputs.append([name])
X = np.array(frames)
y = np.array(outputs)
# Join the label column and the pixel columns into one row per sample.
# NOTE(review): stacking the string label with numeric pixels upcasts the
# whole row to a string dtype — loaders must cast the pixels back.
data = np.hstack([y, X])
f_name = "face_data.npy"
if os.path.exists(f_name):
    # Append to any previously collected samples.
    old = np.load(f_name)
    data = np.vstack([old, data])
np.save(f_name, data)
| [
"os.path.exists",
"numpy.hstack",
"numpy.array",
"numpy.vstack",
"numpy.load",
"cv2.imread",
"numpy.save"
] | [((91, 118), 'cv2.imread', 'cv2.imread', (['"""path of image"""'], {}), "('path of image')\n", (101, 118), False, 'import cv2\n'), ((305, 321), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (313, 321), True, 'import numpy as np\n'), ((327, 344), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (335, 344), True, 'import numpy as np\n'), ((355, 372), 'numpy.hstack', 'np.hstack', (['[y, X]'], {}), '([y, X])\n', (364, 372), True, 'import numpy as np\n'), ((407, 429), 'os.path.exists', 'os.path.exists', (['f_name'], {}), '(f_name)\n', (421, 429), False, 'import os\n'), ((496, 517), 'numpy.save', 'np.save', (['f_name', 'data'], {}), '(f_name, data)\n', (503, 517), True, 'import numpy as np\n'), ((442, 457), 'numpy.load', 'np.load', (['f_name'], {}), '(f_name)\n', (449, 457), True, 'import numpy as np\n'), ((470, 492), 'numpy.vstack', 'np.vstack', (['[old, data]'], {}), '([old, data])\n', (479, 492), True, 'import numpy as np\n')] |
import unittest
from numba.cuda.testing import CUDATestCase, skip_on_cudasim
from numba.tests.support import captured_stdout
@skip_on_cudasim("cudasim doesn't support cuda import at non-top-level")
class TestMonteCarlo(CUDATestCase):
    """
    Test Monte-Carlo integration of 1/x on the GPU.

    stdout is captured around each test so the example's own prints do not
    pollute the suite's output. The ``ex_montecarlo.*.begin/end`` comment
    markers delimit snippets that are included verbatim in the documentation;
    keep them intact.
    """
    def setUp(self):
        # Prevent output from this test showing up when running the test suite
        self._captured_stdout = captured_stdout()
        self._captured_stdout.__enter__()
        super().setUp()
    def tearDown(self):
        # No exception type, value, or traceback
        self._captured_stdout.__exit__(None, None, None)
        super().tearDown()
    def test_ex_montecarlo(self):
        # ex_montecarlo.import.begin
        import numba
        import numpy as np
        from numba import cuda
        from numba.cuda.random import (
            create_xoroshiro128p_states,
            xoroshiro128p_uniform_float32,
        )
        # ex_montecarlo.import.end
        # ex_montecarlo.define.begin
        # number of samples, higher will lead to a more accurate answer
        nsamps = 1000000
        # ex_montecarlo.define.end
        # ex_montecarlo.kernel.begin
        @cuda.jit
        def mc_integrator_kernel(out, rng_states, lower_lim, upper_lim):
            """
            kernel to draw random samples and evaluate the function to
            be integrated at those sample values
            """
            size = len(out)
            gid = cuda.grid(1)
            if gid < size:
                # draw a sample between 0 and 1 on this thread
                samp = xoroshiro128p_uniform_float32(rng_states, gid)
                # normalize this sample to the limit range
                samp = samp * (upper_lim - lower_lim) + lower_lim
                # evaluate the function to be
                # integrated at the normalized
                # value of the sample
                y = func(samp)
                out[gid] = y
        # ex_montecarlo.kernel.end
        # ex_montecarlo.callfunc.begin
        @cuda.reduce
        def sum_reduce(a, b):
            return a + b
        def mc_integrate(lower_lim, upper_lim, nsamps):
            """
            approximate the definite integral of `func` from
            `lower_lim` to `upper_lim`
            """
            out = cuda.to_device(np.zeros(nsamps, dtype="float32"))
            # seed is fixed so the test is deterministic
            rng_states = create_xoroshiro128p_states(nsamps, seed=42)
            # jit the function for use in CUDA kernels
            mc_integrator_kernel.forall(nsamps)(
                out, rng_states, lower_lim, upper_lim
            )
            # normalization factor to convert
            # to the average: (b - a)/(N - 1)
            factor = (upper_lim - lower_lim) / (nsamps - 1)
            return sum_reduce(out) * factor
        # ex_montecarlo.callfunc.end
        # ex_montecarlo.launch.begin
        # define a function to integrate
        @numba.jit
        def func(x):
            return 1.0 / x
        mc_integrate(1, 2, nsamps) # array(0.6929643, dtype=float32)
        mc_integrate(2, 3, nsamps) # array(0.4054021, dtype=float32)
        # ex_montecarlo.launch.end
        # values computed independently using maple
        np.testing.assert_allclose(
            mc_integrate(1, 2, nsamps), 0.69315, atol=0.001
        )
        np.testing.assert_allclose(
            mc_integrate(2, 3, nsamps), 0.4055, atol=0.001
        )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| [
"numba.cuda.random.create_xoroshiro128p_states",
"numba.cuda.random.xoroshiro128p_uniform_float32",
"numba.cuda.grid",
"unittest.main",
"numba.tests.support.captured_stdout",
"numpy.zeros",
"numba.cuda.testing.skip_on_cudasim"
] | [((129, 200), 'numba.cuda.testing.skip_on_cudasim', 'skip_on_cudasim', (['"""cudasim doesn\'t support cuda import at non-top-level"""'], {}), '("cudasim doesn\'t support cuda import at non-top-level")\n', (144, 200), False, 'from numba.cuda.testing import CUDATestCase, skip_on_cudasim\n'), ((3475, 3490), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3488, 3490), False, 'import unittest\n'), ((419, 436), 'numba.tests.support.captured_stdout', 'captured_stdout', ([], {}), '()\n', (434, 436), False, 'from numba.tests.support import captured_stdout\n'), ((1479, 1491), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (1488, 1491), False, 'from numba import cuda\n'), ((2403, 2447), 'numba.cuda.random.create_xoroshiro128p_states', 'create_xoroshiro128p_states', (['nsamps'], {'seed': '(42)'}), '(nsamps, seed=42)\n', (2430, 2447), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32\n'), ((1605, 1651), 'numba.cuda.random.xoroshiro128p_uniform_float32', 'xoroshiro128p_uniform_float32', (['rng_states', 'gid'], {}), '(rng_states, gid)\n', (1634, 1651), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32\n'), ((2343, 2376), 'numpy.zeros', 'np.zeros', (['nsamps'], {'dtype': '"""float32"""'}), "(nsamps, dtype='float32')\n", (2351, 2376), True, 'import numpy as np\n')] |
from typing import List, Optional, Tuple
from collections import defaultdict
import pickle
import json
from os import path
import click
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
from flask import Flask, jsonify, request
import numpy as np
from qanta import util
from qanta.dataset import QuizBowlDataset
import nltk
# Pickled TF-IDF guesser parameters (written by save(), read by load()).
MODEL_PATH = 'tfidf.pickle'
# Number of candidate answers retrieved per question.
BUZZ_NUM_GUESSES = 5
# Minimum share of the candidate score mass the top guess must hold to buzz.
BUZZ_THRESHOLD = 0.23
# Dimensionality of the word2vec embedding vectors.
W2V_LENGTH = 300
# Pickled word2vec model file loaded in TfidfGuesser.__init__.
W2V_MODEL = 'full_model.pkl'
# Weight of the embedding-similarity term mixed into the TF-IDF scores.
W2V_LAMBDA = 0.5
IS_MULTI = True
IS_BERT = True
bert = None
guesser = None  # populated by create_app() with the loaded TfidfGuesser
if IS_BERT:
    # The BERT re-ranker is only imported and instantiated when enabled.
    import qanta_bert
    bert = qanta_bert.qanta_bert()
def get_topk(question, docs):
    """Ask the BERT reader to pick the best document among ``docs``.

    Returns the index (into ``docs``) of the top-ranked document, or 0 when
    the reader produces no usable prediction.
    """
    candidate_texts = [guesser.answer_docs[doc] for doc in docs]
    prediction = bert.predict(question, candidate_texts)
    if not prediction or not prediction[0]:
        return 0
    return int(prediction[0][0]["doc_index"])
def guess_and_buzz(model, question_text, idx, multi) -> Tuple[str, bool]:
    """Produce a guess for one question and decide whether to buzz.

    Args:
        model: guesser exposing ``guess(questions, idxs, max_n_guesses)``.
        question_text: question text revealed so far.
        idx: reveal index; buzzing is suppressed until ``idx > 1``.
        multi: when True, follow the multi-guess path (re-ranked by BERT
            when the module-level ``IS_BERT`` flag is set).

    Returns:
        ``(guess, buzz)`` — in the multi/non-BERT path ``guess`` is the list
        of candidate answers, otherwise the single top answer string.
        (The original annotation declared a 3-tuple; every path returns 2.)
    """
    # Both branches of the original issued the identical call; query once.
    guesses = model.guess([question_text], [idx], BUZZ_NUM_GUESSES)[0]
    scores = [guess[1] for guess in guesses]
    # Buzz when the top score dominates the candidate mass, but never early.
    buzz = scores[0] / sum(scores) >= BUZZ_THRESHOLD
    buzz = buzz and idx > 1
    if multi:
        if not IS_BERT:
            return [guess[0] for guess in guesses], buzz
        topk = get_topk(question_text, [guess[0] for guess in guesses])
        # NOTE(review): the BERT path recomputes buzz without the `idx > 1`
        # guard, mirroring the original behaviour — confirm this is intended.
        buzz = scores[topk] / sum(scores) >= BUZZ_THRESHOLD
        return guesses[topk][0], buzz
    return guesses[0][0], buzz
def batch_guess_and_buzz(model, questions, idxs, multi) -> List[Tuple[str, bool]]:
    """Batched version of :func:`guess_and_buzz`.

    Returns one ``(guess, buzz)`` pair per input question. (The original
    annotation declared 3-tuples; every path appends 2-tuples.)
    """
    # Both branches of the original issued the identical call; query once.
    question_guesses = model.guess(questions, idxs, BUZZ_NUM_GUESSES)
    assert len(questions) == len(question_guesses)
    outputs = []
    for i, guesses in tqdm(enumerate(question_guesses)):
        scores = [guess[1] for guess in guesses]
        # Same buzz rule as the single-question path, with the per-question
        # reveal index guarding against early buzzes.
        buzz = scores[0] / sum(scores) >= BUZZ_THRESHOLD
        buzz = buzz and idxs[i] > 1
        if multi:
            if not IS_BERT:
                outputs.append(([guess[0] for guess in guesses], buzz))
            else:
                topk = get_topk(questions[i], [guess[0] for guess in guesses])
                # NOTE(review): like the single-question path, the BERT branch
                # drops the `idxs[i] > 1` guard when recomputing buzz.
                buzz = scores[topk] / sum(scores) >= BUZZ_THRESHOLD
                outputs.append((guesses[topk][0], buzz))
        else:
            outputs.append((guesses[0][0], buzz))
    return outputs
class TfidfGuesser:
    """TF-IDF guesser blended with averaged word2vec similarity.

    Candidates are scored as ``tfidf_score + W2V_LAMBDA * cosine`` where the
    embedding of an answer (or question) is the normalised mean of the
    word2vec vectors of its tokens.
    """
    def __init__(self):
        # Fitted sklearn vectorizer / document-term matrix (set by train/load).
        self.tfidf_vectorizer = None
        self.tfidf_matrix = None
        # Maps row index of tfidf_matrix -> answer page title.
        self.i_to_ans = None
        # Pre-trained word2vec lookup (token -> vector), loaded eagerly.
        self.w2v = pickle.load(open(W2V_MODEL, "rb"))
        self.answer_docs = defaultdict(str)
        if IS_BERT:
            # Build the answer -> supporting-text corpus read by the BERT
            # re-ranker: the wikipedia article plus the training questions.
            with open("wiki_lookup.json", "r") as f:
                wiki = json.load(f)
                for k in tqdm(wiki):
                    self.answer_docs[k] += ' ' + wiki[k]["text"]
            with open("data/qanta.mapped.2018.04.18.json") as f:
                dataset = json.load(f)
                raw_questions = dataset["questions"]
                GUESSER_TRAIN_FOLD = 'guesstrain'
                BUZZER_TRAIN_FOLD = 'buzztrain'
                TRAIN_FOLDS = {GUESSER_TRAIN_FOLD, BUZZER_TRAIN_FOLD}
                for q in tqdm(raw_questions):
                    if q['fold'] in TRAIN_FOLDS:
                        self.answer_docs[q['page']] += ' ' + q['text']
    def train(self, training_data) -> None:
        """Fit the TF-IDF matrix and per-answer embedding vectors.

        ``training_data`` is a pair ``(questions, answers)`` where each
        question is a sequence of sentences and answers[i] labels questions[i].
        """
        questions = training_data[0]
        answers = training_data[1]
        answer_docs = defaultdict(str)
        answer_vecs = defaultdict(lambda: [])
        for q, ans in tqdm(zip(questions, answers)):
            text = ' '.join(q)
            answer_docs[ans] += ' ' + text
            # Collect the word2vec vector of every known token for this answer.
            for s in q:
                for w in nltk.word_tokenize(s):
                    if w in self.w2v:
                        answer_vecs[ans].append(self.w2v[w])
        x_array = []
        y_array = []
        self.vecs_array = []
        for ans, doc in tqdm(answer_docs.items()):
            x_array.append(doc)
            y_array.append(ans)
            if(len(answer_vecs[ans]) == 0):
                # No token had an embedding: fall back to the zero vector,
                # left unnormalised to avoid dividing by a zero norm.
                vec = np.zeros(W2V_LENGTH)
            else:
                vec = np.sum(answer_vecs[ans], axis = 0) / len(answer_vecs[ans])
                vec = vec / np.linalg.norm(vec)
            self.vecs_array.append(vec)
        self.vecs_array = np.array(self.vecs_array)
        self.i_to_ans = {i: ans for i, ans in enumerate(y_array)}
        print("Fitting")
        self.tfidf_vectorizer = TfidfVectorizer(
            ngram_range=(1, 1), min_df=2, max_df=.9
            , stop_words = "english").fit(x_array)
        print("Transform")
        self.tfidf_matrix = self.tfidf_vectorizer.transform(x_array)
    def guess(self, questions: List[str], idxs: Optional[int], max_n_guesses: Optional[int]) -> List[List[Tuple[str, float]]]:
        """Return the top ``max_n_guesses`` (answer, score) pairs per question.

        NOTE(review): the ``idxs`` parameter is never read — the name is
        immediately shadowed by the loop variable near the bottom.
        """
        representations = self.tfidf_vectorizer.transform(questions)
        guess_matrix = self.tfidf_matrix.dot(representations.T).T
        '''
        print(len(questions), representations.shape)
        print(self.tfidf_matrix.shape)
        print(guess_matrix.shape)
        print(self.vecs_array.shape)
        '''
        # Mean word2vec embedding of each question, normalised as in train().
        vecs_represent = []
        for q in questions:
            temp = []
            for w in nltk.word_tokenize(q):
                if w in self.w2v:
                    temp.append(self.w2v[w])
            if not temp:
                vecs_represent.append(np.zeros(W2V_LENGTH))
            else:
                temp = np.sum(temp, axis = 0) / len(temp)
                temp = temp / np.linalg.norm(temp)
                vecs_represent.append(temp)
        vecs_represent = np.array(vecs_represent)
        self.vecs_array = np.array(self.vecs_array)
        # Debug guard: report any answer row that is not a proper ndarray.
        for i in range(len(self.vecs_array)):
            if(type(self.vecs_array[i]) != np.ndarray):
                print(self.vecs_array[i])
        # Blend the dense embedding similarity into the sparse TF-IDF scores.
        vecs_matrix = self.vecs_array.dot(vecs_represent.T).T
        guess_matrix = guess_matrix.toarray() + W2V_LAMBDA * vecs_matrix
        guess_indices = (-guess_matrix).argsort(axis=1)[:, 0:max_n_guesses]
        guesses = []
        for i in range(len(questions)):
            idxs = guess_indices[i]
            guesses.append([(self.i_to_ans[j], guess_matrix[i, j]) for j in idxs])
        return guesses
    def save(self):
        """Pickle the fitted model parameters to MODEL_PATH."""
        with open(MODEL_PATH, 'wb') as f:
            pickle.dump({
                'i_to_ans': self.i_to_ans,
                'tfidf_vectorizer': self.tfidf_vectorizer,
                'tfidf_matrix': self.tfidf_matrix,
                'vecs_array': self.vecs_array
            }, f)
    @classmethod
    def load(cls):
        """Restore a guesser from MODEL_PATH.

        NOTE(review): ``TfidfGuesser()`` re-runs ``__init__`` here, which
        reloads the word2vec pickle (and the wiki corpus when IS_BERT) before
        the saved parameters overwrite the fitted fields.
        """
        with open(MODEL_PATH, 'rb') as f:
            params = pickle.load(f)
            guesser = TfidfGuesser()
            guesser.tfidf_vectorizer = params['tfidf_vectorizer']
            guesser.tfidf_matrix = params['tfidf_matrix']
            guesser.i_to_ans = params['i_to_ans']
            guesser.vecs_array = params['vecs_array']
            return guesser
def create_app(enable_batch=True):
    """Construct the Flask app serving guesses over the quizbowl HTTP API.

    Loading the pickled model also publishes it through the module-level
    ``guesser`` so the BERT re-ranking path can reach it.
    """
    global guesser
    model = TfidfGuesser.load()
    guesser = model
    app = Flask(__name__)

    @app.route('/api/1.0/quizbowl/status', methods=['GET'])
    def status():
        # Advertise server capabilities to the evaluation harness.
        return jsonify({
            'batch': enable_batch,
            'batch_size': 200,
            'ready': True,
            'include_wiki_paragraphs': False
        })

    @app.route('/api/1.0/quizbowl/act', methods=['POST'])
    def act():
        payload = request.json
        guess, buzz = guess_and_buzz(model, payload['text'], payload['question_idx'], IS_MULTI)
        return jsonify({'guess': guess, 'buzz': bool(buzz)})

    @app.route('/api/1.0/quizbowl/batch_act', methods=['POST'])
    def batch_act():
        batch = request.json['questions']
        idxs = [q['question_idx'] for q in batch]
        texts = [q['text'] for q in batch]
        results = batch_guess_and_buzz(model, texts, idxs, IS_MULTI)
        return jsonify([{'guess': g, 'buzz': bool(b)} for g, b in results])

    return app
@click.group()
def cli():
    # Root click command group; subcommands attach via @cli.command().
    # (Kept as a bare `pass`: a docstring here would change the CLI help text.)
    pass
@cli.command()
@click.option('--host', default='0.0.0.0')
@click.option('--port', default=4861)
@click.option('--disable-batch', default=False, is_flag=True)
def web(host, port, disable_batch):
    """
    Start web server wrapping tfidf model
    """
    # Build the Flask app (batch endpoint on unless explicitly disabled)
    # and serve it; debug stays off for evaluation runs.
    create_app(enable_batch=not disable_batch).run(host=host, port=port, debug=False)
@cli.command()
def train():
    """
    Train the tfidf model, requires downloaded data and saves to models/
    """
    # Fit a fresh guesser on the guesser-train fold and persist it to disk.
    model = TfidfGuesser()
    model.train(QuizBowlDataset(guesser_train=True).training_data())
    model.save()
@cli.command()
@click.option('--local-qanta-prefix', default='data/')
@click.option('--retrieve-paragraphs', default=False, is_flag=True)
def download(local_qanta_prefix, retrieve_paragraphs):
    """
    Run once to download qanta data to data/. Runs inside the docker container, but results save to host machine
    """
    # Thin CLI wrapper: the download logic lives in the shared qanta utility.
    util.download(local_qanta_prefix, retrieve_paragraphs)
if __name__ == '__main__':
    # Dispatch to the click command group (web / train / download).
    cli()
| [
"pickle.dump",
"nltk.word_tokenize",
"flask.Flask",
"click.group",
"click.option",
"flask.jsonify",
"qanta.dataset.QuizBowlDataset",
"tqdm.tqdm",
"pickle.load",
"numpy.array",
"numpy.zeros",
"qanta_bert.qanta_bert",
"collections.defaultdict",
"sklearn.feature_extraction.text.TfidfVectorize... | [((8331, 8344), 'click.group', 'click.group', ([], {}), '()\n', (8342, 8344), False, 'import click\n'), ((8383, 8424), 'click.option', 'click.option', (['"""--host"""'], {'default': '"""0.0.0.0"""'}), "('--host', default='0.0.0.0')\n", (8395, 8424), False, 'import click\n'), ((8426, 8462), 'click.option', 'click.option', (['"""--port"""'], {'default': '(4861)'}), "('--port', default=4861)\n", (8438, 8462), False, 'import click\n'), ((8464, 8524), 'click.option', 'click.option', (['"""--disable-batch"""'], {'default': '(False)', 'is_flag': '(True)'}), "('--disable-batch', default=False, is_flag=True)\n", (8476, 8524), False, 'import click\n'), ((9015, 9068), 'click.option', 'click.option', (['"""--local-qanta-prefix"""'], {'default': '"""data/"""'}), "('--local-qanta-prefix', default='data/')\n", (9027, 9068), False, 'import click\n'), ((9070, 9136), 'click.option', 'click.option', (['"""--retrieve-paragraphs"""'], {'default': '(False)', 'is_flag': '(True)'}), "('--retrieve-paragraphs', default=False, is_flag=True)\n", (9082, 9136), False, 'import click\n'), ((598, 621), 'qanta_bert.qanta_bert', 'qanta_bert.qanta_bert', ([], {}), '()\n', (619, 621), False, 'import qanta_bert\n'), ((7320, 7335), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (7325, 7335), False, 'from flask import Flask, jsonify, request\n'), ((8852, 8887), 'qanta.dataset.QuizBowlDataset', 'QuizBowlDataset', ([], {'guesser_train': '(True)'}), '(guesser_train=True)\n', (8867, 8887), False, 'from qanta.dataset import QuizBowlDataset\n'), ((9325, 9379), 'qanta.util.download', 'util.download', (['local_qanta_prefix', 'retrieve_paragraphs'], {}), '(local_qanta_prefix, retrieve_paragraphs)\n', (9338, 9379), False, 'from qanta import util\n'), ((2843, 2859), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (2854, 2859), False, 'from collections import defaultdict\n'), ((3694, 3710), 'collections.defaultdict', 'defaultdict', 
(['str'], {}), '(str)\n', (3705, 3710), False, 'from collections import defaultdict\n'), ((3733, 3757), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (3744, 3757), False, 'from collections import defaultdict\n'), ((4543, 4568), 'numpy.array', 'np.array', (['self.vecs_array'], {}), '(self.vecs_array)\n', (4551, 4568), True, 'import numpy as np\n'), ((5836, 5860), 'numpy.array', 'np.array', (['vecs_represent'], {}), '(vecs_represent)\n', (5844, 5860), True, 'import numpy as np\n'), ((5887, 5912), 'numpy.array', 'np.array', (['self.vecs_array'], {}), '(self.vecs_array)\n', (5895, 5912), True, 'import numpy as np\n'), ((7430, 7534), 'flask.jsonify', 'jsonify', (["{'batch': enable_batch, 'batch_size': 200, 'ready': True,\n 'include_wiki_paragraphs': False}"], {}), "({'batch': enable_batch, 'batch_size': 200, 'ready': True,\n 'include_wiki_paragraphs': False})\n", (7437, 7534), False, 'from flask import Flask, jsonify, request\n'), ((7838, 7896), 'flask.jsonify', 'jsonify', (["{'guess': guess, 'buzz': True if buzz else False}"], {}), "({'guess': guess, 'buzz': True if buzz else False})\n", (7845, 7896), False, 'from flask import Flask, jsonify, request\n'), ((2990, 3000), 'tqdm.tqdm', 'tqdm', (['wiki'], {}), '(wiki)\n', (2994, 3000), False, 'from tqdm import tqdm\n'), ((5453, 5474), 'nltk.word_tokenize', 'nltk.word_tokenize', (['q'], {}), '(q)\n', (5471, 5474), False, 'import nltk\n'), ((6547, 6709), 'pickle.dump', 'pickle.dump', (["{'i_to_ans': self.i_to_ans, 'tfidf_vectorizer': self.tfidf_vectorizer,\n 'tfidf_matrix': self.tfidf_matrix, 'vecs_array': self.vecs_array}", 'f'], {}), "({'i_to_ans': self.i_to_ans, 'tfidf_vectorizer': self.\n tfidf_vectorizer, 'tfidf_matrix': self.tfidf_matrix, 'vecs_array': self\n .vecs_array}, f)\n", (6558, 6709), False, 'import pickle\n'), ((6878, 6892), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6889, 6892), False, 'import pickle\n'), ((2956, 2968), 'json.load', 'json.load', (['f'], {}), 
'(f)\n', (2965, 2968), False, 'import json\n'), ((3155, 3167), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3164, 3167), False, 'import json\n'), ((3414, 3433), 'tqdm.tqdm', 'tqdm', (['raw_questions'], {}), '(raw_questions)\n', (3418, 3433), False, 'from tqdm import tqdm\n'), ((3933, 3954), 'nltk.word_tokenize', 'nltk.word_tokenize', (['s'], {}), '(s)\n', (3951, 3954), False, 'import nltk\n'), ((4308, 4328), 'numpy.zeros', 'np.zeros', (['W2V_LENGTH'], {}), '(W2V_LENGTH)\n', (4316, 4328), True, 'import numpy as np\n'), ((4692, 4771), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 1)', 'min_df': '(2)', 'max_df': '(0.9)', 'stop_words': '"""english"""'}), "(ngram_range=(1, 1), min_df=2, max_df=0.9, stop_words='english')\n", (4707, 4771), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4369, 4401), 'numpy.sum', 'np.sum', (['answer_vecs[ans]'], {'axis': '(0)'}), '(answer_vecs[ans], axis=0)\n', (4375, 4401), True, 'import numpy as np\n'), ((4456, 4475), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (4470, 4475), True, 'import numpy as np\n'), ((5618, 5638), 'numpy.zeros', 'np.zeros', (['W2V_LENGTH'], {}), '(W2V_LENGTH)\n', (5626, 5638), True, 'import numpy as np\n'), ((5681, 5701), 'numpy.sum', 'np.sum', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (5687, 5701), True, 'import numpy as np\n'), ((5746, 5766), 'numpy.linalg.norm', 'np.linalg.norm', (['temp'], {}), '(temp)\n', (5760, 5766), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import csv
import sys
import os, os.path
import numpy as np
from datetime import datetime as dt
from scipy import optimize
from scripts import signals as sig
from scripts import fft_estimator
from scripts import optimizing
from scripts import utility
from scripts import crlb
from scripts import cfg
try:
    # The single positional argument selects which experiment to run.
    task = sys.argv[1]
except IndexError:
    # Only an out-of-range argv access can fail here; the original's broad
    # `except Exception as e` (with `e` unused) would also mask unrelated bugs.
    print("No input task provided, exiting. \n Usage: python main.py <task>")
    exit(1)
# Experiment grid: signal-to-noise ratios (dB) and FFT sizes (as powers of two).
SNR_dBs = [-10, 0, 10, 20, 30, 40, 50, 60]
FFT_Ks = [10, 12, 14, 16, 18, 20]
n = len(SNR_dBs)
m = len(FFT_Ks)
N = 100  # Amount of samples to generate when estimating variance
# Generate unique filename for data file output: count the files already in
# ./data so each run writes a fresh, numbered CSV.
run_number = sum(1 for name in os.listdir('./data') if os.path.isfile('./data/' + name))
if task == 'a':
    # Part (a): FFT-only estimation of frequency and phase, swept over both
    # FFT size (M = 2**K) and SNR, with empirical variances compared to CRLB.
    filename = 'data/part_a_run_' + str(run_number) + '_N_' + str(N) + '.csv'
    # NOTE(review): 'ab' (append, binary) is Python-2 style; under Python 3,
    # csv.writer needs a text-mode handle (open(filename, 'a', newline=''))
    # and the first writerow would raise TypeError. Confirm the target
    # interpreter before changing.
    with open(filename, 'ab') as file:
        writer = csv.writer(file, delimiter=' ')
        total_time_begin = dt.now()
        for i in range(m):
            K = FFT_Ks[i]
            M = 2**K
            for j in range(n):
                SNR_dB = SNR_dBs[j]
                w_estimates = np.zeros(N)
                phi_estimates = np.zeros(N)
                status_bar_progress = 0
                run_time_begin = dt.now()
                for k in range(N):
                    # Draw a fresh noisy signal and FFT-estimate (omega, phi).
                    x_d = sig.x_discrete(SNR_dB)
                    omega_hat, phi_hat, _, _ = fft_estimator.estimator(x_d, M)
                    w_estimates[k] = omega_hat
                    phi_estimates[k] = phi_hat
                    status_bar_progress = utility.print_status_bar(k, status_bar_progress, N)
                # Monte-Carlo statistics over the N runs; omega is converted
                # to frequency by dividing by 2*pi.
                mean_f = np.mean(w_estimates) / (2*np.pi)
                mean_phi = np.mean(phi_estimates)
                var_f = np.var(w_estimates)
                var_phi = np.var(phi_estimates)
                crlb_f = crlb.omega(SNR_dB)
                crlb_phi = crlb.phi(SNR_dB)
                run_time_end = dt.now()
                print("")
                utility.print_execution_time(run_time_begin, run_time_end)
                # Sanity flags: an unbiased estimator's variance cannot fall
                # below the Cramer-Rao lower bound.
                f_estimate_valid = True
                phi_estimate_valid = True
                if var_f < crlb_f:
                    f_estimate_valid = False
                    print("Variance for frequency lower than CRLB!")
                if var_phi < crlb_phi:
                    phi_estimate_valid = False
                    print("Variance for phi lower than CRLB!")
                writer.writerow([SNR_dB, K, crlb_f, var_f, f_estimate_valid, crlb_phi, var_phi, phi_estimate_valid, mean_f, mean_phi])
                print("CONFIG | SNR [dB]: {}, M: 2^{}, true frequency: {}, true phase: {}".format(SNR_dB, K, cfg.f0, cfg.phi))
                print("FREQUENCY | estimated mean: {}, estimated variance: {}, crlb: {}".format(mean_f, var_f, crlb_f))
                print("PHASE | estimated mean: {}, estimated variance: {}, crlb: {}".format(mean_phi, var_phi, crlb_phi))
                print("")
        total_time_end = dt.now()
        utility.print_execution_time(total_time_begin, total_time_end)
if task == 'b':
    # Part (b): refine the FFT estimates with Nelder-Mead optimisation at a
    # fixed FFT size (M = 2**10), swept over SNR only.
    filename = 'data/part_b_run_' + str(run_number) + '_N_' + str(N) + '.csv'
    # NOTE(review): 'ab' (append, binary) is Python-2 style; under Python 3,
    # csv.writer needs a text-mode handle (open(filename, 'a', newline=''))
    # and the first writerow would raise TypeError. Confirm the target
    # interpreter before changing.
    with open(filename, 'ab') as file:
        writer = csv.writer(file, delimiter=' ')
        M = 2**10
        total_time_begin = dt.now()
        for SNR_dB in SNR_dBs:
            w_estimates = np.zeros(N)
            phi_estimates = np.zeros(N)
            status_bar_progress = 0
            run_time_begin = dt.now()
            for i in range(N):
                # FFT estimates seed the Nelder-Mead refinement of each
                # parameter (frequency and phase are refined independently).
                x_d = sig.x_discrete(SNR_dB)
                omega_hat, phi_hat, _, _ = fft_estimator.estimator(x_d, M)
                omega_opt = optimize.minimize(optimizing.frequency_objective_function, omega_hat, method="Nelder-Mead", args=(M, x_d, phi_hat))
                phase_opt = optimize.minimize(optimizing.phase_objective_function, phi_hat, method="Nelder-Mead", args=(x_d, omega_hat))
                w_estimates[i] = omega_opt.x[0]
                phi_estimates[i] = phase_opt.x[0]
                status_bar_progress = utility.print_status_bar(i, status_bar_progress, N)
            run_time_end = dt.now()
            print("")
            utility.print_execution_time(run_time_begin, run_time_end)
            # Monte-Carlo statistics over the N runs; omega is converted to
            # frequency by dividing by 2*pi.
            mean_f = np.mean(w_estimates) / (2*np.pi)
            mean_phi = np.mean(phi_estimates)
            var_f = np.var(w_estimates)
            var_phi = np.var(phi_estimates)
            crlb_f = crlb.omega(SNR_dB)
            crlb_phi = crlb.phi(SNR_dB)
            # Sanity flags: an unbiased estimator's variance cannot fall
            # below the Cramer-Rao lower bound.
            f_estimate_valid = True
            phi_estimate_valid = True
            if var_f < crlb_f:
                f_estimate_valid = False
                print("Variance for f lower than CRLB!")
            if var_phi < crlb_phi:
                phi_estimate_valid = False
                print("Variance for phi lower than CRLB!")
            writer.writerow([SNR_dB, 10, crlb_f, var_f, f_estimate_valid, crlb_phi, var_phi, phi_estimate_valid, mean_f, mean_phi])
            print("CONFIG | SNR [dB]: {}, M: 2^{}, true f: {}, true phase: {}".format(SNR_dB, 10, cfg.f0, cfg.phi))
            print("FREQUENCY | estimated mean: {}, estimated variance: {}, crlb: {}".format(mean_f, var_f, crlb_f))
            print("PHASE | estimated mean: {}, estimated variance: {}, crlb: {}".format(mean_phi, var_phi, crlb_phi))
            print("")
        total_time_end = dt.now()
utility.print_execution_time(total_time_begin, total_time_end) | [
"numpy.mean",
"scripts.utility.print_execution_time",
"os.listdir",
"csv.writer",
"scripts.fft_estimator.estimator",
"scipy.optimize.minimize",
"os.path.isfile",
"datetime.datetime.now",
"numpy.zeros",
"scripts.crlb.phi",
"scripts.utility.print_status_bar",
"scripts.signals.x_discrete",
"num... | [((940, 971), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""" """'}), "(file, delimiter=' ')\n", (950, 971), False, 'import csv\n'), ((1000, 1008), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (1006, 1008), True, 'from datetime import datetime as dt\n'), ((3071, 3079), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (3077, 3079), True, 'from datetime import datetime as dt\n'), ((3088, 3150), 'scripts.utility.print_execution_time', 'utility.print_execution_time', (['total_time_begin', 'total_time_end'], {}), '(total_time_begin, total_time_end)\n', (3116, 3150), False, 'from scripts import utility\n'), ((3302, 3333), 'csv.writer', 'csv.writer', (['file'], {'delimiter': '""" """'}), "(file, delimiter=' ')\n", (3312, 3333), False, 'import csv\n'), ((3388, 3396), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (3394, 3396), True, 'from datetime import datetime as dt\n'), ((5494, 5502), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (5500, 5502), True, 'from datetime import datetime as dt\n'), ((5511, 5573), 'scripts.utility.print_execution_time', 'utility.print_execution_time', (['total_time_begin', 'total_time_end'], {}), '(total_time_begin, total_time_end)\n', (5539, 5573), False, 'from scripts import utility\n'), ((730, 750), 'os.listdir', 'os.listdir', (['"""./data"""'], {}), "('./data')\n", (740, 750), False, 'import os, os.path\n'), ((754, 786), 'os.path.isfile', 'os.path.isfile', (["('./data/' + name)"], {}), "('./data/' + name)\n", (768, 786), False, 'import os, os.path\n'), ((3455, 3466), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3463, 3466), True, 'import numpy as np\n'), ((3495, 3506), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3503, 3506), True, 'import numpy as np\n'), ((3573, 3581), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (3579, 3581), True, 'from datetime import datetime as dt\n'), ((4236, 4244), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (4242, 4244), True, 'from datetime 
import datetime as dt\n'), ((4279, 4337), 'scripts.utility.print_execution_time', 'utility.print_execution_time', (['run_time_begin', 'run_time_end'], {}), '(run_time_begin, run_time_end)\n', (4307, 4337), False, 'from scripts import utility\n'), ((4416, 4438), 'numpy.mean', 'np.mean', (['phi_estimates'], {}), '(phi_estimates)\n', (4423, 4438), True, 'import numpy as np\n'), ((4460, 4479), 'numpy.var', 'np.var', (['w_estimates'], {}), '(w_estimates)\n', (4466, 4479), True, 'import numpy as np\n'), ((4502, 4523), 'numpy.var', 'np.var', (['phi_estimates'], {}), '(phi_estimates)\n', (4508, 4523), True, 'import numpy as np\n'), ((4546, 4564), 'scripts.crlb.omega', 'crlb.omega', (['SNR_dB'], {}), '(SNR_dB)\n', (4556, 4564), False, 'from scripts import crlb\n'), ((4588, 4604), 'scripts.crlb.phi', 'crlb.phi', (['SNR_dB'], {}), '(SNR_dB)\n', (4596, 4604), False, 'from scripts import crlb\n'), ((1182, 1193), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1190, 1193), True, 'import numpy as np\n'), ((1226, 1237), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1234, 1237), True, 'import numpy as np\n'), ((1312, 1320), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (1318, 1320), True, 'from datetime import datetime as dt\n'), ((1761, 1783), 'numpy.mean', 'np.mean', (['phi_estimates'], {}), '(phi_estimates)\n', (1768, 1783), True, 'import numpy as np\n'), ((1809, 1828), 'numpy.var', 'np.var', (['w_estimates'], {}), '(w_estimates)\n', (1815, 1828), True, 'import numpy as np\n'), ((1855, 1876), 'numpy.var', 'np.var', (['phi_estimates'], {}), '(phi_estimates)\n', (1861, 1876), True, 'import numpy as np\n'), ((1903, 1921), 'scripts.crlb.omega', 'crlb.omega', (['SNR_dB'], {}), '(SNR_dB)\n', (1913, 1921), False, 'from scripts import crlb\n'), ((1949, 1965), 'scripts.crlb.phi', 'crlb.phi', (['SNR_dB'], {}), '(SNR_dB)\n', (1957, 1965), False, 'from scripts import crlb\n'), ((1998, 2006), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (2004, 2006), True, 'from datetime 
import datetime as dt\n'), ((2049, 2107), 'scripts.utility.print_execution_time', 'utility.print_execution_time', (['run_time_begin', 'run_time_end'], {}), '(run_time_begin, run_time_end)\n', (2077, 2107), False, 'from scripts import utility\n'), ((3637, 3659), 'scripts.signals.x_discrete', 'sig.x_discrete', (['SNR_dB'], {}), '(SNR_dB)\n', (3651, 3659), True, 'from scripts import signals as sig\n'), ((3704, 3735), 'scripts.fft_estimator.estimator', 'fft_estimator.estimator', (['x_d', 'M'], {}), '(x_d, M)\n', (3727, 3735), False, 'from scripts import fft_estimator\n'), ((3765, 3884), 'scipy.optimize.minimize', 'optimize.minimize', (['optimizing.frequency_objective_function', 'omega_hat'], {'method': '"""Nelder-Mead"""', 'args': '(M, x_d, phi_hat)'}), "(optimizing.frequency_objective_function, omega_hat,\n method='Nelder-Mead', args=(M, x_d, phi_hat))\n", (3782, 3884), False, 'from scipy import optimize\n'), ((3909, 4022), 'scipy.optimize.minimize', 'optimize.minimize', (['optimizing.phase_objective_function', 'phi_hat'], {'method': '"""Nelder-Mead"""', 'args': '(x_d, omega_hat)'}), "(optimizing.phase_objective_function, phi_hat, method=\n 'Nelder-Mead', args=(x_d, omega_hat))\n", (3926, 4022), False, 'from scipy import optimize\n'), ((4156, 4207), 'scripts.utility.print_status_bar', 'utility.print_status_bar', (['i', 'status_bar_progress', 'N'], {}), '(i, status_bar_progress, N)\n', (4180, 4207), False, 'from scripts import utility\n'), ((4360, 4380), 'numpy.mean', 'np.mean', (['w_estimates'], {}), '(w_estimates)\n', (4367, 4380), True, 'import numpy as np\n'), ((1382, 1404), 'scripts.signals.x_discrete', 'sig.x_discrete', (['SNR_dB'], {}), '(SNR_dB)\n', (1396, 1404), True, 'from scripts import signals as sig\n'), ((1453, 1484), 'scripts.fft_estimator.estimator', 'fft_estimator.estimator', (['x_d', 'M'], {}), '(x_d, M)\n', (1476, 1484), False, 'from scripts import fft_estimator\n'), ((1623, 1674), 'scripts.utility.print_status_bar', 'utility.print_status_bar', 
(['k', 'status_bar_progress', 'N'], {}), '(k, status_bar_progress, N)\n', (1647, 1674), False, 'from scripts import utility\n'), ((1701, 1721), 'numpy.mean', 'np.mean', (['w_estimates'], {}), '(w_estimates)\n', (1708, 1721), True, 'import numpy as np\n')] |
from __future__ import annotations
import math
from collections import abc
from functools import partial
from typing import Callable, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, cast
import numpy as np
import tensorflow as tf
from typing_extensions import TypeGuard
# Generic leaf-type parameters for nested container structures.
T1 = TypeVar("T1")
T2 = TypeVar("T2")
# A value of type T1 nested arbitrarily deep inside str-keyed mappings and sequences.
ContainerGeneric = Union[Mapping[str, "ContainerGeneric[T1]"], Sequence["ContainerGeneric[T1]"], T1]
ContainerArrays = ContainerGeneric[Union[np.ndarray, np.number]]  # numpy leaves
ContainerTensors = ContainerGeneric[Union[tf.Tensor, tf.SparseTensor]]  # tensorflow leaves
ContainerAnyTensors = TypeVar("ContainerAnyTensors", ContainerTensors, ContainerArrays)
ArrayLike = Union[np.ndarray, np.number, tf.Tensor, tf.SparseTensor, float]
# (equal?, key/index path to the first mismatch or None when equal)
ComparisonResultT = Tuple[bool, Optional[List["int | str"]]]
DEFAULT_ABSOLUTE_TOLERANCE = 1e-4  # default absolute tolerance for float comparisons
def container_fmap(f: Callable[[T1], T2], elements: ContainerGeneric[T1]) -> ContainerGeneric[T2]:
    """Apply *f* to every leaf of a nested mapping/sequence, preserving structure.

    Mappings map to dicts, tuples to tuples, other non-string sequences to
    lists; strings/bytes and everything else are treated as leaves.
    """
    if isinstance(elements, abc.Mapping):
        return {name: container_fmap(f, entry) for name, entry in elements.items()}
    if isinstance(elements, tuple):
        return tuple(container_fmap(f, entry) for entry in elements)
    if isinstance(elements, abc.Sequence) and not isinstance(elements, (str, bytes)):
        return [container_fmap(f, entry) for entry in elements]
    return f(elements)
def _is_integer_dtype(dtype: np.dtype | tf.dtypes.DType) -> bool:
    """Return True when *dtype* (numpy dtype or tf DType) is an integer type."""
    if not isinstance(dtype, np.dtype):
        # tensorflow DType exposes this directly
        return dtype.is_integer
    return np.issubdtype(dtype, np.integer)
def tensor_equality(
    tensor1: ArrayLike,
    tensor2: ArrayLike,
    atol: float = DEFAULT_ABSOLUTE_TOLERANCE,
    test_env: Optional[tf.test.TestCase] = None,
) -> bool:
    """
    Checks if two tf tensors/numpy arrays are equal. For integral tensors checks for exact
    equality. For float tensors checks for approximate equality with absolute tolerance
    ``atol``. It expects both tensors to have the same dtype and same shape.
    For tf tensors assumes they are eager tensors. Graph tensors need to be evaluated in session
    to numpy array. If you pass in tf.test.TestCase it will work with graph tensors that can
    be evaluated by it.
    Both sides must be the same kind of object, with one exception: scalars can be
    compared against tensors.
    """
    # Densify sparse inputs so the rest of the comparison is uniform.
    # NOTE(review): if only one side is sparse, tf.sparse.to_dense is still
    # called on the dense side -- confirm that is valid for the inputs used.
    if isinstance(tensor1, tf.SparseTensor) or isinstance(tensor2, tf.SparseTensor):
        tensor1 = tf.sparse.to_dense(tensor1)
        tensor2 = tf.sparse.to_dense(tensor2)
    # Two plain Python floats: compare directly.
    if isinstance(tensor1, float) and isinstance(tensor2, float):
        return math.isclose(tensor1, tensor2, abs_tol=atol)
    # Promote a scalar to match the other side's kind (tf constant vs numpy array).
    if isinstance(tensor1, (int, float)):
        if isinstance(tensor2, tf.Tensor):
            tensor1 = tf.constant(tensor1)
        else:
            tensor1 = np.array(tensor1)
    if isinstance(tensor2, (int, float)):
        if isinstance(tensor1, tf.Tensor):
            tensor2 = tf.constant(tensor2)
        else:
            tensor2 = np.array(tensor2)
    assert not isinstance(tensor1, (int, float))
    assert not isinstance(tensor2, (int, float))
    # Mismatched kind, dtype or shape is an immediate inequality.
    if type(tensor1) != type(tensor2):
        return False
    if tensor1.dtype != tensor2.dtype:
        return False
    if tensor1.shape != tensor2.shape:
        return False
    # Materialise both sides as numpy arrays (graph tensors need test_env).
    if test_env:
        assert isinstance(tensor1, tf.Tensor)
        array1 = test_env.evaluate(tensor1)
        array2 = test_env.evaluate(tensor2)
    else:
        if isinstance(tensor1, tf.Tensor) and isinstance(tensor2, tf.Tensor):
            array1 = tensor1.numpy()
            array2 = tensor2.numpy()
        else:
            assert isinstance(tensor1, (np.ndarray, np.number))
            assert isinstance(tensor2, (np.ndarray, np.number))
            array1 = tensor1
            array2 = tensor2
    # Integers: exact match; floats: elementwise isclose (NaNs compare equal).
    if _is_integer_dtype(array1.dtype):
        return bool(np.all(array1 == array2))
    else:
        comparisons = np.isclose(array1, array2, atol=atol, equal_nan=True)
        return bool(comparisons.all())
def extract_nested_key(data: ContainerGeneric[T1], key_path: Iterable[int | str]) -> T1:
result = data
for key in key_path:
if isinstance(key, int):
assert isinstance(result, abc.Sequence)
result = result[key]
else:
assert isinstance(result, abc.Mapping)
result = result[key]
return cast(T1, result)
def _tensor_collection_equality(
    tensors1: ContainerArrays,
    tensors2: ContainerArrays,
    atol: float = DEFAULT_ABSOLUTE_TOLERANCE,
    debug_path: Optional[List[str | int]] = None,
) -> ComparisonResultT:
    """Recursively compare two nested collections of numpy arrays.

    Returns ``(equal, path)`` where ``path`` is the key/index trail to the
    first mismatching leaf, or ``None`` when the collections are equal.
    ``debug_path`` is the trail accumulated so far (internal recursion arg).
    """
    if debug_path is None:
        debug_path = []
    # Kinds must match (subclass in either direction is accepted).
    if not (isinstance(tensors1, type(tensors2)) or isinstance(tensors2, type(tensors1))):
        return (False, debug_path)
    # The ors are here for the type checker. While previous line guarantees that the types are the same
    # the type checker is unable to use that.
    if not isinstance(tensors1, (np.ndarray, dict, list, tuple, np.number)) or not isinstance(
        tensors2, (np.ndarray, dict, list, tuple, np.number)
    ):
        raise TypeError(f"Unexpected type for tensors1: {type(tensors1)}")
    # Leaf case: defer to the single-tensor comparison.
    if isinstance(tensors1, (np.ndarray, np.number)) or isinstance(tensors2, (np.ndarray, np.number)):
        result = tensor_equality(tensors1, tensors2, atol=atol)
        if result:
            return (result, None)
        else:
            return (result, debug_path)
    if len(tensors1) != len(tensors2):
        return (False, debug_path)
    if isinstance(tensors1, dict):
        assert isinstance(tensors2, dict)
        for key, value in tensors1.items():
            if key not in tensors2:
                return (False, debug_path + [key])
            key_result, key_debug_path = _tensor_collection_equality(
                value, tensors2[key], atol=atol, debug_path=debug_path + [key]
            )
            # Short circuit if possible.
            if not key_result:
                return (key_result, key_debug_path)
        return (True, None)
    # Sequence case: compare positionally.
    assert isinstance(tensors2, (list, tuple))
    for key, (tensor1, tensor2) in enumerate(zip(tensors1, tensors2)):
        debug_extension: List[int | str] = [key]
        key_result, key_debug_path = _tensor_collection_equality(
            tensor1, tensor2, atol=atol, debug_path=debug_path + debug_extension
        )
        if not key_result:
            return (key_result, key_debug_path)
    return (True, None)
def evaluate_tensors(tensors: ContainerTensors, test_env: Optional[tf.test.TestCase]) -> ContainerArrays:
    """Materialise every tensor leaf of *tensors* as a numpy value.

    With a ``test_env`` the whole structure is evaluated at once (this also
    handles graph tensors); otherwise each eager tensor's ``.numpy()`` is
    taken leaf by leaf.
    """
    if not test_env:
        return container_fmap(lambda t: t.numpy(), tensors)  # type: ignore
    return test_env.evaluate(tensors)
def _log_tensor_equality_mismatch(
    arrays1: ContainerArrays, arrays2: ContainerArrays, debug_path: Iterable[str | int]
) -> None:
    """Print diagnostics for a failed tensor-collection comparison.

    ``debug_path`` is the key/index trail to the mismatching leaf; an empty
    path means ``arrays1``/``arrays2`` are themselves the mismatching arrays.
    NOTE(review): ``debug_path`` is consumed up to three times, so it must not
    be a one-shot iterator.
    """
    debug_path_str = ",".join(map(str, debug_path))
    array1: np.ndarray | np.number
    array2: np.ndarray | np.number
    if debug_path_str != "":
        print(f"Tensor Collection mismatch occurred at {debug_path_str}")
        array1 = extract_nested_key(arrays1, debug_path)  # type: ignore
        array2 = extract_nested_key(arrays2, debug_path)  # type: ignore
    else:
        assert isinstance(arrays1, (np.ndarray, np.number))
        assert isinstance(arrays2, (np.ndarray, np.number))
        array1 = arrays1
        array2 = arrays2
    # Constant messages below had pointless f-prefixes (no placeholders); dropped.
    print("Mismatched tensors")
    print(array1)
    print(array2)
    shape_match = array1.shape == array2.shape
    dtype_match = array1.dtype == array2.dtype
    if not shape_match:
        print("Mismatch shapes")
        print(f"Shape 1: {array1.shape} Shape 2: {array2.shape}")
    if not dtype_match:
        print("Mismatch dtypes")
        print(f"Dtype 1: {array1.dtype} Dtype 2: {array2.dtype}")
    if shape_match and dtype_match:
        # Only meaningful when the arrays are directly comparable.
        diff = np.absolute(array1 - array2)  # type: ignore
        print("Maximum absolute difference: ", diff.max())
        print("Index Maximum Difference: ", np.unravel_index(diff.argmax(), diff.shape))
def check_container_tensors(tensors: ContainerTensors | ContainerArrays) -> TypeGuard[ContainerTensors]:
    """Decide whether a nested container holds tensorflow tensors (vs numpy).

    Only the first leaf is probed -- mixed containers are not detected.
    Empty containers are treated as tensor containers.
    """
    if not isinstance(tensors, (abc.Mapping, abc.Sequence)):
        return isinstance(tensors, (tf.Tensor, tf.SparseTensor))
    if len(tensors) == 0:
        return True
    leaves = tensors.values() if isinstance(tensors, abc.Mapping) else tensors
    return check_container_tensors(next(iter(leaves)))
def tensor_collection_equality(
    tensors1: ContainerAnyTensors,
    tensors2: ContainerAnyTensors,
    atol: float = DEFAULT_ABSOLUTE_TOLERANCE,
    test_env: Optional[tf.test.TestCase] = None,
    log_debug_path: bool = True,
) -> bool:
    """
    Compares two collections of tensors. Tensors can either be numpy arrays or tensorflow tensors.
    Each collection must be internally consistent (all numpy or all tensorflow) -- mixing the two
    in one collection will error out. The two collections must also agree with each other in this
    respect; an assertion error is raised if they do not. If the tensors are graph tensors then
    test_env is required.
    By default a failed comparison prints information about the mismatch; pass
    log_debug_path=False if you only need the return value.
    Dense and sparse tensors are both supported. Sparse tensors are converted to dense tensors.
    This does not check whether the two collections are consistently sparse or dense.
    """
    t1_is_tf = check_container_tensors(tensors1)
    t2_is_tf = check_container_tensors(tensors2)
    assert t1_is_tf == t2_is_tf
    arrays1 = evaluate_tensors(tensors1, test_env) if t1_is_tf else cast(ContainerArrays, tensors1)
    arrays2 = evaluate_tensors(tensors2, test_env) if t2_is_tf else cast(ContainerArrays, tensors2)
    equal, mismatch_path = _tensor_collection_equality(arrays1, arrays2, atol=atol)
    if not equal and log_debug_path:
        assert mismatch_path is not None
        _log_tensor_equality_mismatch(arrays1, arrays2, mismatch_path)
    return equal
| [
"numpy.all",
"numpy.isclose",
"math.isclose",
"numpy.absolute",
"numpy.issubdtype",
"numpy.array",
"tensorflow.constant",
"functools.partial",
"tensorflow.sparse.to_dense",
"typing.cast",
"typing.TypeVar"
] | [((298, 311), 'typing.TypeVar', 'TypeVar', (['"""T1"""'], {}), "('T1')\n", (305, 311), False, 'from typing import Callable, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, cast\n'), ((317, 330), 'typing.TypeVar', 'TypeVar', (['"""T2"""'], {}), "('T2')\n", (324, 330), False, 'from typing import Callable, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, cast\n'), ((590, 655), 'typing.TypeVar', 'TypeVar', (['"""ContainerAnyTensors"""', 'ContainerTensors', 'ContainerArrays'], {}), "('ContainerAnyTensors', ContainerTensors, ContainerArrays)\n", (597, 655), False, 'from typing import Callable, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, cast\n'), ((4298, 4314), 'typing.cast', 'cast', (['T1', 'result'], {}), '(T1, result)\n', (4302, 4314), False, 'from typing import Callable, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, cast\n'), ((1439, 1471), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (1452, 1471), True, 'import numpy as np\n'), ((2361, 2388), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['tensor1'], {}), '(tensor1)\n', (2379, 2388), True, 'import tensorflow as tf\n'), ((2407, 2434), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['tensor2'], {}), '(tensor2)\n', (2425, 2434), True, 'import tensorflow as tf\n'), ((2517, 2561), 'math.isclose', 'math.isclose', (['tensor1', 'tensor2'], {'abs_tol': 'atol'}), '(tensor1, tensor2, abs_tol=atol)\n', (2529, 2561), False, 'import math\n'), ((3843, 3896), 'numpy.isclose', 'np.isclose', (['array1', 'array2'], {'atol': 'atol', 'equal_nan': '(True)'}), '(array1, array2, atol=atol, equal_nan=True)\n', (3853, 3896), True, 'import numpy as np\n'), ((7818, 7846), 'numpy.absolute', 'np.absolute', (['(array1 - array2)'], {}), '(array1 - array2)\n', (7829, 7846), True, 'import numpy as np\n'), ((9807, 9838), 'typing.cast', 'cast', (['ContainerArrays', 'tensors1'], {}), '(ContainerArrays, 
tensors1)\n', (9811, 9838), False, 'from typing import Callable, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, cast\n'), ((9965, 9996), 'typing.cast', 'cast', (['ContainerArrays', 'tensors2'], {}), '(ContainerArrays, tensors2)\n', (9969, 9996), False, 'from typing import Callable, Iterable, List, Mapping, Optional, Sequence, Tuple, TypeVar, Union, cast\n'), ((2670, 2690), 'tensorflow.constant', 'tf.constant', (['tensor1'], {}), '(tensor1)\n', (2681, 2690), True, 'import tensorflow as tf\n'), ((2727, 2744), 'numpy.array', 'np.array', (['tensor1'], {}), '(tensor1)\n', (2735, 2744), True, 'import numpy as np\n'), ((2853, 2873), 'tensorflow.constant', 'tf.constant', (['tensor2'], {}), '(tensor2)\n', (2864, 2873), True, 'import tensorflow as tf\n'), ((2910, 2927), 'numpy.array', 'np.array', (['tensor2'], {}), '(tensor2)\n', (2918, 2927), True, 'import numpy as np\n'), ((3785, 3809), 'numpy.all', 'np.all', (['(array1 == array2)'], {}), '(array1 == array2)\n', (3791, 3809), True, 'import numpy as np\n'), ((1110, 1136), 'functools.partial', 'partial', (['container_fmap', 'f'], {}), '(container_fmap, f)\n', (1117, 1136), False, 'from functools import partial\n')] |
# threads.py
#
import time
import pickle
from datetime import datetime
import numpy as np
from PyQt5.QtCore import QThread
from .. import __version__
from ..fly import Fly
from ..ts import FixedCourtshipTrackingSummary
from .tracking import *
class TrackingThread(QThread):
    """Worker thread to run tracking algorithm.
    Parameters
    ----------
    video_settings : list of TrackingSettings
        Each TrackingSettings object should be completely filled before
        creating an instance of this object, and running this thread.
    logger : QTextEdit
        To store information about video being currently tracked.
        This may need to be changed to prevent "QObject::connect: Cannot
        queue arguments of type 'QTextBlock' from being displayed.
    progress : pyqtSignal-like
        Emitted with (percent_complete, status_text) after every frame.
    NOTE(review): uses `xrange` and `long`, so this module targets Python 2 --
    confirm before porting to Python 3.
    """
    def __init__(self, video_settings, logger, progress, parent=None):
        super(TrackingThread, self).__init__(parent)
        self.video_settings = video_settings
        self.logger = logger
        self.tracking_progress = progress
    def run(self):
        """Track each configured video sequentially, saving one summary per video."""
        for ix in xrange(len(self.video_settings)):
            start_time = time.time()
            settings = self.video_settings[ix]
            video = settings.video
            n_frames = video.get_n_frames()
            timestamps = video.get_all_timestamps()
            # frames-per-second from the mean inter-frame interval
            fps = (1. / np.mean(np.diff(timestamps)))
            male = Fly()
            female = Fly()
            tracking_summary = FixedCourtshipTrackingSummary()
            male.init_params(n_frames)
            female.init_params(n_frames)
            male.timestamps = timestamps
            female.timestamps = timestamps
            # load video attributes into FixedCourtshipTrackingSummary
            tracking_summary.video.filename = settings.video_file
            tracking_summary.video.timestamps = timestamps
            tracking_summary.video.fps = fps
            tracking_summary.video.duration_frames = n_frames
            tracking_summary.video.duration_seconds = (n_frames * 1.) / fps
            tracking_summary.video.start_time = datetime.fromtimestamp(
                timestamps[0]).strftime('%Y-%m-%d %H:%M:%S')
            tracking_summary.video.end_time = datetime.fromtimestamp(
                timestamps[-1]).strftime('%Y-%m-%d %H:%M:%S')
            tracking_summary.video.pixels_per_mm = settings.arena.pixels_to_mm
            # load arena attributes into FixedCourtshipTrackingSummary
            tracking_summary.arena.shape = 'circular'
            tracking_summary.arena.center_pixel_rr = settings.arena.center[0]
            tracking_summary.arena.center_pixel_cc = settings.arena.center[1]
            tracking_summary.arena.radius_mm = settings.arena.radius
            tracking_summary.arena.diameter_mm = 2 * settings.arena.radius
            # load software attributes into FixedCourtshipTrackingSummary
            tracking_summary.software.tight_threshold = settings.tight_threshold
            tracking_summary.software.loose_threshold = settings.loose_threshold
            tracking_summary.software.date_tracked = datetime.today(
                ).strftime('%Y-%m-%d %H:%M:%S')
            tracking_summary.software.version = __version__
            # set the `group` attribute for the FixedCourtshipTrackingSummary
            tracking_summary.group = settings.group
            self.logger.append(
                'Tracking started for video: {} \nStart Time: {}'.format(
                    settings.video_file,
                    time.strftime('%H:%M:%S', time.localtime(start_time))
                ))
            # get the location and region properties that define
            # the fixed female.
            f_props, f_head, f_rear = find_female(
                image=settings.arena.background_image,
                female=settings.female,
                lp_threshold=settings.tight_threshold
            )
            # update female based on props we just found --
            # this ensures that the ellipse used to mask the female is
            # not biased by variation in user-defined ellipses.
            tighten_female_ellipse(
                female=settings.female,
                female_props=f_props
            )
            # loop through each frame in the video, and find the male.
            for frame_ix in xrange(n_frames):
                frame_ix = long(frame_ix)
                frame, ts = video.get_frame(frame_ix)
                try:
                    male_props = find_male(
                        image=frame,
                        female=settings.female,
                        arena=settings.arena,
                        lp_threshold=settings.tight_threshold
                    )
                except NoPropsDetected as NPD:
                    # male body not found in this frame: log and skip it
                    self.logger.append(
                        '\t' + NPD.message +
                        ' Body @ frame {}'.format(frame_ix)
                    )
                    continue
                wing_props = find_wings(
                    image=frame,
                    female=settings.female,
                    arena=settings.arena,
                    male_props=male_props,
                    loose_threshold=settings.loose_threshold,
                    logger=self.logger,
                    frame_ix=frame_ix
                )
                # if wing_props is None:
                #     # male.timestamps[frame_ix] = ts
                #     # female.timestamps[frame_ix] = ts
                #     continue
                male.body.centroid.row[frame_ix] = male_props.centroid[0]
                male.body.centroid.col[frame_ix] = male_props.centroid[1]
                male.body.orientation[frame_ix] = male_props.orientation
                set_male_props(male, wing_props, frame_ix)
                set_female_props(female, f_props, f_head, f_rear, frame_ix)
                # report progress to the GUI after every frame
                percent_complete = (frame_ix + 1.) / n_frames * 100
                self.tracking_progress.emit(
                    percent_complete,
                    'Tracking video {}/{}.'.format(
                        ix + 1, len(self.video_settings)))
            # update the tracking settings dictionary with male and female items.
            # tracking_settings.update({'male': male, 'female': female})
            # tracking_summary.set_attributes(**tracking_settings)
            tracking_summary.male = male
            tracking_summary.female = female
            # save as .xlsx or pickled .fcts depending on the chosen extension
            save_file = settings.save_file
            save_type = save_file.split('.')[-1]
            if save_type == 'xlsx':
                tracking_summary.to_xlsx(save_file)
            elif save_type == 'fcts':
                with open(save_file, 'wb') as SAVE:
                    pickle.dump(tracking_summary, SAVE)
            end_time = time.time()
            elapsed_time = end_time - start_time
            time_hrs = int(elapsed_time / 3600)
            time_mins = int((elapsed_time - time_hrs * 3600) / 60)
            time_secs = int(elapsed_time - time_hrs * 3600 - time_mins * 60)
            self.logger.append(
                'End Time: {}\nTotal Time Elapse: {}'.format(
                    time.strftime('%H:%M:%S', time.localtime(end_time)),
                    '{:02d}:{:02d}:{:02d}'.format(
                        time_hrs, time_mins, time_secs))
            )
self.logger.append('TRACKING COMPLETE') | [
"datetime.datetime.fromtimestamp",
"pickle.dump",
"numpy.diff",
"datetime.datetime.today",
"time.localtime",
"time.time"
] | [((1170, 1181), 'time.time', 'time.time', ([], {}), '()\n', (1179, 1181), False, 'import time\n'), ((7048, 7059), 'time.time', 'time.time', ([], {}), '()\n', (7057, 7059), False, 'import time\n'), ((1399, 1418), 'numpy.diff', 'np.diff', (['timestamps'], {}), '(timestamps)\n', (1406, 1418), True, 'import numpy as np\n'), ((2147, 2184), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['timestamps[0]'], {}), '(timestamps[0])\n', (2169, 2184), False, 'from datetime import datetime\n'), ((2280, 2318), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['timestamps[-1]'], {}), '(timestamps[-1])\n', (2302, 2318), False, 'from datetime import datetime\n'), ((3175, 3191), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (3189, 3191), False, 'from datetime import datetime\n'), ((3634, 3660), 'time.localtime', 'time.localtime', (['start_time'], {}), '(start_time)\n', (3648, 3660), False, 'import time\n'), ((6986, 7021), 'pickle.dump', 'pickle.dump', (['tracking_summary', 'SAVE'], {}), '(tracking_summary, SAVE)\n', (6997, 7021), False, 'import pickle\n'), ((7450, 7474), 'time.localtime', 'time.localtime', (['end_time'], {}), '(end_time)\n', (7464, 7474), False, 'import time\n')] |
"""
To run this function, we need to change her cprand function because it doesn't return exact error for now
"""
import numpy as np
import tensorly as tl
from src._base import random_init_fac
from src._comparaison import init_factors
from src._hercprand import her_CPRAND
import copy
def test_nbrestart(i):
    """
    Compute the restart percentage for 4 cases :
    1 - simple case with exact error
    2 - simple case with estimated error
    3 - complicated case with exact error
    4 - complicated case with estimated error
    In each case, we simulate nb_rand noised I*J*K rank r random tensors.
    For each tensor, we do 5 factor matrices initializations.

    Parameters
    ----------
    i : int
        choice of case (1-4).

    Returns
    -------
    float
        mean value of nbrestart.
    """
    I = 50
    J = 50
    K = 50
    r = 10  # rank
    n_samples = int(10 * r * np.log(r) + 1)  # nb of randomized samples
    nb_rand = 10  # nb of random tensor initializations
    list_pct = []
    # BUG FIX: the loop variable used to be named `i`, shadowing the `i`
    # parameter that selects the case -- every iteration therefore ran the
    # case matching the loop index instead of the requested one. Renamed
    # to `trial` so the parameter keeps its meaning.
    for trial in range(nb_rand):
        # Random initialization of a noised cp_tensor (seeded per trial).
        np.random.seed(trial)
        if i == 1 or i == 2:
            fac_true, noise = init_factors(I, J, K, r, scale=False)
        else:
            fac_true, noise = init_factors(I, J, K, r, scale=True)
        t = tl.cp_to_tensor((None, fac_true)) + noise
        for j in range(5):
            factors = random_init_fac(t, r)
            if i == 1:
                # simple case with exact error
                weights, factors, it, error, _, pct = her_CPRAND(t, r, n_samples, factors=copy.deepcopy(factors), exact_err=True, it_max=500, err_it_max=100)
                list_pct.append(pct)
            if i == 2:
                # simple case with estimated error
                weights, factors, it, _, error, pct = her_CPRAND(t, r, n_samples, factors=copy.deepcopy(factors), exact_err=False, it_max=500, err_it_max=100)
                list_pct.append(pct)
            if i == 3:
                # complicated case with exact error
                weights, factors, it, error, _, pct = her_CPRAND(t, r, n_samples, factors=copy.deepcopy(factors), exact_err=True, it_max=500, err_it_max=100)
                list_pct.append(pct)
            if i == 4:
                # complicated case with estimated error
                weights, factors, it, _, error, pct = her_CPRAND(t, r, n_samples, factors=copy.deepcopy(factors), exact_err=False, it_max=500, err_it_max=400)
                list_pct.append(pct)
    return np.mean(list_pct)
| [
"numpy.mean",
"src._base.random_init_fac",
"numpy.log",
"numpy.random.seed",
"copy.deepcopy",
"tensorly.cp_to_tensor",
"src._comparaison.init_factors"
] | [((2404, 2421), 'numpy.mean', 'np.mean', (['list_pct'], {}), '(list_pct)\n', (2411, 2421), True, 'import numpy as np\n'), ((1073, 1090), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (1087, 1090), True, 'import numpy as np\n'), ((1143, 1180), 'src._comparaison.init_factors', 'init_factors', (['I', 'J', 'K', 'r'], {'scale': '(False)'}), '(I, J, K, r, scale=False)\n', (1155, 1180), False, 'from src._comparaison import init_factors\n'), ((1219, 1255), 'src._comparaison.init_factors', 'init_factors', (['I', 'J', 'K', 'r'], {'scale': '(True)'}), '(I, J, K, r, scale=True)\n', (1231, 1255), False, 'from src._comparaison import init_factors\n'), ((1262, 1295), 'tensorly.cp_to_tensor', 'tl.cp_to_tensor', (['(None, fac_true)'], {}), '((None, fac_true))\n', (1277, 1295), True, 'import tensorly as tl\n'), ((1348, 1369), 'src._base.random_init_fac', 'random_init_fac', (['t', 'r'], {}), '(t, r)\n', (1363, 1369), False, 'from src._base import random_init_fac\n'), ((878, 887), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (884, 887), True, 'import numpy as np\n'), ((1518, 1540), 'copy.deepcopy', 'copy.deepcopy', (['factors'], {}), '(factors)\n', (1531, 1540), False, 'import copy\n'), ((1773, 1795), 'copy.deepcopy', 'copy.deepcopy', (['factors'], {}), '(factors)\n', (1786, 1795), False, 'import copy\n'), ((2030, 2052), 'copy.deepcopy', 'copy.deepcopy', (['factors'], {}), '(factors)\n', (2043, 2052), False, 'import copy\n'), ((2290, 2312), 'copy.deepcopy', 'copy.deepcopy', (['factors'], {}), '(factors)\n', (2303, 2312), False, 'import copy\n')] |
import json
import os
import sys

import numpy as np
from decouple import config
from django.http import Http404, JsonResponse

from . import data as dataHp
from .analyze.analyzeSignal import calcPowers
from .analyze.pereiraChangeOfMean import cleanLikelihoods, getChangePoints, pereiraLikelihood
from .websocket import wsManager
def indicesInDoubleArray(array2, value, thres):
    """Locate the entry of a 2D list closest to *value*.

    Only entries with absolute distance strictly below *thres* qualify;
    returns (-1, -1) when no entry is close enough. Ties keep the first
    entry found (row-major order).
    """
    best = (-1, -1)
    bestDist = float("inf")
    for row, inner in enumerate(array2):
        for col, entry in enumerate(inner):
            dist = abs(entry - value)
            if dist < thres and dist < bestDist:
                bestDist = dist
                best = (row, col)
    return best
def findEvents(power, thres, pre, post, voting, minDist, m):
    """Detect state-change indices in *power* via Pereira change-of-mean likelihoods.

    All window lengths (pre/post/voting/minDist) are given in samples.
    """
    likelihoods = pereiraLikelihood(
        power,
        threshold=thres,
        preEventLength=pre,
        postEventLength=post,
        linearFactor=m,
        verbose=True,
    )
    # NOTE(review): a cleanLikelihoods(likelihoods, 5*threshold) pass was
    # present upstream but intentionally disabled.
    return getChangePoints(power, likelihoods, windowSize=voting, minDist=minDist)
def findUniqueStates(power, changeIndices, thres, minDist):
    """Split *power* into states at *changeIndices* and cluster them by mean power.

    Parameters
    ----------
    power : sequence of float
        Power signal in watts.
    changeIndices : list of int
        Sample indices of detected state changes (ascending).
    thres : float
        Two state means further apart than this belong to different clusters.
    minDist : int
        Samples to skip after a change before the signal is considered steady.

    Returns
    -------
    list of dict
        One dict per state with keys ``index``, ``endIndex``, ``ssIndex``,
        ``ssEndIndex``, ``mean`` and ``stateID`` (cluster id).
    """
    LINE_NOISE = 1.0  # means at or below this are clamped to 0 (baseline noise)
    # Build the state sequence from consecutive change indices
    # (start state, states between changes, end state).
    stateSequence = [{'index': 0, 'endIndex': changeIndices[0] if len(changeIndices) > 0 else len(power)}]
    for i, change in enumerate(changeIndices[:-1]):
        stateSequence.append({'index': change, 'endIndex': changeIndices[i + 1]})
    if len(changeIndices) > 0:
        stateSequence.append({'index': changeIndices[-1], 'endIndex': len(power) - 1})
    # Steady-state window: skip minDist samples after the change and
    # minDist/2 before the next one (always at least one sample long).
    for state in stateSequence:
        state['ssIndex'] = int(state['index'] + minDist)
        state['ssEndIndex'] = int(max(state['endIndex'] - minDist / 2, state['ssIndex'] + 1))
    # Mean power of each steady-state window.
    for state in stateSequence:
        if state['ssIndex'] is None or state['ssEndIndex'] is None or state['ssEndIndex'] - state['ssIndex'] < 1:
            state['mean'] = None
        else:
            state['mean'] = np.mean(power[state['ssIndex']:state['ssEndIndex']])
            if state['mean'] <= LINE_NOISE:
                state['mean'] = 0
    # Cluster the states: sort the means and open a new cluster whenever
    # the gap between neighbouring means exceeds thres.
    means = sorted(state['mean'] for state in stateSequence)
    cluster = 0
    clusters = [0]
    for i in range(1, len(means)):
        if abs(means[i - 1] - means[i]) > thres:
            cluster += 1
        clusters.append(cluster)
    for state in stateSequence:
        state['stateID'] = clusters[means.index(state['mean'])]
    return stateSequence
def autoLabel(request):
    """Django view: detect appliance events in the session's power data and return labels.

    POST only. Reads tuning parameters from the JSON body (``parameter`` dict),
    loads the session's data, optionally resamples it, runs event detection and
    state clustering, and responds with ``{"labels": [...]}`` (plus an optional
    ``msg``). Progress is streamed to the client via ``wsManager``.
    """
    if request.method != "POST":
        # BUG FIX: the original line was the bare expression `Http404`,
        # which never raised anything (it crashed with NameError instead).
        raise Http404
    response = {}
    data = json.loads(request.body)
    parameter = data["parameter"]
    sessionID = request.session.session_key
    sessionData = request.session.get('dataInfo', {})
    if sessionData["type"] == "fired":
        wsManager.sendStatus(sessionID, "Loading 50Hz power data...", percent=10)
    dataDict = dataHp.getSessionData(sessionID, sessionData)
    # Prefer apparent/active power channels if present in the uploaded data.
    usablePower = ["s", "s_l1", "p", "p_l1"]
    usableKeys = list(set(usablePower) & set(dataDict["measures"]))
    if len(usableKeys) < 1:
        if "v" in dataDict["measures"] and "i" in dataDict["measures"]:
            # Fall back to computing power from voltage and current.
            p, q, s = calcPowers(dataDict["data"]["v"], dataDict["data"]["i"], dataDict["samplingrate"])
            power = s
            response["msg"] = "Calculated apparent power using Current and Voltage"
        else:
            response["msg"] = "Could not find power, or voltage and current in data. Name it as \"p\",\"s\" or \"v\",\"i\".\n"
            response["msg"] += "If you have electricity data of multiple supply legs, name it as \"<measure>_l1\", \"<measure>_l2\", ... accordingly."
            return JsonResponse(response)
    else:
        power = list(dataDict["data"][sorted(usableKeys)[-1]])
    sr = dataDict["samplingrate"]
    # We only do this at a max samplingrate of 50 Hz
    if sr > 50:
        wsManager.sendStatus(sessionID, text="Resampling to 50Hz...", percent=15)
        power, timestamps = dataHp.resampleDict(dataDict, sorted(usableKeys)[-1], 50, forceEvenRate=True)
        sr = 50
    # Optional user-requested samplingrate (-1 means "keep as is").
    newSr = None
    if "sr" in parameter: newSr = float(parameter["sr"])
    if (newSr is not None and newSr != -1) or "ts" in dataDict:
        if "ts" in dataDict and newSr is None: newSr = max(1/3.0, dataDict["samplingrate"])
        wsManager.sendStatus(sessionID, text="Resampling to "+ str(round(newSr, 2)) + "Hz...", percent=17)
        power, timestamps = dataHp.resampleDict(dataDict, sorted(usableKeys)[-1], newSr, forceEvenRate=True)
        sr = newSr
    # Detection parameters: durations arrive in seconds, are converted to
    # samples at the working samplingrate, and floored to usable minimums.
    thres = 5.0
    if "thres" in parameter: thres = float(parameter["thres"])
    thres = max(thres, 0.1)
    pre = 1.0*sr
    if "pre" in parameter: pre = int(float(parameter["pre"])*sr)
    pre = max(pre, 2)
    post = 1.0*sr
    if "post" in parameter: post = int(float(parameter["post"])*sr)
    post = max(post, 2)
    voting = 2.0*sr
    if "voting" in parameter: voting = int(float(parameter["voting"])*sr)
    voting = max(voting, 1)
    minDist = 1.0*sr
    if "minDist" in parameter: minDist = int(float(parameter["minDist"])*sr)
    minDist = max(minDist, 1)
    m = 0.005
    if "linearCoeff" in parameter: m = float(parameter["linearCoeff"])
    print("sr: {}Hz, thres: {}W, pre: {}samples, post: {}:samples, voting: {}samples, minDist: {} samples, m:{}".format(sr, thres, pre, post, voting, minDist, m), flush=True)
    wsManager.sendStatus(sessionID, "Finding Events...", percent=20)
    changeIndices = findEvents(power, thres, pre, post, voting, minDist, m)
    wsManager.sendStatus(sessionID, "Clustering Events...", percent=70)
    stateSequence = findUniqueStates(power, changeIndices, thres, minDist)
    if len(changeIndices) == 0:
        response["msg"] = "No Changes found in signal..."
    if len(changeIndices) >= 200:
        response["msg"] = "Too many events found, you may want to change settings"
        # NOTE(review): stateSequence was already built from the full index
        # list above, so the labels below still reflect it -- confirm intended.
        changeIndices = []
    wsManager.sendStatus(sessionID, "Generating Labels...")
    # Convert state start indices to timestamps relative to the recording start.
    ts = 0
    if "timestamp" in dataDict: ts = dataDict["timestamp"]
    labels = [{"startTs": ts+(float(i["index"]/sr)), "label":"S" + str(i["stateID"])} for i in stateSequence]
    response["labels"] = labels
    return JsonResponse(response)
| [
"numpy.mean",
"json.loads",
"django.http.JsonResponse"
] | [((4216, 4240), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (4226, 4240), False, 'import json\n'), ((8077, 8099), 'django.http.JsonResponse', 'JsonResponse', (['response'], {}), '(response)\n', (8089, 8099), False, 'from django.http import JsonResponse\n'), ((2371, 2445), 'numpy.mean', 'np.mean', (["power[stateSequence[i]['ssIndex']:stateSequence[i]['ssEndIndex']]"], {}), "(power[stateSequence[i]['ssIndex']:stateSequence[i]['ssEndIndex']])\n", (2378, 2445), True, 'import numpy as np\n'), ((5311, 5333), 'django.http.JsonResponse', 'JsonResponse', (['response'], {}), '(response)\n', (5323, 5333), False, 'from django.http import JsonResponse\n')] |
"""
Simplistic implementation of the two-layer neural network.
Training method is stochastic (online) gradient descent with momentum.
As an example it computes XOR for given input.
"""
import numpy
import time
class Dataset:
    """Random binary dataset for the bitwise-NOT ("XOR with 1") toy task.

    Attributes:
        X: (n_samples, n_in) array of Bernoulli(0.5) bits.
        T: element-wise complement of X (the training targets).
    """
    def __init__(self, n_in, n_samples):
        """Draw ``n_samples`` rows of ``n_in`` random bits and their complements."""
        bits = numpy.random.binomial(1, 0.5, (n_samples, n_in))
        self.X = bits
        self.T = 1 - bits
class NeuralNet:
    """Two-layer (single hidden layer) neural network.

    Details:
    - tanh activation for hidden layer
    - sigmoid activation for output layer
    - cross-entropy loss

    Training (online SGD with momentum) runs inside the constructor.
    """
    def __init__(self, dataset):
        """Initialize weights and train on ``dataset``.

        ``dataset`` must expose ``X`` and ``T`` arrays of shape
        (n_samples, 10), since the layer sizes are hard-coded to 10.
        """
        learning_rate = 0.01
        momentum = 0.9
        n_hidden = 10
        n_in = 10
        n_out = 10
        # fixed seed keeps weight initialization (and training) reproducible
        numpy.random.seed(0)
        self.V = numpy.random.normal(scale=0.1, size=(n_in, n_hidden))
        self.W = numpy.random.normal(scale=0.1, size=(n_hidden, n_out))
        self.bv = numpy.zeros(n_hidden)
        self.bw = numpy.zeros(n_out)
        params = [self.V, self.W, self.bv, self.bw]
        X, T = dataset.X, dataset.T
        # train
        for epoch in range(100):
            err, upd = [], [0] * len(params)
            # BUGFIX: time.clock() was removed in Python 3.8;
            # time.perf_counter() is the documented replacement.
            t0 = time.perf_counter()
            for i in range(X.shape[0]):
                loss, grad = self.update(X[i], T[i], *params)
                # Apply the momentum buffer from the previous sample, then
                # refresh it with this sample's gradient (one-step delayed
                # update, preserved from the original implementation).
                # The in-place "-=" mutates self.V/self.W/self.bv/self.bw.
                for j in range(len(params)):
                    params[j] -= upd[j]
                for j in range(len(params)):
                    upd[j] = learning_rate * grad[j] + momentum * upd[j]
                err.append(loss)
            print('Epoch: {:d}, Loss: {:.8f}, Time: {:.4f}s'.format(
                epoch, numpy.mean(err), time.perf_counter() - t0))
    def __call__(self, x):
        """Forward pass: return hard 0/1 predictions for input vector ``x``."""
        A = numpy.dot(x, self.V) + self.bv
        B = numpy.dot(numpy.tanh(A), self.W) + self.bw
        return (self.sigmoid(B) > 0.5).astype(int)
    def sigmoid(self, x):
        """Logistic sigmoid: 1 / (1 + e^-x)."""
        return 1.0 / (1.0 + numpy.exp(-x))
    def tanh_prime(self, x):
        """Derivative of tanh: 1 - tanh(x)^2."""
        return 1 - numpy.tanh(x)**2
    def update(self, x, t, V, W, bv, bw):
        """One forward/backward pass for a single sample.

        Returns (loss, (dV, dW, Ev, Ew)); the per-layer errors Ev/Ew
        double as the gradients for the biases bv/bw.
        """
        # forward
        A = numpy.dot(x, V) + bv
        Z = numpy.tanh(A)
        B = numpy.dot(Z, W) + bw
        Y = self.sigmoid(B)
        # backward
        Ew = Y - t
        Ev = self.tanh_prime(A) * numpy.dot(W, Ew)
        dW = numpy.outer(Z, Ew)
        dV = numpy.outer(x, Ev)
        # binary cross-entropy loss
        loss = -numpy.mean(t * numpy.log(Y) + (1 - t) * numpy.log(1 - Y))
        return loss, (dV, dW, Ev, Ew)
if __name__ == '__main__':
    # Build a random binary dataset, train the network on it, then
    # classify one fresh random bit-vector.
    training_data = Dataset(10, 300)
    model = NeuralNet(training_data)
    sample = numpy.random.binomial(1, 0.5, 10)
    print('XOR Prediction:', model(sample))
| [
"numpy.random.normal",
"numpy.mean",
"time.clock",
"numpy.log",
"numpy.tanh",
"numpy.exp",
"numpy.zeros",
"numpy.outer",
"numpy.dot",
"numpy.random.seed",
"numpy.random.binomial"
] | [((369, 417), 'numpy.random.binomial', 'numpy.random.binomial', (['(1)', '(0.5)', '(n_samples, n_in)'], {}), '(1, 0.5, (n_samples, n_in))\n', (390, 417), False, 'import numpy\n'), ((809, 829), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (826, 829), False, 'import numpy\n'), ((847, 900), 'numpy.random.normal', 'numpy.random.normal', ([], {'scale': '(0.1)', 'size': '(n_in, n_hidden)'}), '(scale=0.1, size=(n_in, n_hidden))\n', (866, 900), False, 'import numpy\n'), ((918, 972), 'numpy.random.normal', 'numpy.random.normal', ([], {'scale': '(0.1)', 'size': '(n_hidden, n_out)'}), '(scale=0.1, size=(n_hidden, n_out))\n', (937, 972), False, 'import numpy\n'), ((991, 1012), 'numpy.zeros', 'numpy.zeros', (['n_hidden'], {}), '(n_hidden)\n', (1002, 1012), False, 'import numpy\n'), ((1031, 1049), 'numpy.zeros', 'numpy.zeros', (['n_out'], {}), '(n_out)\n', (1042, 1049), False, 'import numpy\n'), ((2387, 2400), 'numpy.tanh', 'numpy.tanh', (['A'], {}), '(A)\n', (2397, 2400), False, 'import numpy\n'), ((2564, 2582), 'numpy.outer', 'numpy.outer', (['Z', 'Ew'], {}), '(Z, Ew)\n', (2575, 2582), False, 'import numpy\n'), ((2596, 2614), 'numpy.outer', 'numpy.outer', (['x', 'Ev'], {}), '(x, Ev)\n', (2607, 2614), False, 'import numpy\n'), ((2852, 2885), 'numpy.random.binomial', 'numpy.random.binomial', (['(1)', '(0.5)', '(10)'], {}), '(1, 0.5, 10)\n', (2873, 2885), False, 'import numpy\n'), ((1249, 1261), 'time.clock', 'time.clock', ([], {}), '()\n', (1259, 1261), False, 'import time\n'), ((1826, 1846), 'numpy.dot', 'numpy.dot', (['x', 'self.V'], {}), '(x, self.V)\n', (1835, 1846), False, 'import numpy\n'), ((2354, 2369), 'numpy.dot', 'numpy.dot', (['x', 'V'], {}), '(x, V)\n', (2363, 2369), False, 'import numpy\n'), ((2413, 2428), 'numpy.dot', 'numpy.dot', (['Z', 'W'], {}), '(Z, W)\n', (2422, 2428), False, 'import numpy\n'), ((2534, 2550), 'numpy.dot', 'numpy.dot', (['W', 'Ew'], {}), '(W, Ew)\n', (2543, 2550), False, 'import numpy\n'), ((1879, 1892), 'numpy.tanh', 
'numpy.tanh', (['A'], {}), '(A)\n', (1889, 1892), False, 'import numpy\n'), ((2070, 2083), 'numpy.exp', 'numpy.exp', (['(-x)'], {}), '(-x)\n', (2079, 2083), False, 'import numpy\n'), ((2182, 2195), 'numpy.tanh', 'numpy.tanh', (['x'], {}), '(x)\n', (2192, 2195), False, 'import numpy\n'), ((1700, 1715), 'numpy.mean', 'numpy.mean', (['err'], {}), '(err)\n', (1710, 1715), False, 'import numpy\n'), ((2647, 2659), 'numpy.log', 'numpy.log', (['Y'], {}), '(Y)\n', (2656, 2659), False, 'import numpy\n'), ((2672, 2688), 'numpy.log', 'numpy.log', (['(1 - Y)'], {}), '(1 - Y)\n', (2681, 2688), False, 'import numpy\n'), ((1717, 1729), 'time.clock', 'time.clock', ([], {}), '()\n', (1727, 1729), False, 'import time\n')] |
import os
import numpy as np
from PIL import Image
import cv2
import pickle
face_cascade = cv2.CascadeClassifier("cascade.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, "images")
current = 0             # next free numeric label id
names_id_mapping = {}   # person label -> numeric id used by the recognizer
train = []              # cropped face regions (training samples)
names = []              # numeric label id for each training sample
for root, dirs, files in os.walk(image_dir):
    for image_name in files:  # renamed: "file" shadowed the open() handle below
        # only handle png/jpg image files
        if not image_name.endswith(("png", "jpg")):
            continue
        path = os.path.join(root, image_name)
        # the containing directory name is the person's label,
        # e.g. images/John Doe/1.jpg -> "john-doe"
        label = os.path.basename(os.path.dirname(path)).replace(" ", "-").lower()
        # assign a new numeric id the first time a label is seen
        # (replaces the original "if label in mapping: pass / else: ..." form)
        if label not in names_id_mapping:
            names_id_mapping[label] = current
            current += 1
        id_ = names_id_mapping[label]
        # LBPH needs single-channel 8-bit input: convert to grayscale
        pillow_image = Image.open(path).convert("L")
        image_array = np.array(pillow_image, "uint8")
        faces = face_cascade.detectMultiScale(image_array, scaleFactor=1.5, minNeighbors=5)
        for (x, y, w, h) in faces:
            # crop the detected face region (rows indexed by y, cols by x)
            region_of_image = image_array[y: y+h, x: x+w]
            train.append(region_of_image)
            names.append(id_)
# persist the name->id mapping so prediction code can map ids back to names
with open("labels.pickle", "wb") as mapping_file:
    pickle.dump(names_id_mapping, mapping_file)
recognizer.train(train, np.array(names))
recognizer.save("trainer.yml") | [
"PIL.Image.open",
"pickle.dump",
"os.path.join",
"cv2.face.LBPHFaceRecognizer_create",
"numpy.array",
"os.path.dirname",
"os.path.abspath",
"cv2.CascadeClassifier",
"os.walk"
] | [((92, 128), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""cascade.xml"""'], {}), "('cascade.xml')\n", (113, 128), False, 'import cv2\n'), ((142, 178), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (176, 178), False, 'import cv2\n'), ((246, 278), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""images"""'], {}), "(BASE_DIR, 'images')\n", (258, 278), False, 'import os\n'), ((362, 380), 'os.walk', 'os.walk', (['image_dir'], {}), '(image_dir)\n', (369, 380), False, 'import os\n'), ((207, 232), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (222, 232), False, 'import os\n'), ((1278, 1313), 'pickle.dump', 'pickle.dump', (['names_id_mapping', 'file'], {}), '(names_id_mapping, file)\n', (1289, 1313), False, 'import pickle\n'), ((1339, 1354), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (1347, 1354), True, 'import numpy as np\n'), ((481, 505), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (493, 505), False, 'import os\n'), ((921, 952), 'numpy.array', 'np.array', (['pillow_image', '"""uint8"""'], {}), "(pillow_image, 'uint8')\n", (929, 952), True, 'import numpy as np\n'), ((865, 881), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (875, 881), False, 'from PIL import Image\n'), ((543, 564), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (558, 564), False, 'import os\n')] |
#!/usr/bin/python
#####
# unmix_spectra.py performs fully-constrained least-squares
# spectral ummixing on an image file
#
# unmixing implementation described here:
# http://pysptools.sourceforge.net/abundance_maps.html
#
# based on the algorithm described here:
# <NAME>, <NAME>, and <NAME>. Fully Constrained
# Least-Squares Based Linear Unmixing. Althouse. IEEE. 1999.
#
# c. 2016 <NAME>
#####
import os
import sys
import aei
import random
import gdal as gdal
import numpy as np
import pysptools.abundance_maps as abm
# create a class to parse out the arguments passed to the main function
class parse_args:
    """Command-line parser for unmix_spectra.py.

    Recognized flags (any order):
      -i <file>           input image file (must exist)
      -o <file>           output file (its directory must be writable)
      -lib "l1 l2 ..."    two or more spectral library files
      -n <int>            number of random bundle draws (default: 20)
      -bands "b1 b2 ..."  integer band indices to use (default: all bands)
      -normalize          brightness-normalize the libraries and image
      -of <format>        GDAL output driver name (default: 'GTiff')

    On any invalid argument, prints the usage message and exits.
    """
    def __init__(self, arglist):
        """Parse ``arglist`` (typically sys.argv) into option attributes."""
        # set up main variables and defaults to parse
        self.infile = ''
        self.outfile = ''
        self.spectral_libs = []
        self.n = 20
        self.bands = []
        self.of = 'GTiff'
        self.normalize = False
        # exit if no arguments passed
        if len(arglist) == 1:
            usage(exit=True)
        # read arguments from command line
        i = 1
        while i < len(arglist):
            arg = arglist[i]
            # check input flag
            if arg.lower() == '-i':
                i += 1
                arg = arglist[i]
                if type(arg) is str:
                    self.infile = arg
                    if not aei.fn.checkFile(self.infile, quiet = True):
                        usage()
                        # re-run without quiet so the failure reason is printed
                        aei.fn.checkFile(self.infile)
                        sys.exit(1)
            # check output flag
            if arg.lower() == '-o':
                i += 1
                arg = arglist[i]
                if type(arg) is str:
                    self.outfile = arg
                    outpath = os.path.dirname(self.outfile)
                    if outpath == '':
                        outpath = '.'
                    if not os.access(outpath, os.W_OK):
                        usage()
                        print("[ ERROR ]: unable to write to output path: %s" % outpath)
                        sys.exit(1)
            # check spectral library paths
            if arg.lower() == "-lib":
                i += 1
                arg = arglist[i]
                libs = arg.split(" ")
                # throw an error if only one lib specified
                if len(libs) == 1:
                    usage()
                    print("[ ERROR ]: unable to unmix with one spectral library: %s" % libs[0])
                    sys.exit(1)
                # loop through each lib and update spec_lib list
                for j in range(len(libs)):
                    if not aei.fn.checkFile(libs[j], quiet=True):
                        usage()
                        aei.fn.checkFile(libs[j])
                        sys.exit()
                    self.spectral_libs.append(libs[j])
            # check number of iterations
            if arg.lower() == "-n":
                i += 1
                arg = arglist[i]
                try:
                    self.n = int(arg)
                except ValueError:
                    usage()
                    print("[ ERROR ]: -n argument is not an integer: %s" % arg)
                    sys.exit(1)
            # check indices
            if arg.lower() == "-bands":
                i += 1
                arg = arglist[i]
                band = arg.split(" ")
                # loop through and make sure each is a number
                for j in range(len(band)):
                    try:
                        int(band[j])
                    except ValueError:
                        usage()
                        # BUGFIX: this referenced the undefined name 'ind[j]',
                        # which raised a NameError instead of reporting the error
                        print("[ ERROR ]: invalid index set: %s" % band[j])
                        sys.exit(1)
                    self.bands.append(int(band[j]))
            # check normalize flag
            if arg.lower() == "-normalize":
                self.normalize = True
            # check output format
            if arg.lower() == "-of":
                i += 1
                arg = arglist[i]
                import gdal_types as gdt
                if not arg in gdt.list():
                    usage()
                    # BUGFIX: mismatched bracket in the original message ("[ ERROR }")
                    print("[ ERROR ]: invalid output format: %s" % arg)
                    sys.exit(1)
                self.of = arg
            i += 1
def usage(exit=False):
    """Print the unmix_spectra.py command-line synopsis.

    When ``exit`` is True, terminate the process with status 1 after
    printing.
    """
    synopsis = """
    $ unmix_spectra.py -lib "lib1 lib2 ... libx" [-n n_random_selections]
        [-bands "band1 band2 ... bandx"] [-normalize] [-of output_format]
        [-i] input_file [-o] output_file
    """
    print(synopsis)
    if exit:
        sys.exit(1)
def main():
    """
    the main program for unmix_spectra.py
    syntax: main()

    Workflow: parse args, load the spectral libraries, read the input
    raster, run args.n fully-constrained least-squares unmixing
    iterations with randomly drawn endmember bundles, average the
    resulting abundances, and write one output band per library.
    """
    # parse the argument list
    args = parse_args(sys.argv)
    # load the spectral libraries
    lib = {}
    n_libs = len(args.spectral_libs)
    # lnb[i] records the number of bands in library i (checked against the image later)
    lnb = np.zeros(n_libs)
    # get info from each library
    for i in range(n_libs):
        # assign libraries to dictionary
        lib['lib_%s' % i] = aei.read.spectralLib(args.spectral_libs[i])
        lnb[i] = (lib['lib_%s' % i].spectra.shape[-1])
        # get random indices for each library (one spectrum drawn per iteration)
        lib['rnd_%s' % i] = random.sample(range(
            lib['lib_%s' % i].spectra.shape[0]), args.n)
        # normalize if set
        if args.normalize:
            lib['lib_%s' % i].bn(inds=args.bands)
    # load the input image and get parameters
    inf = gdal.Open(args.infile)
    ns = inf.RasterXSize
    nl = inf.RasterYSize
    nb = inf.RasterCount
    geo = inf.GetGeoTransform()
    prj = inf.GetProjection()
    # one output band per endmember bundle (i.e. per spectral library)
    n_bundles = lnb.shape[0]
    b1 = inf.GetRasterBand(1)
    # if bands were not set in command line, use all bands
    if not args.bands:
        args.bands = range(nb)
    # check no data param (fall back to -9999 when the band defines none)
    if hasattr(b1, 'GetNoDataValue'):
        nd = b1.GetNoDataValue()
        if nd is None:
            nd = -9999
    else:
        nd = -9999
    # check that the spectral libraries match the bands
    # (the shape test fires when any library's band count differs from nb)
    if not 0 in np.where((lnb-nb) != 0)[0].shape:
        print("[ ERROR ]: number of image bands does not match number of spectral library bands")
        inf = None
        sys.exit(1)
    # report a little
    print("[ STATUS ]: Beginning fully-constrained least-squares unmixing")
    print("[ STATUS ]: Input file : %s" % args.infile)
    print("[ STATUS ]: Output file: %s" % args.outfile)
    # create an output file
    ouf = gdal.GetDriverByName(args.of).Create(
        args.outfile, ns, nl, n_bundles, gdal.GDT_Float32)
    ouf.SetGeoTransform(geo)
    ouf.SetProjection(prj)
    # create an output array to accumulate per-iteration abundances
    arr = np.zeros((nl, ns, n_bundles))
    # read the image file and flip dimensions to (line, sample, band)
    img = inf.ReadAsArray()
    img = img.transpose([1,2,0])
    # find no data vals (good-data pixels, judged from the first band)
    gd = np.where(img[:,:,0] != nd)
    # check that the file is not all no-data
    if gd[0].shape[0] == 0:
        print("[ ERROR ]: No good-data found in input file")
        print("[ ERROR ]: Exiting...")
        sys.exit(1)
    # subset the image array to good data only
    img = img[gd[0], gd[1], :]
    # normalize the data if set
    if args.normalize:
        img = aei.fn.bn(img, inds = args.bands)
        # after subsetting to args.bands, the band indices are renumbered 0..k-1
        args.bands = range(len(args.bands))
    # add a shallow dimension for unmixing algorithm
    img = np.expand_dims(img[:,args.bands], 0)
    # set up unmixing class
    unmixer = abm.FCLS()
    # loop through each random index and unmix
    for i in range(args.n):
        print("[ STATUS ]: Iteration [%s] of [%s]" % ((i + 1), args.n))
        # set up the bundles (one randomly drawn spectrum per library)
        bundles = np.zeros((n_libs,len(args.bands)))
        for j in range(n_libs):
            bundles[j,:] = lib['lib_%s' % j].spectra[lib['rnd_%s' % j][i],args.bands]
        # perform the unmixing and accumulate abundances at good pixels
        arr[gd[0],gd[1],:] += unmixer.map(img, bundles).squeeze()
    # divide by n iterations to get average response
    arr /= args.n
    # report completion
    print("[ STATUS ]: Completed unmixing iterations")
    print("[ STATUS ]: Writing data to file")
    # and write the results to the output file (0.0 marks no-data pixels)
    for i in range(n_libs):
        band = ouf.GetRasterBand(i+1)
        band.WriteArray(arr[:,:,i])
        band.SetNoDataValue(0.0)
        band.FlushCache()
# script entry point: run the unmixing workflow (no-op when imported)
if __name__ == "__main__":
    main()
"aei.read.spectralLib",
"gdal.Open",
"aei.fn.checkFile",
"gdal.GetDriverByName",
"numpy.where",
"os.access",
"gdal_types.list",
"os.path.dirname",
"numpy.zeros",
"numpy.expand_dims",
"sys.exit",
"aei.fn.bn",
"pysptools.abundance_maps.FCLS"
] | [((5283, 5299), 'numpy.zeros', 'np.zeros', (['n_libs'], {}), '(n_libs)\n', (5291, 5299), True, 'import numpy as np\n'), ((5882, 5904), 'gdal.Open', 'gdal.Open', (['args.infile'], {}), '(args.infile)\n', (5891, 5904), True, 'import gdal as gdal\n'), ((7114, 7143), 'numpy.zeros', 'np.zeros', (['(nl, ns, n_bundles)'], {}), '((nl, ns, n_bundles))\n', (7122, 7143), True, 'import numpy as np\n'), ((7294, 7322), 'numpy.where', 'np.where', (['(img[:, :, 0] != nd)'], {}), '(img[:, :, 0] != nd)\n', (7302, 7322), True, 'import numpy as np\n'), ((7826, 7863), 'numpy.expand_dims', 'np.expand_dims', (['img[:, args.bands]', '(0)'], {}), '(img[:, args.bands], 0)\n', (7840, 7863), True, 'import numpy as np\n'), ((7910, 7920), 'pysptools.abundance_maps.FCLS', 'abm.FCLS', ([], {}), '()\n', (7918, 7920), True, 'import pysptools.abundance_maps as abm\n'), ((5015, 5026), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5023, 5026), False, 'import sys\n'), ((5444, 5487), 'aei.read.spectralLib', 'aei.read.spectralLib', (['args.spectral_libs[i]'], {}), '(args.spectral_libs[i])\n', (5464, 5487), False, 'import aei\n'), ((6637, 6648), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6645, 6648), False, 'import sys\n'), ((7507, 7518), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7515, 7518), False, 'import sys\n'), ((7676, 7707), 'aei.fn.bn', 'aei.fn.bn', (['img'], {'inds': 'args.bands'}), '(img, inds=args.bands)\n', (7685, 7707), False, 'import aei\n'), ((6910, 6939), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['args.of'], {}), '(args.of)\n', (6930, 6939), True, 'import gdal as gdal\n'), ((1809, 1838), 'os.path.dirname', 'os.path.dirname', (['self.outfile'], {}), '(self.outfile)\n', (1824, 1838), False, 'import os\n'), ((2583, 2594), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2591, 2594), False, 'import sys\n'), ((4533, 4544), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4541, 4544), False, 'import sys\n'), ((6478, 6501), 'numpy.where', 'np.where', (['(lnb - nb != 
0)'], {}), '(lnb - nb != 0)\n', (6486, 6501), True, 'import numpy as np\n'), ((1382, 1423), 'aei.fn.checkFile', 'aei.fn.checkFile', (['self.infile'], {'quiet': '(True)'}), '(self.infile, quiet=True)\n', (1398, 1423), False, 'import aei\n'), ((1483, 1512), 'aei.fn.checkFile', 'aei.fn.checkFile', (['self.infile'], {}), '(self.infile)\n', (1499, 1512), False, 'import aei\n'), ((1537, 1548), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1545, 1548), False, 'import sys\n'), ((1942, 1969), 'os.access', 'os.access', (['outpath', 'os.W_OK'], {}), '(outpath, os.W_OK)\n', (1951, 1969), False, 'import os\n'), ((2116, 2127), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2124, 2127), False, 'import sys\n'), ((2747, 2784), 'aei.fn.checkFile', 'aei.fn.checkFile', (['libs[j]'], {'quiet': '(True)'}), '(libs[j], quiet=True)\n', (2763, 2784), False, 'import aei\n'), ((2842, 2867), 'aei.fn.checkFile', 'aei.fn.checkFile', (['libs[j]'], {}), '(libs[j])\n', (2858, 2867), False, 'import aei\n'), ((2892, 2902), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2900, 2902), False, 'import sys\n'), ((3376, 3387), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3384, 3387), False, 'import sys\n'), ((4401, 4411), 'gdal_types.list', 'gdt.list', ([], {}), '()\n', (4409, 4411), True, 'import gdal_types as gdt\n'), ((3946, 3957), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3954, 3957), False, 'import sys\n')] |
import numpy as np
from numpy import zeros
from dipy.segment.threshold import upper_bound_by_percent, upper_bound_by_rate
from numpy.testing import assert_equal, run_module_suite
def test_adjustment():
    """Check upper_bound_by_rate/percent against hand-counted pixel tallies.

    Builds a synthetic 128x128 image with three nested intensity
    plateaus (100, 150, 255), rescales it with each upper-bound
    estimate, and verifies the pixel counts above known thresholds
    match the plateau geometry.
    """
    imga = zeros([128, 128])
    for y in range(128):
        for x in range(128):
            if y > 10 and y < 115 and x > 10 and x < 115:
                imga[x, y] = 100
            if y > 39 and y < 88 and x > 39 and x < 88:
                imga[x, y] = 150
            if y > 59 and y < 69 and x > 59 and x < 69:
                imga[x, y] = 255
    high_1 = upper_bound_by_rate(imga)
    high_2 = upper_bound_by_percent(imga)
    # rescale so [min, upper_bound] maps onto [0, 255]
    vol1 = np.interp(imga, xp=[imga.min(), high_1], fp=[0, 255])
    vol2 = np.interp(imga, xp=[imga.min(), high_2], fp=[0, 255])
    # expected counts from the plateau geometry above:
    # middle plateau is 48x48 pixels, outer plateau is 104x104 pixels
    count2 = (88 - 40) * (88 - 40)
    count1 = (114 - 10) * (114 - 10)
    count2_upper = (88 - 40) * (88 - 40)
    count1_upper = (114 - 10) * (114 - 10)
    value1 = np.unique(vol1)
    value2 = np.unique(vol2)
    # vectorized tallies replace the original per-pixel Python loops:
    # pixels above the second-lowest unique value, and non-zero pixels
    count2_test = int(np.sum(vol1 > value1[1]))
    count1_test = int(np.sum(vol1 > 0))
    count2_upper_test = int(np.sum(vol2 > value2[1]))
    count1_upper_test = int(np.sum(vol2 > 0))
    assert_equal(count2, count2_test)
    assert_equal(count1, count1_test)
    assert_equal(count2_upper, count2_upper_test)
    assert_equal(count1_upper, count1_upper_test)
# allow running this test module directly via numpy's test runner
if __name__ == '__main__':
    run_module_suite()
| [
"numpy.unique",
"numpy.testing.assert_equal",
"dipy.segment.threshold.upper_bound_by_percent",
"dipy.segment.threshold.upper_bound_by_rate",
"numpy.zeros",
"numpy.testing.run_module_suite"
] | [((216, 233), 'numpy.zeros', 'zeros', (['[128, 128]'], {}), '([128, 128])\n', (221, 233), False, 'from numpy import zeros\n'), ((572, 597), 'dipy.segment.threshold.upper_bound_by_rate', 'upper_bound_by_rate', (['imga'], {}), '(imga)\n', (591, 597), False, 'from dipy.segment.threshold import upper_bound_by_percent, upper_bound_by_rate\n'), ((611, 639), 'dipy.segment.threshold.upper_bound_by_percent', 'upper_bound_by_percent', (['imga'], {}), '(imga)\n', (633, 639), False, 'from dipy.segment.threshold import upper_bound_by_percent, upper_bound_by_rate\n'), ((1035, 1050), 'numpy.unique', 'np.unique', (['vol1'], {}), '(vol1)\n', (1044, 1050), True, 'import numpy as np\n'), ((1064, 1079), 'numpy.unique', 'np.unique', (['vol2'], {}), '(vol2)\n', (1073, 1079), True, 'import numpy as np\n'), ((1544, 1577), 'numpy.testing.assert_equal', 'assert_equal', (['count2', 'count2_test'], {}), '(count2, count2_test)\n', (1556, 1577), False, 'from numpy.testing import assert_equal, run_module_suite\n'), ((1582, 1615), 'numpy.testing.assert_equal', 'assert_equal', (['count1', 'count1_test'], {}), '(count1, count1_test)\n', (1594, 1615), False, 'from numpy.testing import assert_equal, run_module_suite\n'), ((1621, 1666), 'numpy.testing.assert_equal', 'assert_equal', (['count2_upper', 'count2_upper_test'], {}), '(count2_upper, count2_upper_test)\n', (1633, 1666), False, 'from numpy.testing import assert_equal, run_module_suite\n'), ((1671, 1716), 'numpy.testing.assert_equal', 'assert_equal', (['count1_upper', 'count1_upper_test'], {}), '(count1_upper, count1_upper_test)\n', (1683, 1716), False, 'from numpy.testing import assert_equal, run_module_suite\n'), ((1749, 1767), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (1765, 1767), False, 'from numpy.testing import assert_equal, run_module_suite\n')] |
#!/usr/bin/env python3
import sys
from pathlib import Path
from .posterization_gui import *
from .simplepalettes import *
import numpy as np
import cv2
try:
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
except ImportError:
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from skimage.transform import rescale
from qtwidgets import Toggle
from PIL import Image
# In command line: "pip install opencv-python-headless" to avoid qt complaining two set of binaries
### This sometimes happens: 'qt.gui.icc: fromIccProfile: failed minimal tag size sanity'
class TimerMessageBox( QMessageBox ):
    """Modal "please wait" message box that closes itself automatically.

    A QTimer fires every 100 ms and decrements a countdown; the box
    closes when the countdown reaches zero.  NOTE(review): ``timeout``
    is counted in 100 ms timer ticks, not seconds -- the default of 3
    closes after roughly 0.3 s; confirm this is intended.
    """
    def __init__( self, timeout = 3, parent = None ):
        """Build the button-less box and start the countdown timer."""
        super( TimerMessageBox, self ).__init__( parent )
        self.setWindowTitle( "Algorithm is processing your image. Please hold on." )
        # remaining number of timer ticks before the box closes itself
        self.time_to_wait = timeout
        self.setText( "Algorithm is processing your image. Please hold on." )
        # no buttons: the user cannot dismiss the box manually
        self.setStandardButtons( QMessageBox.NoButton )
        self.timer = QTimer()
        # tick every 100 milliseconds
        self.timer.setInterval( 100 )
        self.timer.timeout.connect( self.changeContent )
        self.timer.start()
    def changeContent( self ):
        """Timer callback: count down one tick and close at zero."""
        self.time_to_wait -= 1
        if self.time_to_wait <= 0:
            self.close()
    def closeEvent( self, event ):
        """Stop the timer when the box closes so the callback stops firing."""
        self.timer.stop()
        event.accept()
class MainWindow( QWidget ):
    def __init__( self ):
        """Set the window title and initial geometry, then build the UI."""
        super().__init__()
        self.title = 'Posterization'
        # initial window position (x, y) and size (width, height), in pixels;
        # consumed by setGeometry() inside initUI()
        self.x = 300
        self.y = 0
        self.width = 100
        self.height = 300
        self.initUI()
def initUI( self ):
self.setWindowTitle( self.title )
self.setGeometry( self.x, self.y, self.width, self.height )
#self.setStyleSheet("background-color: white;")
# Set the welcome icon in GIF
self.welcome_img_path = str( Path( __file__ ).parent / "car.jpg" )
#self.welcome_img_path = "car.jpg"
self.welcome = QPixmap( self.welcome_img_path )
self.imageLabel = QLabel()
self.imageLabel.setPixmap( self.welcome )
# Toggle for switching to downsampled version
self.switch = Toggle()
self.switch.setMaximumWidth( 200 )
### palette images
self.paletteLabel = QLabel()
self.paletteLabel.setPixmap( QPixmap() )
#### Variables
self.imagePath = ""
self.palette = None
# used for recoloring
self.weights_per_pixel_smooth = None
self.weights_per_pixel = None
self.palette_recolor = None
self.palette_og = None
self.waitingtime = 1
self.show_palette = 1
self.show_input = 0
self.live_smoothing = True
self.blur_window_slider_val = 7 # default
self.blur_slider_val = 0.1 # default
self.binary_slider_val = 0.8 # default
self.cluster_slider_val = 20 # default
self.palette_slider_val = 6 # default
self.blend_slider_val = 3 # default
self.current_image_indx = -1 # Track the current image index in the image list
self.imageList = []
self.add_to_imageList( cv2.cvtColor( cv2.imread( self.welcome_img_path ), cv2.COLOR_BGR2RGB ) )
self.paletteList = [-1 * np.ones( ( 1, 1, 1 ) )]
self.input_image = None # Store it as np array
self.saliency_map = None
self.posterized_image_wo_smooth = -1 * np.ones( ( 1, 1, 1 ) ) # Store it as np array
self.posterized_image_w_smooth = -1 * np.ones( ( 1, 1, 1 ) ) # Store it as np array
#### BOXES
btns_io_box = QHBoxLayout() # set bottons' box for I/O
# algorithm_btns_box = QVBoxLayout() # set bottons' box for algorithms
sld_box_palette = QHBoxLayout()
sld_box_blend = QHBoxLayout()
sld_box_cluster = QHBoxLayout()
sld_box_binary = QHBoxLayout()
toggle_box = QHBoxLayout()
sld_box_blur = QHBoxLayout()
sld_box_window = QHBoxLayout()
btns_posterize_box = QHBoxLayout() # set bottons' box for posterization and reset
sld_r_recolor = QHBoxLayout()
sld_g_recolor = QHBoxLayout()
sld_b_recolor = QHBoxLayout()
blur_box = QHBoxLayout()
img_box = QHBoxLayout() # set image's box
pages_box = QVBoxLayout() # set next-previous box
show_hide_box = QVBoxLayout()
combo_recolor_box = QHBoxLayout()
recolor_btn_box = QHBoxLayout()
#### BUTTONS
# button for selecting an input image
self.img_btn = QPushButton( 'Choose Image' )
self.img_btn.clicked.connect( self.get_image )
self.img_btn.setToolTip( 'Press the button to <b>select</b> an image.' )
self.img_btn.setMaximumWidth( 150 )
# button for posterizing the given image
self.posterize_btn = QPushButton( 'Posterize' )
self.posterize_btn.clicked.connect( self.posterize )
self.posterize_btn.setToolTip( 'Press the button to <b>posterize</b> your image.' )
self.posterize_btn.setMaximumWidth( 110 )
# button for reseting posterization parameters
self.reset_posterize_btn = QPushButton( 'Reset' )
self.reset_posterize_btn.clicked.connect( self.reset_posterize )
self.reset_posterize_btn.setToolTip( 'Press the button to <b>reset</b> all posterization parameters.' )
self.reset_posterize_btn.setMaximumWidth( 110 )
# button for re-smoothing the posterized image
self.smooth_btn = QPushButton( 'Re-Smooth' )
self.smooth_btn.clicked.connect( self.smooth )
self.smooth_btn.setToolTip( 'Press the button to <b>re-smooth</b> your posterized image.' )
self.smooth_btn.setMaximumWidth( 150 )
# button for loading the saliency map
self.map_btn = QPushButton( 'Smooth with Custom Map' )
self.map_btn.clicked.connect( self.pop_up_load_saliency_map )
self.map_btn.setToolTip( 'Press the button to <b>load</b> your own map to blur.' )
self.map_btn.setMaximumWidth( 180 )
# button for saving the posterized image
self.save_btn = QPushButton( 'Save Current Image' )
self.save_btn.clicked.connect( self.save_current_image )
self.save_btn.setToolTip( 'Press the button to <b>save</b> your current image.' )
self.save_btn.setMaximumWidth( 150 )
# button for saving the palette
self.save_palette_btn = QPushButton( 'Save Palette' )
self.save_palette_btn.clicked.connect( self.save_current_palette )
self.save_palette_btn.setToolTip( 'Press the button to <b>save</b> the palette.' )
self.save_palette_btn.setMaximumWidth( 150 )
#### Previous-next buttons
self.previous_btn = QPushButton( 'Previous Posterization' )
self.previous_btn.clicked.connect( self.paste_previous_image )
self.previous_btn.setToolTip( 'Press the button to see your <b>previous</b> image in the gallory.' )
self.previous_btn.setMinimumWidth( 165 )
self.previous_btn.setMaximumWidth( 165 )
self.next_btn = QPushButton( 'Next Posterization' )
self.next_btn.clicked.connect( self.paste_next_image )
self.next_btn.setToolTip( 'Press the button to see your <b>next</b> image in the gallory.' )
self.next_btn.setMinimumWidth( 165 )
self.next_btn.setMaximumWidth( 165 )
#### Show/Hide buttons
self.palette_btn = QPushButton( 'Show/Hide Palette' )
self.palette_btn.clicked.connect( self.show_hide_palette )
self.palette_btn.setToolTip( 'Press the button to <b>show</b> or <b>hide</b> the palette.' )
self.palette_btn.setMinimumWidth( 165 )
self.palette_btn.setMaximumWidth( 165 )
self.og_img_btn = QPushButton( 'Show/Hide Input Image' )
self.og_img_btn.clicked.connect( self.show_hide_input_image )
self.og_img_btn.setToolTip( 'Press the button to <b>show</b> your input image.' )
self.og_img_btn.setMinimumWidth( 165 )
self.og_img_btn.setMaximumWidth( 165 )
#### SLIDERS
# slider for palette size
self.blend_sld = QSlider( Qt.Horizontal )
self.blend_sld.setRange( 0, 15 )
self.blend_sld.setFocusPolicy( Qt.NoFocus )
self.blend_sld.setSliderPosition( self.blend_slider_val )
self.blend_sld.setPageStep( 1 )
self.blend_sld.setToolTip( 'Fine-tune the slider to get your desired main palette size.' )
self.blend_sld.setMinimumWidth( 150 )
self.blend_sld.setMaximumWidth( 200 )
self.blend_sld.valueChanged.connect( self.blend_change_slider )
# slider for palette size
self.palette_sld = QSlider( Qt.Horizontal )
self.palette_sld.setRange( 4, 15 )
self.palette_sld.setFocusPolicy( Qt.NoFocus )
self.palette_sld.setSliderPosition( self.palette_slider_val )
self.palette_sld.setPageStep( 1 )
self.palette_sld.setToolTip( 'Fine-tune the slider to get your desired main palette size.' )
self.palette_sld.setMinimumWidth( 150 )
self.palette_sld.setMaximumWidth( 200 )
self.palette_sld.valueChanged.connect( self.palette_change_slider )
# slider for number of clusters for Kmeans
self.cluster_sld = QSlider( Qt.Horizontal )
self.cluster_sld.setRange( 15, 50 )
self.cluster_sld.setFocusPolicy( Qt.NoFocus )
self.cluster_sld.setSliderPosition( self.cluster_slider_val )
self.cluster_sld.setPageStep( 1 )
self.cluster_sld.setToolTip( 'Fine-tune the slider to get your desired threshold for outlier colors.' )
self.cluster_sld.setMinimumWidth( 150 )
self.cluster_sld.setMaximumWidth( 200 )
self.cluster_sld.valueChanged.connect( self.cluster_change_slider )
# slider for binary penalization
self.binary_sld = QSlider( Qt.Horizontal )
self.binary_sld.setRange( 1, 200 )
self.binary_sld.setFocusPolicy( Qt.NoFocus )
self.binary_sld.setSliderPosition( int( 100 * self.binary_slider_val ) )
self.binary_sld.setPageStep( 1 )
self.binary_sld.setToolTip( 'Fine-tune the slider to get your desired penalization on binary term.' )
self.binary_sld.setMinimumWidth( 150 )
self.binary_sld.setMaximumWidth( 200 )
self.binary_sld.valueChanged.connect( self.binary_change_slider )
# slider for blurring threshold
self.blur_sld = QSlider( Qt.Horizontal )
self.blur_sld.setRange( 0, 100 )
self.blur_sld.setFocusPolicy( Qt.NoFocus )
self.blur_sld.setSliderPosition( int( 100 * self.blur_slider_val ) )
self.blur_sld.setPageStep( 1 )
self.blur_sld.setToolTip( 'Fine-tune the slider to get your desired blurring threshold.' )
self.blur_sld.setMinimumWidth( 150 )
self.blur_sld.setMaximumWidth( 200 )
self.blur_sld.valueChanged.connect( self.blur_change_slider )
# slider for blurring threshold
self.blur_window_sld = QSlider( Qt.Horizontal )
self.blur_window_sld.setRange( 0, 3 )
self.blur_window_sld.setFocusPolicy( Qt.NoFocus )
self.blur_window_sld.setSliderPosition( ( self.blur_window_slider_val - 3 ) / 2 )
self.blur_window_sld.setPageStep( 1 )
self.blur_window_sld.setToolTip( 'Fine-tune the slider to get your desired blurring window size.' )
self.blur_window_sld.setMinimumWidth( 150 )
self.blur_window_sld.setMaximumWidth( 200 )
self.blur_window_sld.valueChanged.connect( self.blur_window_change_slider )
### LABELS
self.switch_text = QLabel( 'Downsampled Version? ' )
self.switch_text.setAlignment( Qt.AlignLeft )
# labels
self.blur_window_text = QLabel( 'Boundary smoothess (Default: 7):' )
self.blur_text = QLabel( 'Detail abstraction (Default: 0.1): ' )
self.binary_text = QLabel( 'Region clumpiness (Default: 0.8): ' )
self.cluster_text = QLabel( 'Rare color suppression (Default: 20):' )
self.palette_text = QLabel( 'Palette size (Default: 6): ' )
self.blend_text = QLabel( 'Palette blends (Default: 3): ' )
self.blur_window_text.setMaximumWidth( 250 )
self.blur_text.setMaximumWidth( 250 )
self.binary_text.setMaximumWidth( 250 )
self.cluster_text.setMaximumWidth( 250 )
self.palette_text.setMaximumWidth( 250 )
self.blend_text.setMaximumWidth( 250 )
# label text for blur slider
self.blur_window_sld_label = QLabel( '7' )
self.blur_window_sld_label.setAlignment( Qt.AlignLeft )
self.blur_window_sld_label.setMinimumWidth( 80 )
self.blur_sld_label = QLabel( '0.1' )
self.blur_sld_label.setAlignment( Qt.AlignLeft )
self.blur_sld_label.setMinimumWidth( 80 )
# label text for binary penalization slider
self.binary_sld_label = QLabel( '0.8' )
self.binary_sld_label.setAlignment( Qt.AlignLeft )
self.binary_sld_label.setMinimumWidth( 80 )
# label text for kmeans cluster slider
self.cluster_sld_label = QLabel( '20' )
self.cluster_sld_label.setAlignment( Qt.AlignLeft )
self.cluster_sld_label.setMinimumWidth( 80 )
# label text for palette size slider
self.palette_sld_label = QLabel( '6' )
self.palette_sld_label.setAlignment( Qt.AlignLeft )
self.palette_sld_label.setMinimumWidth( 80 )
# label text for blending way slider
self.blend_sld_label = QLabel( '3' )
self.blend_sld_label.setAlignment( Qt.AlignLeft )
self.blend_sld_label.setMinimumWidth( 80 )
####
### combo boxes for recoloring
####
self.combobox = QComboBox(self)
self.combobox.setMaximumWidth(100)
self.combotext = QLabel( 'Choose color: ' )
self.r_slider_val = 0
self.g_slider_val = 0
self.b_slider_val = 0
self.rgb_text = QLabel( 'Recolor the image via its palette:' )
self.rgb_text.setMaximumWidth( 250 )
self.r_sld_label = QLabel( '0' )
self.r_sld_label.setAlignment( Qt.AlignLeft )
self.r_sld_label.setMinimumWidth( 80 )
self.r_sld_text_label = QLabel( 'R:' )
self.r_sld_text_label.setAlignment( Qt.AlignLeft )
self.g_sld_label = QLabel( '0' )
self.g_sld_label.setAlignment( Qt.AlignLeft )
self.g_sld_label.setMinimumWidth( 80 )
self.g_sld_text_label = QLabel( 'G:' )
self.g_sld_text_label.setAlignment( Qt.AlignRight )
self.b_sld_label = QLabel( '0' )
self.b_sld_label.setAlignment( Qt.AlignLeft )
self.b_sld_label.setMinimumWidth( 80 )
self.b_sld_text_label = QLabel( 'B:' )
self.b_sld_text_label.setAlignment( Qt.AlignRight )
# slider for palette recoloring
self.r_sld = QSlider( Qt.Horizontal )
self.r_sld.setRange( 0, 255 )
self.r_sld.setFocusPolicy( Qt.NoFocus )
self.r_sld.setSliderPosition( self.r_slider_val )
self.r_sld.setPageStep( 1 )
self.r_sld.setToolTip( 'Fine-tune the slider to get your desired recoloring for R-channel.' )
self.r_sld.setMinimumWidth( 150 )
self.r_sld.setMaximumWidth( 200 )
self.r_sld.valueChanged.connect( self.r_change_slider )
self.g_sld = QSlider( Qt.Horizontal )
self.g_sld.setRange( 0, 255 )
self.g_sld.setFocusPolicy( Qt.NoFocus )
self.g_sld.setSliderPosition( self.g_slider_val )
self.g_sld.setPageStep( 1 )
self.g_sld.setToolTip( 'Fine-tune the slider to get your desired recoloring for G-channel.' )
self.g_sld.setMinimumWidth( 150 )
self.g_sld.setMaximumWidth( 200 )
self.g_sld.valueChanged.connect( self.g_change_slider )
self.b_sld = QSlider( Qt.Horizontal )
self.b_sld.setRange( 0, 255 )
self.b_sld.setFocusPolicy( Qt.NoFocus )
self.b_sld.setSliderPosition( self.b_slider_val )
self.b_sld.setPageStep( 1 )
self.b_sld.setToolTip( 'Fine-tune the slider to get your desired recoloring for B-channel.' )
self.b_sld.setMinimumWidth( 150 )
self.b_sld.setMaximumWidth( 200 )
self.b_sld.valueChanged.connect( self.b_change_slider )
self.recolor_btn = QPushButton( 'Reset Current Color' )
self.recolor_btn.clicked.connect( self.reset_current_recoloring )
self.recolor_btn.setToolTip( 'Press the button to <b>reset</b> the current palette color.' )
self.recolor_btn.setMinimumWidth( 150 )
self.recolor_btn.setMaximumWidth( 150 )
self.undo_recolor_btn = QPushButton( 'Reset All Colors' )
self.undo_recolor_btn.clicked.connect( self.reset_all_recoloring )
self.undo_recolor_btn.setToolTip( 'Press the button to <b>undo</b> your all previous recolorings.' )
self.undo_recolor_btn.setMinimumWidth( 150 )
self.undo_recolor_btn.setMaximumWidth( 150 )
### BOX FRAMES
btns_io_box.addWidget( self.img_btn )
btns_io_box.addWidget( self.save_btn )
btns_io_box.addWidget( self.save_palette_btn )
btns_io_box.addStretch(40)
# Separate boxes for parameters
sld_box_palette.addWidget( self.palette_text )
sld_box_palette.addWidget( self.palette_sld )
sld_box_palette.addWidget( self.palette_sld_label )
sld_box_palette.addStretch(8)
sld_box_blend.addWidget( self.blend_text )
sld_box_blend.addWidget( self.blend_sld )
sld_box_blend.addWidget( self.blend_sld_label )
sld_box_blend.addStretch(8)
sld_box_cluster.addWidget( self.cluster_text )
sld_box_cluster.addWidget( self.cluster_sld )
sld_box_cluster.addWidget( self.cluster_sld_label )
sld_box_cluster.addStretch(8)
sld_box_binary.addWidget( self.binary_text )
sld_box_binary.addWidget( self.binary_sld )
sld_box_binary.addWidget( self.binary_sld_label )
sld_box_binary.addStretch(8)
toggle_box.addWidget( self.switch_text )
toggle_box.addWidget( self.switch )
toggle_box.addStretch(8)
btns_posterize_box.addWidget( self.posterize_btn )
btns_posterize_box.addWidget( self.reset_posterize_btn )
btns_posterize_box.addStretch(8)
sld_box_blur.addWidget( self.blur_text )
sld_box_blur.addWidget( self.blur_sld )
sld_box_blur.addWidget( self.blur_sld_label )
sld_box_blur.addStretch(8)
sld_box_window.addWidget( self.blur_window_text )
sld_box_window.addWidget( self.blur_window_sld )
sld_box_window.addWidget( self.blur_window_sld_label )
sld_box_window.addStretch(8)
# blur box for re-smooth and smooth by map
blur_box.addWidget( self.smooth_btn )
blur_box.addWidget( self.map_btn )
blur_box.addStretch(8)
# recoloring box
combo_recolor_box.addWidget( self.combotext )
combo_recolor_box.addWidget( self.combobox )
combo_recolor_box.addStretch(8)
sld_r_recolor.addWidget( self.r_sld_text_label )
sld_r_recolor.addWidget( self.r_sld )
sld_r_recolor.addWidget( self.r_sld_label )
sld_r_recolor.addStretch(8)
sld_g_recolor.addWidget( self.g_sld_text_label )
sld_g_recolor.addWidget( self.g_sld )
sld_g_recolor.addWidget( self.g_sld_label )
sld_g_recolor.addStretch(8)
sld_b_recolor.addWidget( self.b_sld_text_label )
sld_b_recolor.addWidget( self.b_sld )
sld_b_recolor.addWidget( self.b_sld_label )
sld_b_recolor.addStretch(8)
recolor_btn_box.addWidget( self.recolor_btn )
recolor_btn_box.addWidget( self.undo_recolor_btn )
recolor_btn_box.addStretch(8)
# Image box
img_box.addStretch(1)
img_box.addWidget( self.paletteLabel )
img_box.addStretch(1)
img_box.addWidget( self.imageLabel )
img_box.addStretch(4)
# Previous-next box
pages_box.addWidget( self.previous_btn )
show_hide_box.addWidget( self.next_btn )
# Show-hide box
pages_box.addWidget( self.palette_btn )
show_hide_box.addWidget( self.og_img_btn )
# Set grid layout
grid = QGridLayout()
grid.setSpacing(12)
grid.addLayout( btns_io_box, 0, 0 )
### parameters for posterization
grid.addLayout( sld_box_palette, 1, 0 )
grid.addLayout( sld_box_blend, 2, 0 )
grid.addLayout( sld_box_cluster, 3, 0 )
grid.addLayout( sld_box_binary, 4, 0 )
grid.addLayout( toggle_box, 5, 0 )
grid.addLayout( btns_posterize_box, 6, 0 )
### parameters for smoothing
grid.addLayout( sld_box_blur, 8, 0 )
grid.addLayout( sld_box_window, 9, 0 )
grid.addLayout( blur_box, 10, 0 )
### boxes for previous/next and show/hide
grid.addLayout( pages_box, 0, 10 )
grid.addLayout( show_hide_box, 0, 11 )
### sliders for recoloring
grid.addWidget( self.rgb_text, 12, 0 )
grid.addLayout( combo_recolor_box, 13, 0 )
grid.addLayout( sld_r_recolor, 14, 0 )
grid.addLayout( sld_g_recolor, 15, 0 )
grid.addLayout( sld_b_recolor, 16, 0 )
grid.addLayout( recolor_btn_box, 17, 0 )
grid.addLayout( img_box, 1, 1, 19, 19 )
self.setLayout(grid)
self.show()
### Recoloring functions
def set_rgb_slider( self, color ):
color = color * 255.
self.r_change_slider( int( color[0] ) )
self.g_change_slider( int( color[1] ) )
self.b_change_slider( int( color[2] ) )
self.r_sld.setSliderPosition( int( color[0] ) )
self.g_sld.setSliderPosition( int( color[1] ) )
self.b_sld.setSliderPosition( int( color[2] ) )
def onActivated( self, text ):
color_indx = int( text ) - 1
color = self.palette_recolor[ color_indx ]
self.set_rgb_slider( color )
def set_combo_icon( self ):
self.combobox.clear() # reset combo box
for i in range( len( self.palette_recolor ) ):
self.combobox.addItem( str( i + 1 ) )
self.combobox.activated[str].connect( self.onActivated )
default_color = self.palette_recolor[0]
self.set_rgb_slider( default_color )
def get_recolor_img_and_palette( self ):
#recolor_img = ( self.weights_per_pixel @ self.palette_recolor ).reshape( self.input_image.shape )
# fix it for odd resolution when downsampled version applied
if self.input_image.shape[0] % 2 == 1 and self.switch.isChecked():
w = self.input_image.shape[0] + 1
else:
w = self.input_image.shape[0]
if self.input_image.shape[1] % 2 == 1 and self.switch.isChecked():
h = self.input_image.shape[1] + 1
else:
h = self.input_image.shape[1]
recolor_smooth_img = np.clip( 0, 255, ( self.weights_per_pixel_smooth @ self.palette_recolor ).reshape( ( w, h, 3 ) ) * 255. ).astype( np.uint8 )
#recolor_smooth_img = post_smoothing( PIL.Image.fromarray( np.clip( 0, 255, recolor_img * 255. ).astype( np.uint8 ), 'RGB' ),
#self.blur_slider_val, blur_window = self.blur_window_slider_val )
new_palette = np.ascontiguousarray( np.clip( 0, 255, simplepalettes.palette2swatch( self.palette_recolor ) *
255. ).astype( np.uint8 ).transpose( ( 1, 0, 2 ) ) )
return recolor_smooth_img, new_palette
def recolor_via_palette( self ):
color_indx = int( self.combobox.currentText() ) - 1
r_value = self.r_sld.value()
g_value = self.g_sld.value()
b_value = self.b_sld.value()
self.palette_recolor[ color_indx ] = np.array([ r_value, g_value, b_value ]) / 255.
recolor_img, new_palette = self.get_recolor_img_and_palette()
self.add_to_paletteList( new_palette )
self.add_to_imageList( recolor_img )
self.set_image( self.imageLabel, self.imageList[-1] )
self.set_image( self.paletteLabel, self.paletteList[-1] )
# update current index position
self.current_image_indx = len( self.imageList ) - 1
def reset_current_recoloring( self ):
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'Please posterize your image first' )
else:
# visualization for current combox text
color_indx = int( self.combobox.currentText() ) - 1
current_color = self.palette_og[ color_indx ]
self.set_rgb_slider( current_color )
self.palette_recolor[ color_indx ] = self.palette_og[ color_indx ].copy()
recolor_img, new_palette = self.get_recolor_img_and_palette()
self.add_to_paletteList( new_palette )
self.add_to_imageList( recolor_img )
self.set_image( self.imageLabel, self.imageList[-1] )
self.set_image( self.paletteLabel, self.paletteList[-1] )
# update current index position
self.current_image_indx = len( self.imageList ) - 1
def reset_all_recoloring( self ):
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'Please posterize your image first' )
else:
# visualization for current combox text
color_indx = int( self.combobox.currentText() ) - 1
current_color = self.palette_og[ color_indx ]
self.set_rgb_slider( current_color )
self.palette_recolor = self.palette_og.copy()
self.add_to_paletteList( self.palette )
self.add_to_imageList( self.posterized_image_w_smooth )
self.set_image( self.imageLabel, self.imageList[-1] )
self.set_image( self.paletteLabel, self.paletteList[-1] )
# update current index position
self.current_image_indx = len( self.imageList ) - 1
### Reset for posterization parameters
def reset_posterize( self ):
self.binary_change_slider( 80 )
self.cluster_change_slider( 20 )
self.palette_change_slider( 6 )
self.blend_change_slider( 3 )
self.binary_sld.setSliderPosition( 80 )
self.cluster_sld.setSliderPosition( 20 )
self.palette_sld.setSliderPosition( 6 )
self.blend_sld.setSliderPosition( 3 )
self.binary_sld.repaint()
self.cluster_sld.repaint()
self.palette_sld.repaint()
self.blend_sld.repaint()
### Slider functions
def r_change_slider(self, value):
self.r_slider_val = value
self.r_sld_label.setText( str( value ) )
if self.live_smoothing: self.recolor_via_palette()
def g_change_slider(self, value):
self.g_slider_val = value
self.g_sld_label.setText( str( value ) )
if self.live_smoothing: self.recolor_via_palette()
def b_change_slider(self, value):
self.b_slider_val = value
self.b_sld_label.setText( str( value ) )
if self.live_smoothing: self.recolor_via_palette()
def blur_window_change_slider(self, value):
self.blur_window_slider_val = 2 * value + 3
self.blur_window_sld_label.setText( str( 2 * value + 3 ) )
if self.live_smoothing: self.smooth()
def blur_change_slider(self, value):
self.blur_slider_val = value / 100
self.blur_sld_label.setText( str( value / 100 ) )
if self.live_smoothing: self.smooth()
def binary_change_slider(self, value):
self.binary_slider_val = value / 100
self.binary_sld_label.setText( str( value / 100 ) )
def cluster_change_slider(self, value):
self.cluster_slider_val = value
self.cluster_sld_label.setText( str( value ) )
def palette_change_slider(self, value):
self.palette_slider_val = value
self.palette_sld_label.setText( str( value ) )
def blend_change_slider(self, value):
self.blend_slider_val = value
self.blend_sld_label.setText( str( value ) )
# Function for selecting an input image
def get_image( self ):
img = QFileDialog.getOpenFileName( self, 'Select file' )
if img:
path = img[0]
self.load_image( path )
else:
QMessageBox.warning( self, 'Warning' , 'No file selected.' )
def paste_previous_image( self ):
self.current_image_indx -= 1
if self.current_image_indx == -2:
QMessageBox.warning( self,'Warning','Please select an image first.' )
self.current_image_indx += 1
elif self.current_image_indx == -1:
QMessageBox.warning( self,'Warning','No more previous image.' )
self.current_image_indx += 1
else:
if self.current_image_indx != 0 and self.show_palette == 1:
self.set_image( self.paletteLabel, self.paletteList[self.current_image_indx] )
else: # input image has no palette, so place a blank
self.paletteLabel.setPixmap( QPixmap() )
self.paletteLabel.repaint()
self.set_image( self.imageLabel, self.imageList[self.current_image_indx] )
def paste_next_image( self ):
self.current_image_indx += 1
if self.current_image_indx == 0:
QMessageBox.warning( self,'Warning','Please select an image first.' )
self.current_image_indx -= 1
elif self.current_image_indx == len( self.imageList ):
QMessageBox.warning( self,'Warning','No more next image.' )
self.current_image_indx -= 1
else:
if self.current_image_indx != 0 and self.show_palette == 1:
self.set_image( self.paletteLabel, self.paletteList[self.current_image_indx] )
else: # input image has no palette, so place a blank
self.paletteLabel.setPixmap( QPixmap() )
self.paletteLabel.repaint()
self.set_image( self.imageLabel, self.imageList[self.current_image_indx] )
#Load new image function
def set_image( self, panel, image ):
#Load the image into the label
height, width, dim = image.shape
qim = QImage( image.data, width, height, 3 * width, QImage.Format_RGB888 )
panel.setPixmap( QPixmap( qim ) )
panel.repaint()
    def add_to_imageList( self, image ):
        """Append ``image`` (coerced to a numpy array) to the image history."""
        self.imageList.append( np.asarray( image ) )
    def add_to_paletteList( self, palette ):
        """Append ``palette`` (coerced to a numpy array) to the palette history."""
        self.paletteList.append( np.asarray( palette ) )
def load_image( self, path ):
print ( "Loading Image." )
self.imageList = [] # initialized back to empty when giving another input image
self.paletteList = [-1 * np.ones( ( 1, 1, 1 ) )]
self.paletteLabel.setPixmap( QPixmap() )
# push input image in the list
self.current_image_indx += 1
self.input_image = cv2.cvtColor( cv2.imread( path ), cv2.COLOR_BGR2RGB )
self.add_to_imageList( self.input_image )
self.imageLabel.setPixmap( QPixmap( path ) )
self.imagePath = path
def show_hide_palette( self ):
#if self.imagePath == "":
# QMessageBox.warning( self, 'Warning', 'Please select an image first.' )
if self.paletteList[-1][0, 0, 0] == -1:
QMessageBox.warning( self, 'Warning', 'You do not have palette. Please posterize the image first.' )
else:
self.show_palette = 1 - self.show_palette
if self.current_image_indx != 0 and self.show_palette == 1:
self.set_image( self.paletteLabel, self.paletteList[self.current_image_indx] )
else: # input image has no palette, so place a blank
self.paletteLabel.setPixmap( QPixmap() )
def show_hide_input_image( self ):
#if self.imagePath == "":
# QMessageBox.warning( self, 'Warning', 'Please select an image first.' )
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'This is your input image.' )
else:
self.show_input = 1 - self.show_input
if self.show_input == 1:
self.set_image( self.imageLabel, self.imageList[0] )
else:
self.set_image( self.imageLabel, self.imageList[self.current_image_indx] )
# posterization
def posterize( self ):
#if self.imagePath == "":
# QMessageBox.warning( self, 'Warning', 'Please select an image first.' )
#else:
if self.imagePath == "":
img_arr = np.asfarray( PIL.Image.open( self.welcome_img_path ).convert( 'RGB' ) ) / 255.
self.input_image = img_arr
path = self.welcome_img_path
else:
img_arr = np.asfarray( PIL.Image.open( self.imagePath ).convert( 'RGB' ) ) / 255.
path = self.imagePath
width, height, dim = img_arr.shape
length = max( width, height )
self.message = "This image has size " + str( height ) + ' x ' + str( width ) + '.\n\n'
if length >= 1800:
self.message += 'This is a large image and may take more than 8 mins to process.\n' + 'We suggest you posterize a downsized version to select appropriate parameters or vectorize the output.\n\n'
else:
if 500 < length < 600:
self.waitingtime = 2
elif 600 < length < 1000:
self.waitingtime = 3
elif 1000 <= length:
self.waitingtime = 4
self.message += 'This will take roughly ' + str( self.waitingtime ) + ' minutes to process.\n\n'
reply = QMessageBox.question( self, 'Message', self.message + 'Do you want to proceed and posterize the image?',
QMessageBox.Yes | QMessageBox.No, QMessageBox.No )
if reply == QMessageBox.Yes:
print( "Start posterizing." )
# algorithm starts
start = time.time()
messagebox = TimerMessageBox( 1, self )
messagebox.open()
# if downsampled version is selected, downsize the input and divide the penality by 2
if self.switch.isChecked():
print( 'Downsampled version selected.' )
img_arr = rescale( img_arr, 0.5, order=0, multichannel=True , anti_aliasing=False )
self.binary_slider_val /= 2
# K-means
img_arr_re = img_arr.reshape( ( -1, 3 ) )
img_arr_cluster = get_kmeans_cluster_image( self.cluster_slider_val, img_arr_re, img_arr.shape[0], img_arr.shape[1] )
# MLO
post_img, final_colors, add_mix_layers, palette = \
posterization( path, img_arr, img_arr_cluster, self.palette_slider_val, self.blend_slider_val, self.binary_slider_val )
if self.switch.isChecked():
### 'Greyscale' might fail in this case since the palette size for greyscale is 2.
new_am = add_mix_layers.reshape( ( post_img.shape[0], post_img.shape[1], self.palette_slider_val ) )
self.weights_per_pixel = rescale( new_am, 2, order=0, multichannel=True, anti_aliasing=False ).reshape( -1, self.palette_slider_val )
else:
self.weights_per_pixel = add_mix_layers # save weight list per pixel
# save palette
# 'ascontiguousarray' to make a C contiguous copy
self.palette = np.ascontiguousarray( np.clip( 0, 255, simplepalettes.palette2swatch( palette ) * 255. ).astype( np.uint8 ).transpose( ( 1, 0, 2 ) ) )
if self.switch.isChecked():
post_img = rescale( post_img, 2, order=0, multichannel=True, anti_aliasing=False )
self.posterized_image_wo_smooth = np.clip( 0, 255, post_img*255. ).astype( np.uint8 )
self.posterized_image_wo_smooth = cv2.medianBlur( self.posterized_image_wo_smooth, 5 )
else:
self.posterized_image_wo_smooth = np.clip( 0, 255, post_img * 255. ).astype( np.uint8 )
# convert to uint8 format
#self.posterized_image_wo_smooth = np.clip( 0, 255, post_img * 255. ).astype( np.uint8 )
# make a map from unique colors to weights
unique_colors, unique_indices = np.unique( self.posterized_image_wo_smooth.reshape( -1, 3 ), return_index = True, axis = 0 )
color2weights = {}
for col, index in zip( unique_colors, unique_indices ):
weights = self.weights_per_pixel[ index ]
color2weights[ tuple( col ) ] = weights
# post-smoothing
self.posterized_image_w_smooth = post_smoothing( PIL.Image.fromarray( self.posterized_image_wo_smooth, 'RGB' ), self.blur_slider_val, blur_window = self.blur_window_slider_val )
# pass smoothing along to the weights
self.weights_per_pixel_smooth = self.weights_per_pixel.copy()
for col, weights in color2weights.items():
#color_mask = ( self.posterized_image_w_smooth.reshape( -1, 3 ) == np.array( col ) [None,:] ).all()
color_mask = np.where( np.all( self.posterized_image_w_smooth.reshape( -1, 3 ) == np.array( col ), axis = 1 ) )[0]
self.weights_per_pixel_smooth[ color_mask ] = weights
self.weights_per_pixel_smooth.shape = self.weights_per_pixel.shape
### setting for recoloring
self.palette_recolor = palette # save for palette recoloring
self.palette_og = self.palette_recolor.copy()
self.set_combo_icon()
end = time.time()
print( "Finished. Total time: ", end - start )
self.add_to_paletteList( self.palette )
self.add_to_imageList( self.posterized_image_w_smooth )
self.set_image( self.imageLabel, self.imageList[-1] )
self.set_image( self.paletteLabel, self.paletteList[-1] )
# update current index position
self.current_image_indx = len( self.imageList ) - 1
else:
pass
# re-smooth the image
def smooth( self ):
#if self.imagePath == "":
# QMessageBox.warning( self,'Warning','Please select an image first!' )
#else:
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'Please posterize your image first' )
else:
print( "Start smoothing." )
#messagebox = TimerMessageBox( 1, self )
#messagebox.open()
self.posterized_image_w_smooth = post_smoothing( PIL.Image.fromarray( self.posterized_image_wo_smooth, 'RGB' ), self.blur_slider_val, blur_window = self.blur_window_slider_val )
print( "Smoothing Finished." )
self.add_to_paletteList( self.paletteList[-1] )
self.add_to_imageList( self.posterized_image_w_smooth )
self.set_image( self.imageLabel, self.imageList[-1] )
# update current index position
self.current_image_indx = len( self.imageList ) - 1
# function to save current image
def save_current_image( self ):
#if self.imagePath == "":
# QMessageBox.warning( self,'Warning','Please select an image first.' )
#else:
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'Please posterize your image first.' )
else:
reply = QMessageBox.question( self, 'Message', "Are you sure to save your current image on this panel?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No )
if reply == QMessageBox.Yes:
image_name = QFileDialog.getSaveFileName( self, 'Save Image' )
if not image_name:
return
if image_name[0][-4:] in ['.jpg', '.png']:
path_name = image_name[0]
else:
path_name = image_name[0] + '.png'
Image.fromarray( self.imageList[self.current_image_indx] ).save( path_name )
else:
pass
# function to save current image
def save_current_palette( self ):
#if self.imagePath == "":
# QMessageBox.warning( self,'Warning','Please select an image first.' )
#else:
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'Please posterize your image first.' )
else:
reply = QMessageBox.question( self, 'Message', "Are you sure to save your current palette on this panel?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No )
if reply == QMessageBox.Yes:
image_name = QFileDialog.getSaveFileName( self, 'Save Palette' )
if not image_name:
return
if image_name[0][-4:] in ['.jpg', '.png']:
path_name = image_name[0]
else:
path_name = image_name[0] + '.png'
Image.fromarray( self.paletteList[self.current_image_indx] ).save( path_name )
else:
pass
# load user's own blurring map
def pop_up_load_saliency_map( self ):
#if self.imagePath == "":
# QMessageBox.warning( self,'Warning','Please select an image first.' )
#else:
if self.posterized_image_wo_smooth[0][0][0] == -1:
QMessageBox.warning( self, 'Warning', 'Please posterize your image first.' )
else:
reply = QMessageBox.question( self, 'Message', "Do you have your own blurring map (in grayscale and in .jpg/.png extension)?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No )
if reply == QMessageBox.Yes:
map = QFileDialog.getOpenFileName( self, 'Select file' )
map_path = map[0]
if map_path[-4:] not in ['.jpg', '.png']:
QMessageBox.warning( self, 'Warning', 'Please upload your map with .jpg or .png extension.' )
return
self.saliency_map = cv2.imread( map[0] ) / 255
h_s, w_s, dim_s = self.saliency_map.shape
h_i, w_i, dim_i = self.input_image.shape
if ( h_i, w_i ) != ( h_s, w_s ):
QMessageBox.warning( self, 'Warning', 'Please upload your map with size:\n\n ' + ' ' + str( h_i ) + ' x ' + str( w_i ) + '\n\n' + 'You upload the map with size:\n\n ' + ' ' + str( h_s ) + ' x ' + str( w_s ) )
return
if not np.array_equal( self.saliency_map[:,:,0], self.saliency_map[:,:,1] ) or not np.array_equal( self.saliency_map[:,:,1], self.saliency_map[:,:,2] ):
QMessageBox.warning( self, 'Warning', 'Please upload your map with grayscale.' )
return
print( "Start smoothing." )
messagebox = TimerMessageBox( 1, self )
messagebox.open()
self.posterized_image_w_smooth = post_smoothing(
PIL.Image.fromarray( self.posterized_image_wo_smooth, 'RGB' ),
self.blur_slider_val,
blur_window = self.blur_window_slider_val,
blur_map = self.saliency_map[:, :, 0]
)
print( "Smoothing Finished." )
self.add_to_paletteList( self.paletteList[-1] )
self.add_to_imageList( self.posterized_image_w_smooth )
self.set_image( self.imageLabel, self.imageList[-1] )
# update current index position
self.current_image_indx = len( self.imageList ) - 1
# Function if users tend to close the app
def closeEvent( self, event ):
reply = QMessageBox.question( self, 'Message', "Are you sure you want to quit?", QMessageBox.Yes | QMessageBox.No, QMessageBox.No )
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def main():
    """Launch the Qt application and block until it exits."""
    app = QApplication( sys.argv )
    window = MainWindow()  # keep a reference so the window is not garbage-collected
    sys.exit( app.exec_() )

if __name__ == '__main__':
    main()
| [
"numpy.clip",
"PIL.Image.fromarray",
"numpy.ones",
"pathlib.Path",
"numpy.asarray",
"cv2.medianBlur",
"numpy.array",
"numpy.array_equal",
"qtwidgets.Toggle",
"skimage.transform.rescale",
"cv2.imread"
] | [((2289, 2297), 'qtwidgets.Toggle', 'Toggle', ([], {}), '()\n', (2295, 2297), False, 'from qtwidgets import Toggle\n'), ((3647, 3665), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (3654, 3665), True, 'import numpy as np\n'), ((3744, 3762), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (3751, 3762), True, 'import numpy as np\n'), ((24825, 24862), 'numpy.array', 'np.array', (['[r_value, g_value, b_value]'], {}), '([r_value, g_value, b_value])\n', (24833, 24862), True, 'import numpy as np\n'), ((31821, 31838), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (31831, 31838), True, 'import numpy as np\n'), ((31930, 31949), 'numpy.asarray', 'np.asarray', (['palette'], {}), '(palette)\n', (31940, 31949), True, 'import numpy as np\n'), ((32374, 32390), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (32384, 32390), False, 'import cv2\n'), ((3370, 3403), 'cv2.imread', 'cv2.imread', (['self.welcome_img_path'], {}), '(self.welcome_img_path)\n', (3380, 3403), False, 'import cv2\n'), ((3471, 3489), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (3478, 3489), True, 'import numpy as np\n'), ((32166, 32184), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (32173, 32184), True, 'import numpy as np\n'), ((35856, 35926), 'skimage.transform.rescale', 'rescale', (['img_arr', '(0.5)'], {'order': '(0)', 'multichannel': '(True)', 'anti_aliasing': '(False)'}), '(img_arr, 0.5, order=0, multichannel=True, anti_aliasing=False)\n', (35863, 35926), False, 'from skimage.transform import rescale\n'), ((37293, 37362), 'skimage.transform.rescale', 'rescale', (['post_img', '(2)'], {'order': '(0)', 'multichannel': '(True)', 'anti_aliasing': '(False)'}), '(post_img, 2, order=0, multichannel=True, anti_aliasing=False)\n', (37300, 37362), False, 'from skimage.transform import rescale\n'), ((37518, 37568), 'cv2.medianBlur', 'cv2.medianBlur', (['self.posterized_image_wo_smooth', '(5)'], {}), 
'(self.posterized_image_wo_smooth, 5)\n', (37532, 37568), False, 'import cv2\n'), ((1982, 1996), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1986, 1996), False, 'from pathlib import Path\n'), ((44178, 44196), 'cv2.imread', 'cv2.imread', (['map[0]'], {}), '(map[0])\n', (44188, 44196), False, 'import cv2\n'), ((36734, 36801), 'skimage.transform.rescale', 'rescale', (['new_am', '(2)'], {'order': '(0)', 'multichannel': '(True)', 'anti_aliasing': '(False)'}), '(new_am, 2, order=0, multichannel=True, anti_aliasing=False)\n', (36741, 36801), False, 'from skimage.transform import rescale\n'), ((37416, 37449), 'numpy.clip', 'np.clip', (['(0)', '(255)', '(post_img * 255.0)'], {}), '(0, 255, post_img * 255.0)\n', (37423, 37449), True, 'import numpy as np\n'), ((37639, 37672), 'numpy.clip', 'np.clip', (['(0)', '(255)', '(post_img * 255.0)'], {}), '(0, 255, post_img * 255.0)\n', (37646, 37672), True, 'import numpy as np\n'), ((41955, 42011), 'PIL.Image.fromarray', 'Image.fromarray', (['self.imageList[self.current_image_indx]'], {}), '(self.imageList[self.current_image_indx])\n', (41970, 42011), False, 'from PIL import Image\n'), ((43052, 43110), 'PIL.Image.fromarray', 'Image.fromarray', (['self.paletteList[self.current_image_indx]'], {}), '(self.paletteList[self.current_image_indx])\n', (43067, 43110), False, 'from PIL import Image\n'), ((44698, 44768), 'numpy.array_equal', 'np.array_equal', (['self.saliency_map[:, :, 0]', 'self.saliency_map[:, :, 1]'], {}), '(self.saliency_map[:, :, 0], self.saliency_map[:, :, 1])\n', (44712, 44768), True, 'import numpy as np\n'), ((44774, 44844), 'numpy.array_equal', 'np.array_equal', (['self.saliency_map[:, :, 1]', 'self.saliency_map[:, :, 2]'], {}), '(self.saliency_map[:, :, 1], self.saliency_map[:, :, 2])\n', (44788, 44844), True, 'import numpy as np\n'), ((38893, 38906), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (38901, 38906), True, 'import numpy as np\n')] |
import pandas as pd
import re
from IPython.display import display
import matplotlib.pyplot as plt
import numpy as np
import os
from pathlib import Path
from glob import glob
import black
import natsort
import operator
# from matplotlib.pyplot import cm
import matplotlib as mpl
from constants import *
from matplotlib.lines import Line2D
plt.style.use("style.mplstyle") # matplotlib style sheet expected next to this script
# number of repeated job versions (_v1.._v3) averaged per node configuration
avg_parameter = 3
# one plot colour per I/O layer, indexed by position within layer_name
background_colour = ["r","b","k", "m"]
# size of a C double in bytes; converts element counts to bytes for GB rates
double_size = 8
layer_start, layer_end = 1,5 # setting of layers for graphs from layer_name
output_files = "output_files"
# I/O layer identifiers exactly as they appear in the CSV "Layername" column
layer_name = ["Serial", "MPIIO", "PHDF5", "ADIOS2_HDF5", "ADIOS2_BP4"]
def dict_output(filename, layer_name, array_size):
    """Read a benchmark CSV and select the rows for one layer and array size.

    Parameters
    ----------
    filename : str
        Path to a header-less CSV whose columns are Fields, Layername,
        ArraySize, NumNodes, AverageTime, AverageRate.
    layer_name : str
        I/O layer to select (value of the Layername column).
    array_size : int
        Cube side length to select (value of the ArraySize column).

    Returns
    -------
    (rate, num_nodes) : pair of pandas.Series
        AverageRate (GB/s) and NumNodes for the matching rows.
    """
    columns = ["Fields", "Layername", "ArraySize", "NumNodes", "AverageTime", "AverageRate"]
    mydata = pd.read_csv(filename, index_col=False, skiprows=0, names=columns)
    selected = (mydata.Layername == layer_name) & (mydata.ArraySize == array_size)
    return mydata.AverageRate[selected], mydata.NumNodes[selected]
def average_dir(dir, min_size, max_size):
    """Average benchmark rates over repeated runs and write a summary CSV.

    Scans ``output_dirs/<dir>/*/`` for per-run ``output.csv`` files, averages
    the AverageRate over the ``_v1``..``_v3`` repetitions of each node
    configuration (for every layer in layer_name[layer_start:layer_end] and
    every cube length 2**size for size in [min_size, max_size)), and writes
    the result to ``output_csv/<dir>_avg_output.csv``.

    NOTE(review): if no version directory contains an output.csv for a
    ``_v1`` entry, ``avgd_num`` stays 0 and the division below raises
    ZeroDivisionError — presumably at least one run always exists; confirm.
    """
    rate = []
    rate_avg = []
    ranks = []
    Global_size = []
    rate_persize = []
    """
    Obtain paths for subdirectories
    """
    dirFiles = glob(
        f"{os.getcwd()}/output_dirs/{dir}/*/"
    )  # assign full output directories for csv file
    output_dirs = natsort.natsorted(dirFiles)
    # N_size = 2 ** 8 # only checking 2^8 cube len
    for layers in range(layer_start, layer_end):
        for size in range(min_size,max_size):
            cube_len = 2 ** size
            for i in range(len(output_dirs)):
                trail = os.path.basename(
                    os.path.normpath(output_dirs[i])
                )  # gives name of cores, and processors
                if "_v1" in trail:  # start with first version of file, iterate from then.
                    rate_avg = 0
                    avgd_num = 0
                    for job_num in range(1,avg_parameter+1):  # for v1,v2,v3
                        avgd_dir = output_dirs[i].replace("v1",f"v{job_num}")  # replace v1 with v2,v3? Preceeding path can stay same.
                        avgd_file = f"{avgd_dir}output.csv"
                        if os.path.isfile(avgd_file):  # check if v1, v2, v3 exist for that node config
                            avgd_num += 1  # accurate avg parameter, iterates for every version that actually exits.
                            rate, ranks = dict_output(avgd_file,layer_name[layers],cube_len)
                            rate_avg += rate
                    rate_avg = rate_avg/avgd_num
                    Global_size = ((cube_len ** 3) * double_size * ranks.values) / (10 ** 9)  # global array size from cube length
                    rate_persize.append([layer_name[layers], ranks.values[0], cube_len, Global_size, rate_avg.values[0]])  # append plot output data to rate_persize
    # output of rate_persize list array to csv file
    output_data = pd.DataFrame(rate_persize, columns=[ 'LayerName','Ranks', 'CubeLen', 'GlobalSize', 'Rate'])
    out = output_data.to_csv(f"output_csv/{dir}_avg_output.csv", index=False)
def plot_rate_v_ranks(target_dir, layer_start, layer_end, min_size, max_size, ax1):
    """Plot average rate vs MPI ranks for one result directory onto *ax1*.

    First regenerates ``output_csv/<target_dir>_avg_output.csv`` via
    :func:`average_dir`, then draws one line per (layer, cube length).
    Uses module-level globals ``layer_name``, ``marker_desc`` and
    ``background_colour``.

    :param target_dir: directory name under ``output_dirs``
    :param layer_start: first index into ``layer_name`` to plot
    :param layer_end: exclusive upper bound of the layer index
    :param min_size: smallest cube-length exponent (size_t = 2**y)
    :param max_size: exclusive upper bound of the cube-length exponent
    :param ax1: matplotlib axes to draw into
    :return: (max_rate, max_rank) — the highest rate seen and its rank count
    """
    max_rate, max_rank = 0, 0
    # outputdata_layer_v_size(f"{target_dir}", min_size, max_size) # data processing function, outputs everything to output.csv
    average_dir(f"{target_dir}", min_size, max_size) # data processing function, outputs everything to output.csv
    input_data = pd.read_csv( # read from output.csv
        f"output_csv/{target_dir}_avg_output.csv",
        index_col=False,
        skiprows=0
    )
    for x in range(layer_start, layer_end):
        for y in range(min_size, max_size):
            size_t = 2 ** y
            label1 = layer_name[x]
            ranks = input_data.Ranks[ (input_data.LayerName == layer_name[x]) & (input_data.CubeLen == size_t) ]
            rate = input_data.Rate[ ( input_data.LayerName == layer_name[x]) & (input_data.CubeLen == size_t ) ]
            ax1.plot(ranks, rate, marker_desc[x], label=label1, c=background_colour[x-layer_start])
            # NOTE(review): max() raises ValueError when the filter matches no
            # rows — confirm every (layer, size) combination exists in the CSV.
            if max_rate < max(rate):
                max_rate = max(rate)
                max_rank = ranks[rate.idxmax()]
    return max_rate, max_rank
def plotting_mult_dirs(target_dir, min_size, max_size, param):
    """Plot average I/O rate vs MPI ranks for several result directories on one figure.

    Calls :func:`plot_rate_v_ranks` for every directory in *target_dir*,
    annotates the overall maximum rate, and adds a fixed legend for the
    MPIIO / PHDF5 / ADIOS2 layers.  Relies on module-level globals
    ``layer_start``, ``layer_end``, ``kw`` (annotation kwargs) and
    ``Line2D`` (matplotlib.lines).

    :param target_dir: iterable of directory names under ``output_dirs``
    :param min_size: smallest cube-length exponent
    :param max_size: exclusive upper bound of the cube-length exponent
    :param param: striping description interpolated into the figure title
    """
    # fig1 = plt.figure(figsize=(8,6))
    fig1 = plt.figure(figsize=(6,5.5))
    ax1 = plt.axes()
    """
    Inits
    """
    max_rate, max_rank, local_rate, local_rank = 0,0,0,0 # init variable for finding max rate and corresponding ranks
    for x in target_dir:
        local_rate, local_rank = plot_rate_v_ranks(x, layer_start, layer_end, min_size, max_size, ax1)
        if max_rate < local_rate: #in case multiple set of directories
            max_rate = local_rate
            max_rank = local_rank
    text = "Rank={:.0f},Max Rate={:.3f}GB/s".format(max_rank,max_rate)
    ax1.annotate(text, xy=(max_rank, max_rate), xytext=(0.95, 0.96), **kw)
    legend_elements = [
        Line2D([0], [0], marker = "D", color='r', lw=1, label='MPIIO'),
        Line2D([0], [0], marker= "o", color='b', lw=1, label='PHDF5'),
        Line2D([0], [0], marker= "v", color='k', lw=1,label='ADIOS2/HDF5'),
        Line2D([0], [0], marker= "^", color='m', lw=1, label='ADIOS2/BP4')]
    ax1.legend(handles=legend_elements)
    ax1.set_xlabel("MPI ranks")
    ax1.set_ylabel("Average Rate (GB/s)")
    ax1.set_yscale("log")
    ax1.set_xscale("log")
    ax1.set_ybound(0,4)
    fig1.suptitle(f"I/O rate comparison b/w NextGenIO & Fulhame - {param} striping")
    fig1.tight_layout()
def speedup(dir):
    """Plot speedup relative to PHDF5 against global array size.

    Walks ``<cwd>/<dir>/*/`` for ``output.csv`` files of the ``_v1`` runs,
    computes each layer's speedup against ``layer_name[2]`` via
    :func:`speedup_comp`, and plots it for the 1-node and 384-node
    configurations.  Uses the module-level global ``layer_name``.

    :param dir: result directory relative to the CWD
                (NOTE(review): shadows the builtin ``dir``)
    """
    double_size = 8
    layer_start = 3 # setting of layers for graphs from layer_name
    layer_end = 5
    rate_hdf5 = []
    num_nodes_hdf5 = []
    array_hdf5 = []
    """
    Plot formatting
    """
    # fig1 = plt.figure(figsize=(8,6))
    fig1 = plt.figure(figsize=(8,6))
    # plt.rcParams["font.size"] = "10"
    ax1 = plt.axes()
    """
    Obtain paths for subdirectories
    """
    dirFiles = glob(
        f"{os.getcwd()}/{dir}/*/"
    ) # assign full output directories for csv file
    output_dirs = natsort.natsorted(dirFiles)
    num_dir = len(output_dirs) # number of outputs
    # marker_desc = ["-o", "-*", "-+", "-D", "-^", "-<", "-s", "-.", "-p", "-h"]
    background_colour = ["r","b","k","m","y"]
    """
    Plots
    """
    # NOTE(review): max_time / max_time_ar / max_rate_ar are never used below.
    max_rate = 0
    max_time = 0
    max_time_ar = 0
    max_rate_ar = 0
    new_rate = []
    marker_desc = ["-*","-o","--*","--o"]
    iter = 0  # NOTE(review): shadows the builtin iter()
    for x in range(layer_start, layer_end):
        for i in range(num_dir): # number of tails for same proc
            trail = os.path.basename(
                os.path.normpath(output_dirs[i])
            ) # gives name of core_no.ofproc
            filename = output_dirs[i] + "output.csv"
            # NOTE(review): f"_v{1}" is just the constant "_v1".
            if f"_v{1}" in trail: # select particular v.
                rate, num_nodes, array = output_speedup(filename, layer_name[x])
                rate_hdf5, num_nodes_hdf5, array_hdf5 = output_speedup(filename, layer_name[2]) # obtain hdf5 parameters for comparison
                new_rate = speedup_comp(rate_hdf5, rate)
                layer = f"{layer_name[x]}/{num_nodes.values[x]}"
                if num_nodes.values[0] == 1: # hacky way of selecting num_nodes.
                    ax1.plot(array, new_rate, marker_desc[iter], c=background_colour[iter], label=layer)
                    iter += 1
                if num_nodes.values[0] == 384: # hacky way of selecting num_nodes.
                    ax1.plot(array, new_rate, marker_desc[iter], c=background_colour[iter], label=layer)
                    iter += 1
    ax1.legend(loc = "upper right")# Don't allow the axis to be on top of your data
    ax1.set_xlabel("Global Size (GB)")
    ax1.set_ylabel("Speedup compared to HDF5")
    ax1.set_yscale("log")
    ax1.set_xscale("log")
    # fig1.suptitle("Benchmarking speedup results w.r.t. I/O rates for HDF5")
    fig1.tight_layout()
def output_speedup(filename, layer_name):
    """Read a benchmark output CSV and return rate, node count and global size.

    :param filename: path of an output.csv with columns
        Fields, Layername, ArraySize, NumNodes, AverageTime, AverageRate
        (the file carries no header row)
    :param layer_name: I/O layer to filter on (matches the Layername column)
    :return: tuple ``(rate, num_nodes, Global_size)`` — AverageRate in GB/s,
        node counts, and the global array size in GB computed from the local
        cube length, 8-byte doubles and the first node count.
    """
    bytes_per_double = 8
    column_names = [
        "Fields",
        "Layername",
        "ArraySize",
        "NumNodes",
        "AverageTime",
        "AverageRate"
    ]
    table = pd.read_csv(filename, index_col=False, skiprows=0, names=column_names)
    # Boolean mask selecting the rows of the requested I/O layer.
    selected = table.Layername == layer_name
    array = table.ArraySize[selected]  # N1 local array dimension
    num_nodes = table.NumNodes[selected]
    # Global size in GB: (local cube)^3 * sizeof(double) * nodes / 1e9.
    Global_size = (array ** 3) * bytes_per_double * num_nodes.values[0] / (10 ** 9)
    rate = table.AverageRate[selected]  # GB/s
    return rate, num_nodes, Global_size
def speedup_comp(rate_hdf5,rate):
    """Return the element-wise speedup of *rate* relative to *rate_hdf5*.

    :param rate_hdf5: baseline rates (pandas Series; read through ``.values``)
    :param rate: measured rates (pandas Series; read through ``.values``);
        must have at least as many entries as *rate_hdf5*
    :return: list of ``rate[i] / rate_hdf5[i]`` for every baseline entry
    """
    # Idiom fix: list comprehension instead of a manual append loop
    # (same iteration bounds and indexing as the original).
    return [rate.values[x] / rate_hdf5.values[x] for x in range(len(rate_hdf5))]
def xcompact():
    """Plot XCompact3D total / write / compute times against MPI ranks.

    Reads ``xcompact.csv`` from the current working directory (columns
    Rank, TotalTime, WriteTime, ComputeTime, BW; no header row) and marks
    the I/O bottleneck region with an arrow and a vertical line at 512
    ranks.
    """
    fig1 = plt.figure(figsize=(6,5.5))
    # fig1 = plt.figure(figsize=(8,6))
    # plt.rcParams["font.size"] = "10"
    ax1 = plt.axes()
    xcom = pd.read_csv(
        "xcompact.csv",
        index_col=False,
        skiprows=0,
        names=[
            "Rank",
            "TotalTime",
            "WriteTime",
            "ComputeTime",
            "BW"
        ]
    )
    # Arrow geometry (data coordinates) pointing at the bottleneck region.
    arrow_dim = 300
    point_dim = 550
    plt.arrow(x=point_dim+arrow_dim, y=60, dx=-arrow_dim, dy=0, width=1.5, head_length=30, facecolor='red')
    plt.annotate('I/O bottleneck', xy = (point_dim+arrow_dim+50, 58))
    ax1.plot(xcom.Rank,xcom.TotalTime, "-^", c="r", label="Total")
    ax1.plot(xcom.Rank,xcom.WriteTime,"-o", c="b",label = "Write" )
    ax1.plot(xcom.Rank,xcom.ComputeTime, "-<", c="k", label = "Compute")
    ax1.set_xlabel("MPI ranks")
    ax1.set_ylabel("Time taken (s)")
    ax1.legend(loc = "upper right")
    ax1.set_yscale("log")
    ax1.set_xscale("log")
    plt.axvline(x=512, color='k', linestyle='--')
    # fig1.suptitle("Benchmarking for XCompact3D")
def outputdata_layer_v_size(filedir, cube_len_start, cube_len_end):
    """Collect per-layer rates for each cube length and dump them to CSV.

    For every run directory under ``output_dirs/<filedir>/*/`` and every
    layer except "Serial", queries :func:`dict_output` for each cube-length
    exponent and writes the rows to ``output_csv/<filedir>_output.csv``.
    Uses the module-level globals ``double_size`` and ``dict_output``.

    :param filedir: sub-directory name under ``output_dirs``
    :param cube_len_start: smallest cube-length exponent (N_size = 2**x)
    :param cube_len_end: exclusive upper bound of the cube-length exponent
    """
    layer_name = ["Serial", "MPIIO", "PHDF5", "ADIOS2_HDF5", "ADIOS2_BP4"]
    rate_persize = []
    target_dir = f"{os.getcwd()}/output_dirs/{filedir}/*/"
    layer_start, layer_end = 1, len(layer_name)
    """
    Obtain paths for subdirectories
    """
    dirFiles = glob(
        target_dir
    ) # assign full output directories for csv file
    output_dirs = natsort.natsorted(dirFiles)
    """
    Find data points recursively from the target dir, selecting based on layer name
    """
    for l in range(layer_start, layer_end): # select layer name
        for i in range(len(output_dirs)):
            # NOTE(review): `filename` and `trail` are computed but never used
            # in this loop body.
            filename = output_dirs[i] + "output.csv"
            trail = os.path.basename(
                os.path.normpath(output_dirs[i])
            ) # gives name of core_no.ofproc
            for x in range (cube_len_start,cube_len_end): # specify max array size parameter
                N_size = 2 ** x
                rate, ranks = dict_output(f"{output_dirs[i]}output.csv",layer_name[l],N_size) # info from specified directory, against matching layername and cube length
                Global_size = ((N_size ** 3) * double_size * ranks.values[0]) / (10 ** 9) # global array size from cube length
                rate_persize.append([layer_name[l], ranks.values[0], N_size, Global_size, rate.values[0]]) # append plot output data to rate_persize
    # output of rate_persize list array to csv file
    output_data = pd.DataFrame(rate_persize, columns=[ 'LayerName','Ranks', 'CubeLen', 'GlobalSize', 'Rate'])
    out = output_data.to_csv(f"output_csv/{filedir}_output.csv", index=False)
def compare_benchio_f(): # bar plot to compare benchio with benchmark_c
    """Bar plot comparing the C benchmark against the Fortran benchio results.

    Both runs use 1 I/O run and 1 rank.  NOTE(review): the original note said
    "Array size = 250*250*250" but the code below uses 256**3 — confirm which
    is correct.  Uses module-level globals ``output_files``, ``double_size``,
    ``layer_name`` and the helper ``dict_output``.
    """
    """ Plot formatting """
    # fig1 = plt.figure(figsize=(8,6))
    fig1 = plt.figure(figsize=(6,5.5))
    ax1 = plt.axes()
    """Data initialisation"""
    rate_c = np.empty(3)
    array = (
        256 * 256 * 256 * double_size / (10 ** 9)
    ) # for array size in GB with size of double = 8bytes
    """Data from benchio fortran"""
    # avg rate from benchio fortran computed in B/s
    # NOTE(review): the file handle `f` is never closed — consider a `with` block.
    f = open(f"{output_files}/nvram_benchiof.out", 'r')
    avg_rates_f = re.findall("avgrate\s=\s+(\d+\.\d+)", f.read())
    rate_f = [x * (2 ** 20) / (10 ** 9) for x in [float(i) for i in avg_rates_f]] #convert units from MiB/s to GB/s
    """Data from benchio c"""
    for i in range(0, 3): # to compare serial, mpi, hdf5
        rate_c[i], num_nodes_temp = dict_output(f"{output_files}/nvram.csv", layer_name[i], 256)
    """Bar plot"""
    index = np.arange(3)
    bar_width = 0.35
    benchio_c = ax1.bar(index, rate_c, bar_width, label="benchmark_c")
    benchio_f = ax1.bar(index + bar_width, rate_f, bar_width, label="benchio")
    ax1.set_xlabel("Layers")
    # NOTE(review): "Averate" is a typo in the displayed label (runtime string,
    # left unchanged here).
    ax1.set_ylabel("Averate Rate (GB/s)")
    ax1.legend()
    ax1.set_xticks(index + bar_width / 2)
    ax1.set_xticklabels(layer_name[0:3])
    fig1.suptitle("NVRAM verification")
fig1.tight_layout() | [
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.style.use",
"os.getcwd",
"os.path.normpath",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.axes",
"natsort.natsorted",
"numpy.empty",
"os.path.isfile",
"pandas.DataFrame",
"mat... | [((340, 371), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""style.mplstyle"""'], {}), "('style.mplstyle')\n", (353, 371), True, 'import matplotlib.pyplot as plt\n'), ((782, 922), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'index_col': '(False)', 'skiprows': '(0)', 'names': "['Fields', 'Layername', 'ArraySize', 'NumNodes', 'AverageTime', 'AverageRate']"}), "(filename, index_col=False, skiprows=0, names=['Fields',\n 'Layername', 'ArraySize', 'NumNodes', 'AverageTime', 'AverageRate'])\n", (793, 922), True, 'import pandas as pd\n'), ((1656, 1683), 'natsort.natsorted', 'natsort.natsorted', (['dirFiles'], {}), '(dirFiles)\n', (1673, 1683), False, 'import natsort\n'), ((3323, 3418), 'pandas.DataFrame', 'pd.DataFrame', (['rate_persize'], {'columns': "['LayerName', 'Ranks', 'CubeLen', 'GlobalSize', 'Rate']"}), "(rate_persize, columns=['LayerName', 'Ranks', 'CubeLen',\n 'GlobalSize', 'Rate'])\n", (3335, 3418), True, 'import pandas as pd\n'), ((3871, 3958), 'pandas.read_csv', 'pd.read_csv', (['f"""output_csv/{target_dir}_avg_output.csv"""'], {'index_col': '(False)', 'skiprows': '(0)'}), "(f'output_csv/{target_dir}_avg_output.csv', index_col=False,\n skiprows=0)\n", (3882, 3958), True, 'import pandas as pd\n'), ((4795, 4823), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5.5)'}), '(figsize=(6, 5.5))\n', (4805, 4823), True, 'import matplotlib.pyplot as plt\n'), ((4833, 4843), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (4841, 4843), True, 'import matplotlib.pyplot as plt\n'), ((6317, 6343), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (6327, 6343), True, 'import matplotlib.pyplot as plt\n'), ((6397, 6407), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (6405, 6407), True, 'import matplotlib.pyplot as plt\n'), ((6587, 6614), 'natsort.natsorted', 'natsort.natsorted', (['dirFiles'], {}), '(dirFiles)\n', (6604, 6614), False, 'import natsort\n'), ((8639, 8779), 
'pandas.read_csv', 'pd.read_csv', (['filename'], {'index_col': '(False)', 'skiprows': '(0)', 'names': "['Fields', 'Layername', 'ArraySize', 'NumNodes', 'AverageTime', 'AverageRate']"}), "(filename, index_col=False, skiprows=0, names=['Fields',\n 'Layername', 'ArraySize', 'NumNodes', 'AverageTime', 'AverageRate'])\n", (8650, 8779), True, 'import pandas as pd\n'), ((9698, 9726), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5.5)'}), '(figsize=(6, 5.5))\n', (9708, 9726), True, 'import matplotlib.pyplot as plt\n'), ((9819, 9829), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (9827, 9829), True, 'import matplotlib.pyplot as plt\n'), ((9842, 9965), 'pandas.read_csv', 'pd.read_csv', (['"""xcompact.csv"""'], {'index_col': '(False)', 'skiprows': '(0)', 'names': "['Rank', 'TotalTime', 'WriteTime', 'ComputeTime', 'BW']"}), "('xcompact.csv', index_col=False, skiprows=0, names=['Rank',\n 'TotalTime', 'WriteTime', 'ComputeTime', 'BW'])\n", (9853, 9965), True, 'import pandas as pd\n'), ((10095, 10204), 'matplotlib.pyplot.arrow', 'plt.arrow', ([], {'x': '(point_dim + arrow_dim)', 'y': '(60)', 'dx': '(-arrow_dim)', 'dy': '(0)', 'width': '(1.5)', 'head_length': '(30)', 'facecolor': '"""red"""'}), "(x=point_dim + arrow_dim, y=60, dx=-arrow_dim, dy=0, width=1.5,\n head_length=30, facecolor='red')\n", (10104, 10204), True, 'import matplotlib.pyplot as plt\n'), ((10204, 10271), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""I/O bottleneck"""'], {'xy': '(point_dim + arrow_dim + 50, 58)'}), "('I/O bottleneck', xy=(point_dim + arrow_dim + 50, 58))\n", (10216, 10271), True, 'import matplotlib.pyplot as plt\n'), ((10645, 10690), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(512)', 'color': '"""k"""', 'linestyle': '"""--"""'}), "(x=512, color='k', linestyle='--')\n", (10656, 10690), True, 'import matplotlib.pyplot as plt\n'), ((11113, 11129), 'glob.glob', 'glob', (['target_dir'], {}), '(target_dir)\n', (11117, 11129), False, 'from glob import 
glob\n'), ((11209, 11236), 'natsort.natsorted', 'natsort.natsorted', (['dirFiles'], {}), '(dirFiles)\n', (11226, 11236), False, 'import natsort\n'), ((12295, 12390), 'pandas.DataFrame', 'pd.DataFrame', (['rate_persize'], {'columns': "['LayerName', 'Ranks', 'CubeLen', 'GlobalSize', 'Rate']"}), "(rate_persize, columns=['LayerName', 'Ranks', 'CubeLen',\n 'GlobalSize', 'Rate'])\n", (12307, 12390), True, 'import pandas as pd\n'), ((12750, 12778), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5.5)'}), '(figsize=(6, 5.5))\n', (12760, 12778), True, 'import matplotlib.pyplot as plt\n'), ((12788, 12798), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (12796, 12798), True, 'import matplotlib.pyplot as plt\n'), ((12843, 12854), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (12851, 12854), True, 'import numpy as np\n'), ((13525, 13537), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (13534, 13537), True, 'import numpy as np\n'), ((5433, 5493), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'marker': '"""D"""', 'color': '"""r"""', 'lw': '(1)', 'label': '"""MPIIO"""'}), "([0], [0], marker='D', color='r', lw=1, label='MPIIO')\n", (5439, 5493), False, 'from matplotlib.lines import Line2D\n'), ((5501, 5561), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'marker': '"""o"""', 'color': '"""b"""', 'lw': '(1)', 'label': '"""PHDF5"""'}), "([0], [0], marker='o', color='b', lw=1, label='PHDF5')\n", (5507, 5561), False, 'from matplotlib.lines import Line2D\n'), ((5568, 5634), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'marker': '"""v"""', 'color': '"""k"""', 'lw': '(1)', 'label': '"""ADIOS2/HDF5"""'}), "([0], [0], marker='v', color='k', lw=1, label='ADIOS2/HDF5')\n", (5574, 5634), False, 'from matplotlib.lines import Line2D\n'), ((5640, 5705), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'marker': '"""^"""', 'color': '"""m"""', 'lw': '(1)', 'label': '"""ADIOS2/BP4"""'}), "([0], [0], marker='^', color='m', lw=1, 
label='ADIOS2/BP4')\n", (5646, 5705), False, 'from matplotlib.lines import Line2D\n'), ((10958, 10969), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10967, 10969), False, 'import os\n'), ((1550, 1561), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1559, 1561), False, 'import os\n'), ((6493, 6504), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6502, 6504), False, 'import os\n'), ((7153, 7185), 'os.path.normpath', 'os.path.normpath', (['output_dirs[i]'], {}), '(output_dirs[i])\n', (7169, 7185), False, 'import os\n'), ((11554, 11586), 'os.path.normpath', 'os.path.normpath', (['output_dirs[i]'], {}), '(output_dirs[i])\n', (11570, 11586), False, 'import os\n'), ((1998, 2030), 'os.path.normpath', 'os.path.normpath', (['output_dirs[i]'], {}), '(output_dirs[i])\n', (2014, 2030), False, 'import os\n'), ((2554, 2579), 'os.path.isfile', 'os.path.isfile', (['avgd_file'], {}), '(avgd_file)\n', (2568, 2579), False, 'import os\n')] |
#!/usr/bin/env python
# Copyright (c) 2015 SnapDisco Pty Ltd, Australia.
# All rights reserved.
#
# This source code is licensed under the terms of the MIT license
# found in the "LICENSE" file in the root directory of this source tree.
import _refcount
import numpy as np
import sys
def refcount(obj):
    """Reference count of *obj* as seen by the caller.

    ``sys.getrefcount`` reports 3 extra references from this call chain
    (+1 for this function's parameter, +1 for ``getrefcount``'s own
    argument, +1 apparently from the call machinery), so they are
    subtracted back out before returning.
    """
    observed = sys.getrefcount(obj)
    return observed - 3
def check_refcount(varname, var, expected_rc):
    """Assert that *var*'s refcount equals *expected_rc*, printing the result.

    Two additional references introduced by this function's own call frame
    are subtracted before the comparison.
    """
    observed = refcount(var) - 2
    print("Get refcount(%s) -> %d" % (varname, observed))
    assertion = "refcount(%s) == %d" % (varname, expected_rc)
    assert observed == expected_rc, assertion
    print("TEST PASSED: %s" % assertion)
# Demo script: verify that the Nim extension module `_refcount` manages
# Python reference counts correctly across the language boundary.
# NOTE: the exact statement order and bindings below are significant —
# every extra reference changes the asserted counts.

# Case 1: array created in Nim (`createsome`) — input refcount unchanged,
# fresh output owned solely by Python.
a = np.arange(20, dtype=np.int32).reshape((5, 4))
print("a =")
print(a)
check_refcount("a", a, 1)
print("\nb = _refcount.createsome(a)")
print("> Crossing into Nim...")
b = _refcount.createsome(a)
print("\n... and we're back in Python")
check_refcount("a", a, 1)
check_refcount("b", b, 1)
print("b =")
print(b)
print("\n------------\n")
# Case 2: identity round-trip with the result bound — both names reference
# the same object, so each side sees refcount 2.
c = np.arange(20, 40, dtype=np.int32).reshape((5, 4))
print("c =")
print(c)
check_refcount("c", c, 1)
print("\nd = _refcount.identity(c)")
print("> Crossing into Nim...")
d = _refcount.identity(c)
print("\n... and we're back in Python")
check_refcount("c", c, 2)
check_refcount("d", d, 2)
print("\n------------\n")
# Case 3: identity with the result discarded — refcount must return to 1.
e = np.arange(20, 40, dtype=np.int32).reshape((5, 4))
print("e =")
print(e)
check_refcount("e", e, 1)
print("\n_refcount.identity(e)")
print("> Crossing into Nim...")
# NOTE(review): the banner above says identity(e) but the call passes c —
# likely a typo; as written, e's refcount is trivially unchanged.
_refcount.identity(c)
print("\n... and we're back in Python")
check_refcount("e", e, 1)
print("\n------------\n")
# Case 4: identity of a temporary — the returned binding is the only reference.
print("f = _refcount.identity(np.arange(...))")
print("> Crossing into Nim...")
f = _refcount.identity(np.arange(40, 60, dtype=np.int32).reshape((5, 4)))
print("\n... and we're back in Python")
check_refcount("f", f, 1)
print("\n------------\n")
# Case 5: two arrays in, one out — the returned array (h) gains a reference.
g = np.arange(60, 80, dtype=np.int32).reshape((5, 4))
h = np.arange(80, 100, dtype=np.int32).reshape((5, 4))
print("g =")
print(g)
check_refcount("g", g, 1)
print("\nh =")
print(h)
check_refcount("h", h, 1)
print("\ni = _refcount.twogoinonecomesout(g, h)")
print("> Crossing into Nim...")
i = _refcount.twogoinonecomesout(g, h)
print("\n... and we're back in Python")
check_refcount("g", g, 1)
check_refcount("h", h, 2)
check_refcount("i", i, 2)
print("i =")
print(i)
| [
"_refcount.twogoinonecomesout",
"_refcount.identity",
"_refcount.createsome",
"sys.getrefcount",
"numpy.arange"
] | [((1052, 1075), '_refcount.createsome', '_refcount.createsome', (['a'], {}), '(a)\n', (1072, 1075), False, 'import _refcount\n'), ((1392, 1413), '_refcount.identity', '_refcount.identity', (['c'], {}), '(c)\n', (1410, 1413), False, 'import _refcount\n'), ((1700, 1721), '_refcount.identity', '_refcount.identity', (['c'], {}), '(c)\n', (1718, 1721), False, 'import _refcount\n'), ((2355, 2389), '_refcount.twogoinonecomesout', '_refcount.twogoinonecomesout', (['g', 'h'], {}), '(g, h)\n', (2383, 2389), False, 'import _refcount\n'), ((543, 563), 'sys.getrefcount', 'sys.getrefcount', (['obj'], {}), '(obj)\n', (558, 563), False, 'import sys\n'), ((883, 912), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'np.int32'}), '(20, dtype=np.int32)\n', (892, 912), True, 'import numpy as np\n'), ((1221, 1254), 'numpy.arange', 'np.arange', (['(20)', '(40)'], {'dtype': 'np.int32'}), '(20, 40, dtype=np.int32)\n', (1230, 1254), True, 'import numpy as np\n'), ((1537, 1570), 'numpy.arange', 'np.arange', (['(20)', '(40)'], {'dtype': 'np.int32'}), '(20, 40, dtype=np.int32)\n', (1546, 1570), True, 'import numpy as np\n'), ((2066, 2099), 'numpy.arange', 'np.arange', (['(60)', '(80)'], {'dtype': 'np.int32'}), '(60, 80, dtype=np.int32)\n', (2075, 2099), True, 'import numpy as np\n'), ((2120, 2154), 'numpy.arange', 'np.arange', (['(80)', '(100)'], {'dtype': 'np.int32'}), '(80, 100, dtype=np.int32)\n', (2129, 2154), True, 'import numpy as np\n'), ((1918, 1951), 'numpy.arange', 'np.arange', (['(40)', '(60)'], {'dtype': 'np.int32'}), '(40, 60, dtype=np.int32)\n', (1927, 1951), True, 'import numpy as np\n')] |
"""
Unit and regression test for the measure module.
"""
# Import package, test suite, and other packages as needed
import molecool
import pytest
import sys
import numpy as np
def test_calculate_distance():
    """`calculate_distance` in the `measure` module returns the Euclidean
    distance between two points."""
    point_a = np.array([1, 0, 0])
    point_b = np.array([0, 0, 0])
    assert molecool.calculate_distance(point_a, point_b) == 1.0
def test_calculate_distance_typeerror():
    """Passing plain lists (rather than ndarrays) to `calculate_distance`
    must raise TypeError."""
    point_a = [1, 0, 0]
    point_b = [0, 0, 0]
    with pytest.raises(TypeError):
        molecool.calculate_distance(point_a, point_b)
def test_calculate_angle():
    """`calculate_angle` in the `measure` module returns the angle at the
    middle point, in degrees when requested."""
    point_a = np.array([1, 0, 0])
    vertex = np.array([0, 0, 0])
    point_b = np.array([0, 1, 0])
    assert molecool.calculate_angle(point_a, vertex, point_b, degrees=True) == 90
@pytest.mark.parametrize("p1, p2, p3, expected_angle_in_degrees", [
    (np.array([np.sqrt(2)/2, np.sqrt(2)/2, 0]), np.array([0, 0, 0]), np.array([0, 1, 0]), 45),
    (np.array([1, 0, 0]), np.array([0, 0, 0]), np.array([0, 1, 0]), 90)
])
def test_calculate_angle_many(p1, p2, p3, expected_angle_in_degrees):
    """The angle at vertex *p2* matches the expected value for each
    parametrized geometry."""
    measured = molecool.calculate_angle(p1, p2, p3, degrees=True)
    assert measured == expected_angle_in_degrees
| [
"molecool.calculate_distance",
"numpy.sqrt",
"molecool.calculate_angle",
"numpy.array",
"pytest.raises"
] | [((330, 349), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (338, 349), True, 'import numpy as np\n'), ((359, 378), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (367, 378), True, 'import numpy as np\n'), ((434, 469), 'molecool.calculate_distance', 'molecool.calculate_distance', (['r1', 'r2'], {}), '(r1, r2)\n', (461, 469), False, 'import molecool\n'), ((978, 997), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (986, 997), True, 'import numpy as np\n'), ((1007, 1026), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1015, 1026), True, 'import numpy as np\n'), ((1036, 1055), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (1044, 1055), True, 'import numpy as np\n'), ((1126, 1176), 'molecool.calculate_angle', 'molecool.calculate_angle', (['r1', 'r2', 'r3'], {'degrees': '(True)'}), '(r1, r2, r3, degrees=True)\n', (1150, 1176), False, 'import molecool\n'), ((1688, 1738), 'molecool.calculate_angle', 'molecool.calculate_angle', (['p1', 'p2', 'p3'], {'degrees': '(True)'}), '(p1, p2, p3, degrees=True)\n', (1712, 1738), False, 'import molecool\n'), ((740, 764), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (753, 764), False, 'import pytest\n'), ((796, 831), 'molecool.calculate_distance', 'molecool.calculate_distance', (['r1', 'r2'], {}), '(r1, r2)\n', (823, 831), False, 'import molecool\n'), ((1358, 1377), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1366, 1377), True, 'import numpy as np\n'), ((1379, 1398), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (1387, 1398), True, 'import numpy as np\n'), ((1406, 1425), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1414, 1425), True, 'import numpy as np\n'), ((1427, 1446), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1435, 1446), True, 'import numpy as np\n'), ((1448, 1467), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 
0])\n', (1456, 1467), True, 'import numpy as np\n'), ((1325, 1335), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1332, 1335), True, 'import numpy as np\n'), ((1339, 1349), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1346, 1349), True, 'import numpy as np\n')] |
#<NAME>
#Last Modified: 12/15/2015
import numpy as np
import matplotlib as mpl
from ..calculations import HexDhCal
from scipy.integrate import trapz
# NOTE(review): this array is immediately rebound by the `def Tbulk` below,
# so it is discarded — confirm whether the array or the function was intended.
Tbulk = np.array([1,0])
def Tbulk(z,FluxPro,Tbulkin,NFuel,qlin,Cp,Uinlet,rho,HaF):
    """Bulk coolant temperature (deg C) over axial segments 2..7.

    NOTE(review): as written, calling this raises NameError — ``Ac``,
    ``FoCD`` and ``WoD`` are not defined anywhere in this module, and the
    ``HaF`` parameter is shadowed by the ``HexDhCal.HaF`` attribute access.
    The loop also rebinds ``Tbulk`` each iteration, so only the last
    segment's value is returned.  ``np.array``'s second positional argument
    is ``dtype`` — passing ``FluxPro(...)`` there looks wrong; confirm the
    intended expression.
    """
    for i in range(2,8):
        Tbulk = Tbulkin + (np.array(trapz(z,None,i,0),FluxPro(np.array([0,i])))*NFuel*qlin)/(Cp*Uinlet*rho*HexDhCal.HaF(HexDhCal.Ha(Ac),NFuel,FoCD,WoD)) #Bulk Temperature of Coolant - C
    return Tbulk
print(Tbulk)  # NOTE(review): runs at import time and prints the function object
| [
"numpy.array",
"scipy.integrate.trapz"
] | [((173, 189), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (181, 189), True, 'import numpy as np\n'), ((309, 329), 'scipy.integrate.trapz', 'trapz', (['z', 'None', 'i', '(0)'], {}), '(z, None, i, 0)\n', (314, 329), False, 'from scipy.integrate import trapz\n'), ((335, 351), 'numpy.array', 'np.array', (['[0, i]'], {}), '([0, i])\n', (343, 351), True, 'import numpy as np\n')] |
"""
formation evaluation
Author: <NAME>
Email: <EMAIL>
"""
import lasio
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.ticker as ticker
import warnings
plt.style.use('ggplot')  # ggplot look for every figure produced by this module
warnings.filterwarnings("ignore")  # NOTE(review): silences *all* warnings globally at import — confirm intended
class formation_eval:
    """Formation evaluation from LAS well-log data.

    Reads a .LAS file, filters it down to the standard well-log curves
    (or a caller-supplied mnemonic list), cleans missing values and plots
    the log tracks.

    :param datapath: path of the LAS file to read
    :param mnemonics: list of well-log mnemonics; if None, the default set
        (depth, gamma ray, SP, caliper, resistivity, neutron, density, sonic,
        PEF) is used where available.
    """
def __init__(self, datapath: str = None, mnemonics: list = None):
"""
:type datapath: str
:type mnemonics: list
"""
if mnemonics is None:
self.mnemonics = mnemonics
if datapath is None:
pass
else:
self.datapath = datapath
def read_lasio(self):
"""
reads the .LAS file and converts to a dataframe
:return: log data description and log dataframe
"""
path = self.datapath
lasfile = lasio.read(path)
df = lasfile.df()
if 'D' or 'E' or 'P' in df.index.name:
df.reset_index(inplace=True)
return lasfile.header, df
def well_logs(self, dataframe: pd.DataFrame):
"""
filters the dataframe and returns a dataframe with the specified mnemonics or necessary and available mnemonics
:param dataframe: pandas dataframe object
:return: well log dataframe
"""
if self.mnemonics is None:
logs = ['DEPTH', 'ROP', 'GR', 'SP', 'CALI', 'BS', 'RD', 'RT', 'RM', 'RS', 'NPHI',
'CNL', 'RHOB', 'DRHO', 'PHID', 'DT', 'PEF']
logsnew = []
for i in logs:
if i in dataframe.columns:
logsnew.append(i)
else:
logs = self.mnemonics
logsnew = []
for i in logs:
if i in dataframe.columns:
logsnew.append(i)
well_logs = logsnew
if 'GR' in well_logs:
dataframe['GR'] = np.where(dataframe['GR'] > 150, 150, dataframe['GR'])
dataframe['GR'] = np.where(dataframe['GR'] < 0, 0, dataframe['GR'])
if 'NPHI' in well_logs:
dataframe['NPHI'] = np.where(dataframe['NPHI'] > 0.50, 0.50, dataframe['NPHI'])
dataframe['NPHI'] = np.where(dataframe['NPHI'] < -0.15, -0.15, dataframe['NPHI'])
return dataframe[well_logs]
@staticmethod
def lognan_cleaning(dataframe: pd.DataFrame, fill_value: int or float = None):
"""
This fills the missing numbers in the dataframe
:param fill_value: value to replace missing number with, if 'None', mean is used.
:param dataframe: pandas dataframe
:rtype: pd.Dataframe
"""
df = dataframe.copy()
if fill_value is None:
df.fillna(np.mean(df), inplace=True)
else:
df.fillna(fill_value, axis=1, inplace=True)
return df
@staticmethod
def log_viz(data, min_depth: float or int or None = None, max_depth: float or int or None = None,
plotsize: tuple = None):
"""
well log plots
:param data: well logs dataframe
:param min_depth: top of reservoir, closer to the surface (length units)
:param max_depth: bottom of reservoir, closer to the subsurface (length units)
:param plotsize: the plot figsize in tuple form
"""
if plotsize is None:
plotsize = (18, 15)
logs = data.columns
fig, ax = plt.subplots(nrows=1, ncols=6, figsize=plotsize, sharey=False)
fig.suptitle("Logs Visualization", fontsize=22, y=1.02)
# General setting for all axis
for axes in ax:
axes.get_xaxis().set_visible(False)
axes.invert_yaxis()
axes.spines['left'].set_color('k')
axes.spines['right'].set_color('k')
axes.minorticks_on()
if min_depth and max_depth is not None:
axes.set_ylim(max_depth, min_depth)
else:
axes.set_ylim(data['DEPTH'].max(), data['DEPTH'].min())
# 1st track: CALI, BS
if 'CALI' and 'BS' not in logs:
fig.delaxes(ax=ax[0])
ax[1].set_ylim(max_depth, min_depth)
else:
ax[0].minorticks_on()
ax[0].grid(b=True, which='major', color='black', linestyle='--')
ax[0].grid(b=True, which='minor', color='grey', linestyle=':')
if 'CALI' in logs:
cali = ax[0].twiny()
cali.minorticks_on()
cali.set_xlim(6, 26)
cali.plot(data.CALI, data.DEPTH, label='CALI[in]', color='red')
cali.spines['top'].set_position(('outward', 20))
cali.spines['top'].set_color('r')
cali.set_xlabel('CALI[in]', color='red')
cali.tick_params(axis='x', colors='red')
cali.grid(b=True, which='major', color='k', linestyle='--')
cali.grid(b=True, which='minor', color='grey', linestyle=':')
else:
pass
if 'BS' in logs:
bs = ax[0].twiny()
bs.minorticks_on()
bs.set_xlim(6, 26)
bs.plot(data.BS, data.DEPTH, label='BS[in]', color='y')
bs.spines['top'].set_position(('outward', 60))
bs.spines['top'].set_color('y')
bs.set_xlabel('BS[in]', color='y')
bs.tick_params(axis='x', colors='y')
bs.grid(b=True, which='major', color='k', linestyle='--')
bs.grid(b=True, which='minor', color='grey', linestyle=':')
else:
pass
# 2nd track: GR, SP
if 'CALI' and 'BS' not in logs:
ax[1].set_ylim(max_depth, min_depth)
ax[1].minorticks_on()
ax[1].grid(b=True, which='major', color='black', linestyle='--')
ax[1].grid(b=True, which='minor', color='grey', linestyle=':')
if 'GR' in logs:
gr = ax[1].twiny()
gr.minorticks_on()
gr.set_xlim(0, 150)
gr.plot(data.GR, data.DEPTH, label='GR[api]', color='green')
gr.spines['top'].set_position(('outward', 20))
gr.spines['top'].set_color('g')
gr.set_xlabel('GR[api]', color='green')
gr.tick_params(axis='x', colors='green')
gr.grid(b=True, which='major', color='k', linestyle='--')
gr.grid(b=True, which='minor', color='grey', linestyle=':')
if 'SP' in logs:
sp = ax[1].twiny()
sp.minorticks_on()
sp.set_xlim(data['SP'].min(), data.max())
sp.plot(data.GR, data.DEPTH, label='SP[mV]', color='b')
sp.spines['top'].set_position(('outward', 60))
sp.spines['top'].set_color('b')
sp.set_xlabel('GR[api]', color='b')
sp.tick_params(axis='x', colors='b')
sp.grid(b=True, which='major', color='k', linestyle='--')
sp.grid(b=True, which='minor', color='grey', linestyle=':')
# 3rd track: resistivity track
ax[2].grid(b=True, which='major', color='black', linestyle='--')
ax[2].grid(b=True, which='minor', color='grey', linestyle=':')
ax[2].minorticks_on()
if 'RD' in logs:
rd = ax[2].twiny()
rd.minorticks_on()
rd.set_xlim(0.2, 2500)
rd.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
rd.spines['top'].set_position(('outward', 10))
rd.spines['top'].set_color('k')
rd.semilogx(data.RD, data.DEPTH, '--', linewidth=1, c='black')
rd.set_xlabel('RD [ohm.m]', color='black')
rd.tick_params(axis='x', colors='black')
rd.grid(b=True, which='major', color='black', linestyle='--')
rd.grid(b=True, which='minor', color='grey', linestyle=':')
if 'RS' in logs:
rs = ax[2].twiny()
rs.minorticks_on()
rs.set_xlim(0.2, 2500)
rs.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
rs.spines['top'].set_position(('outward', 50))
rs.spines['top'].set_color('b')
rs.semilogx(data.RS, data.DEPTH, linewidth=1, c='b', )
rs.set_xlabel('RS [ohm.m]', color='b')
rs.tick_params(axis='x', colors='b')
rs.grid(b=True, which='major', color='black', linestyle='--')
rs.grid(b=True, which='minor', color='grey', linestyle=':')
if 'RM' in logs:
rm = ax[2].twiny()
rm.minorticks_on()
rm.set_xlim(0.2, 2500)
rm.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
rm.spines['top'].set_position(('outward', 90))
rm.spines['top'].set_color('gold')
rm.semilogx(data.RM, data.DEPTH, linewidth=1, c='gold', )
rm.set_xlabel('RM [ohm.m]', color='gold')
rm.tick_params(axis='x', colors='gold')
rm.grid(b=True, which='major', color='black', linestyle='--')
rm.grid(b=True, which='minor', color='grey', linestyle=':')
if 'RT' in logs:
rt = ax[2].twiny()
rt.minorticks_on()
rt.set_xlim(0.2, 2500)
rt.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
rt.spines['top'].set_position(('outward', 130))
rt.spines['top'].set_color('brown')
rt.semilogx(data.RT, data.DEPTH, linewidth=1, c='brown', )
rt.set_xlabel('RT [ohm.m]', color='brown')
rt.tick_params(axis='x', colors='brown')
rt.grid(b=True, which='major', color='black', linestyle='--')
rt.grid(b=True, which='minor', color='grey', linestyle=':')
if 'RXO' in logs:
rxo = ax[2].twiny()
rxo.minorticks_on()
rxo.set_xlim(0.2, 2500)
rxo.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
rxo.spines['top'].set_position(('outward', 170))
rxo.spines['top'].set_color('c')
rxo.semilogx(data.RXO, data.DEPTH, linewidth=1, c='c', )
rxo.set_xlabel('RXO [ohm.m]', color='c')
rxo.tick_params(axis='x', colors='c')
rxo.grid(b=True, which='major', color='black', linestyle='--')
rxo.grid(b=True, which='minor', color='grey', linestyle=':')
# 4th track NPHI, DPHI, RHOB
ax[3].minorticks_on()
ax[3].grid(b=True, which='major', color='black', linestyle='--')
ax[3].grid(b=True, which='minor', color='grey', linestyle=':')
if 'NPHI' in logs:
nphi = ax[3].twiny()
nphi.minorticks_on()
nphi.set_xlim(0.45, -0.15)
nphi.spines['top'].set_position(('outward', 20))
nphi.spines['top'].set_color('blue')
nphi.set_xlabel("v/v")
nphi.plot(data.NPHI, data.DEPTH, linewidth=1, label='v/v', color='blue')
nphi.set_xlabel('NPHI [v/v]', color='blue')
nphi.tick_params(axis='x', colors='blue')
nphi.xaxis.set_major_locator(plt.MultipleLocator(0.2))
nphi.grid(b=True, which='major', color='black', linestyle='--')
nphi.grid(b=True, which='minor', color='grey', linestyle=':')
if 'RHOB' in logs:
rhob = ax[3].twiny()
rhob.set_xlim(1.95, 2.95)
rhob.plot(data.RHOB, data.DEPTH, '--', linewidth=1, label='g/cm^3', color='red')
rhob.spines['top'].set_position(('outward', 60))
rhob.spines['top'].set_color('red')
rhob.set_xlabel('RHOB [g/cm^3]', color='red')
rhob.tick_params(axis='x', colors='red')
rhob.xaxis.set_major_locator(plt.MultipleLocator(0.4))
elif 'PHID' in logs:
phid = ax[3].twiny()
phid.set_xlim(0.45, -0.15)
phid.plot(data.PHID, data.DEPTH, '--', linewidth=1, label='%', color='red')
phid.spines['top'].set_position(('outward', 60))
phid.spines['top'].set_color('red')
phid.set_xlabel('PHID [%]', color='red')
phid.tick_params(axis='x', colors='red')
phid.xaxis.set_major_locator(plt.MultipleLocator(0.4))
if 'NPHI' and 'RHOB' in logs:
# https://stackoverflow.com/questions/57766457/how-to-plot-fill-betweenx-to-fill-the-area-between-y1-and-y2-with-different-scal
x2p, _ = (rhob.transData + nphi.transData.inverted()).transform(np.c_[data.RHOB, data.DEPTH]).T
nphi.autoscale(False)
nphi.fill_betweenx(data.DEPTH, data.NPHI, x2p, color="goldenrod", alpha=0.4, where=(x2p > data.NPHI))
nphi.fill_betweenx(data.DEPTH, data.NPHI, x2p, color="turquoise", alpha=0.4, where=(x2p < data.NPHI))
# 5th DT and PEF
if 'PEF' and 'DT' not in logs:
fig.delaxes(ax=ax[4])
else:
ax[4].minorticks_on()
ax[4].grid(b=True, which='major', color='black', linestyle='--')
ax[4].grid(b=True, which='minor', color='grey', linestyle=':')
if 'DT' in logs:
dt = ax[4].twiny()
dt.minorticks_on()
dt.set_xlim(200, 40)
dt.spines['top'].set_position(('outward', 20))
dt.spines['top'].set_color('c')
dt.plot(data.DT, data.DEPTH, linewidth=1, label="US/F", color='c')
dt.set_xlabel("DT", color='c')
dt.tick_params(axis='x', colors='c')
dt.grid(b=True, which='major', color='black', linestyle='--')
dt.grid(b=True, which='minor', color='grey', linestyle=':')
else:
pass
if 'PEF' in logs:
pef = ax[4].twiny()
pef.plot(data.PEF, data.DEPTH, '--', linewidth=1, label="b/elc", color='lime')
pef.spines['top'].set_position(('outward', 60))
pef.spines['top'].set_color('lime')
pef.set_xlabel("PEF", color='lime')
pef.tick_params(axis='x', colors='lime')
pef.grid(b=True, which='major', color='black', linestyle='--')
pef.grid(b=True, which='minor', color='grey', linestyle=':')
else:
pass
# 6th track: vsh_larionov, vsh_linear
if 'vsh_linear' and 'vsh_larionov' not in logs:
fig.delaxes(ax=ax[5])
else:
ax[5].minorticks_on()
ax[5].grid(b=True, which='major', color='black', linestyle='--')
ax[5].grid(b=True, which='minor', color='grey', linestyle=':')
if 'vsh_linear' in logs:
vsh_linear = ax[5].twiny()
vsh_linear.minorticks_on()
vsh_linear.plot(data.vsh_linear, data.DEPTH, label='CALI[in]', color='k')
vsh_linear.spines['top'].set_position(('outward', 20))
vsh_linear.spines['top'].set_color('k')
vsh_linear.set_xlabel('vsh_linear[%]', color='k')
vsh_linear.tick_params(axis='x', colors='k')
vsh_linear.grid(b=True, which='major', color='black', linestyle='--')
vsh_linear.grid(b=True, which='minor', color='grey', linestyle=':')
else:
pass
if 'vsh_larionov' in logs:
vsh_larionov = ax[5].twiny()
vsh_larionov.minorticks_on()
vsh_larionov.plot(data.vsh_larionov, data.DEPTH, label='BS[in]', color='brown')
vsh_larionov.spines['top'].set_position(('outward', 60))
vsh_larionov.spines['top'].set_color('brown')
vsh_larionov.set_xlabel('vsh_larionov[%]', color='brown')
vsh_larionov.tick_params(axis='x', colors='brown')
vsh_larionov.grid(b=True, which='major', color='black', linestyle='--')
vsh_larionov.grid(b=True, which='minor', color='grey', linestyle=':')
else:
pass
if 'PHIE' in logs:
phie = ax[5].twiny()
phie.minorticks_on()
phie.plot(data.PHIE, data.DEPTH, linewidth=1, label='%', color='indigo')
phie.set_xlim(1, 0.0)
phie.spines['top'].set_position(('outward', 100))
phie.spines['top'].set_color('indigo')
phie.set_xlabel('PHIE [%]', color='indigo')
phie.tick_params(axis='x', colors='indigo')
else:
pass
plt.tight_layout(pad=2, h_pad=10, w_pad=2)
@staticmethod
def triple_combo_plot(data, min_depth: float or int or None, max_depth: float or int or None,
plotsize: tuple = None):
"""
This gives the 'sp-gr', 'resistivity' and 'neutron-density' log plot
:param data: well logs dataframe
:param min_depth: top of reservoir, closer to the surface (length units)
:param max_depth: bottom of reservoir, closer to the subsurface (length units)
:param plotsize: the plot figsize in tuple form, default is (14,22)
"""
if plotsize is None:
plotsize = (12, 15)
logs = data.columns
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=plotsize, sharey='all')
fig.suptitle("Triple-Combo Plot", fontsize=22, y=1.02)
# General setting for all axis
for axes in ax:
axes.get_xaxis().set_visible(False)
axes.invert_yaxis()
axes.spines['left'].set_color('k')
axes.spines['right'].set_color('k')
if min_depth is not None:
axes.set_ylim(max_depth, min_depth)
else:
axes.set_ylim(data['DEPTH'].max(), data['DEPTH'].min())
# 1st track: GR, SP
ax[0].minorticks_on()
ax[0].grid(b=True, which='major', color='black', linestyle='--')
ax[0].grid(b=True, which='minor', color='grey', linestyle=':')
# 1st track: GR, SP
if 'GR' in logs:
gr = ax[0].twiny()
gr.minorticks_on()
gr.set_xlim(0, 150)
gr.plot(data.GR, data.DEPTH, label='GR[api]', color='green')
gr.spines['top'].set_position(('outward', 20))
gr.spines['top'].set_color('g')
gr.set_xlabel('GR[api]', color='green')
gr.tick_params(axis='x', colors='green')
gr.grid(b=True, which='major', color='k', linestyle='--')
gr.grid(b=True, which='minor', color='grey', linestyle=':')
if 'SP' in logs:
sp = ax[0].twiny()
sp.minorticks_on()
sp.set_xlim(data['SP'].min(), data.max())
sp.plot(data.GR, data.DEPTH, label='SP[mV]', color='b')
sp.spines['top'].set_position(('outward', 60))
sp.spines['top'].set_color('b')
sp.set_xlabel('GR[api]', color='b')
sp.tick_params(axis='x', colors='b')
sp.grid(b=True, which='major', color='k', linestyle='--')
sp.grid(b=True, which='minor', color='grey', linestyle=':')
# 2nd track: resistivity track
ax[1].minorticks_on()
ax[1].grid(b=True, which='major', color='black', linestyle='--')
ax[1].grid(b=True, which='minor', color='grey', linestyle=':')
if 'RD' in logs:
rd = ax[1].twiny()
rd.set_xlim(0.2, 2500)
rd.spines['top'].set_position(('outward', 10))
rd.spines['top'].set_color('y')
rd.semilogx(data.RD, data.DEPTH, '--', linewidth=1, c='y')
rd.set_xlabel('RD [ohm.m]', color='y')
rd.tick_params(axis='x', colors='y')
rd.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
rd.grid(b=True, which='major', color='black', linestyle='--')
rd.grid(b=True, which='minor', color='grey', linestyle=':')
if 'RS' in logs:
rs = ax[1].twiny()
rs.set_xlim(0.2, 2500)
rs.spines['top'].set_position(('outward', 50))
rs.spines['top'].set_color('m')
rs.semilogx(data.RS, data.DEPTH, linewidth=1, c='m', )
rs.set_xlabel('RS [ohm.m]', color='m')
rs.tick_params(axis='x', colors='m')
rs.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
rs.grid(b=True, which='major', color='black', linestyle='--')
rs.grid(b=True, which='minor', color='grey', linestyle=':')
if 'RM' in logs:
rm = ax[1].twiny()
rm.set_xlim(0.2, 2500)
rm.spines['top'].set_position(('outward', 90))
rm.spines['top'].set_color('C1')
rm.semilogx(data.RM, data.DEPTH, linewidth=1, c='C1', )
rm.set_xlabel('RM [ohm.m]', color='C1')
rm.tick_params(axis='x', colors='C1')
rm.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
rm.grid(b=True, which='major', color='black', linestyle='--')
rm.grid(b=True, which='minor', color='grey', linestyle=':')
if 'RT' in logs:
rt = ax[1].twiny()
rt.set_xlim(0.2, 2500)
rt.spines['top'].set_position(('outward', 130))
rt.spines['top'].set_color('brown')
rt.semilogx(data.RT, data.DEPTH, linewidth=1, c='brown', )
rt.set_xlabel('RT [ohm.m]', color='brown')
rt.tick_params(axis='x', colors='brown')
rt.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
if 'RXO' in logs:
rxo = ax[1].twiny()
rxo.set_xlim(0.2, 2500)
rxo.spines['top'].set_position(('outward', 170))
rxo.spines['top'].set_color('c')
rxo.semilogx(data.RXO, data.DEPTH, linewidth=1, c='c', )
rxo.set_xlabel('RXO [ohm.m]', color='c')
rxo.tick_params(axis='x', colors='c')
rxo.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=3))
# 3rd track NPHI, DPHI, RHOB
ax[2].minorticks_on()
ax[2].grid(b=True, which='major', color='black', linestyle='--')
ax[2].grid(b=True, which='minor', color='grey', linestyle=':')
if 'NPHI' in logs:
nphi = ax[2].twiny()
nphi.minorticks_on()
nphi.set_xlim(0.45, -0.15)
nphi.spines['top'].set_position(('outward', 20))
nphi.spines['top'].set_color('blue')
nphi.set_xlabel("v/v")
nphi.plot(data.NPHI, data.DEPTH, linewidth=1, label='v/v', color='blue')
nphi.set_xlabel('NPHI [v/v] ', color='blue')
nphi.tick_params(axis='x', colors='blue')
nphi.xaxis.set_major_locator(plt.MultipleLocator(0.2))
nphi.grid(b=True, which='major', color='black', linestyle='--')
nphi.grid(b=True, which='minor', color='grey', linestyle=':')
if 'RHOB' in logs:
rhob = ax[2].twiny()
rhob.set_xlim(1.95, 2.95)
rhob.plot(data.RHOB, data.DEPTH, '--', linewidth=1, label='g/cm^3', color='red')
rhob.spines['top'].set_position(('outward', 60))
rhob.spines['top'].set_color('red')
rhob.set_xlabel('RHOB [g/cm^3]', color='red')
rhob.tick_params(axis='x', colors='red')
rhob.xaxis.set_major_locator(plt.MultipleLocator(0.4))
elif 'DPHI' in logs:
dphi = ax[2].twiny()
dphi.set_xlim(0.45, -0.15)
dphi.plot(data.DPHI, data.DEPTH, '--', linewidth=1, label='%', color='red')
dphi.spines['top'].set_position(('outward', 60))
dphi.spines['top'].set_color('red')
dphi.set_xlabel('DPHI [%]', color='red')
dphi.tick_params(axis='x', colors='red')
dphi.xaxis.set_major_locator(plt.MultipleLocator(0.4))
if 'NPHI' and 'RHOB' in logs:
# https://stackoverflow.com/questions/57766457/how-to-plot-fill-betweenx-to-fill-the-area-between-y1-and-y2-with-different-scal
x2p, _ = (rhob.transData + nphi.transData.inverted()).transform(np.c_[data.RHOB, data.DEPTH]).T
nphi.autoscale(False)
nphi.fill_betweenx(data.DEPTH, data.NPHI, x2p, color="y", alpha=0.4, where=(x2p > data.NPHI))
nphi.fill_betweenx(data.DEPTH, data.NPHI, x2p, color="turquoise", alpha=0.4, where=(x2p < data.NPHI))
plt.tight_layout(pad=2, h_pad=10, w_pad=2)
@staticmethod
def doub_logplot(data, logs: list, reslog: list or None = None, min_depth: float or int or None = None,
max_depth: float or int or None = None, plotsize: tuple = None):
"""
This gives a combination plot of your choice
:param logs: name of logs to plot in a list
:param reslog: name of resistivity logs to plot in a list
:param data: well logs dataframe
:param min_depth: top of reservoir, closer to the surface (length units)
:param max_depth: bottom of reservoir, closer to the subsurface (length units)
:param plotsize: the plot figsize in tuple form, default is (14, 22)
"""
if plotsize is None:
plotsize = (17, 15)
if reslog is None:
reslog = []
total = len(logs) + len(reslog)
total_logs = logs + reslog
# create the subplots; ncols equals the number of logs
fig, ax = plt.subplots(nrows=1, ncols=total, figsize=plotsize)
# General setting for all axis
for axes in ax:
axes.invert_yaxis()
for xtick in axes.get_xticklabels():
xtick.set_fontsize(10)
for ytick in axes.get_yticklabels():
ytick.set_fontsize(10)
if min_depth and max_depth is not None:
axes.set_ylim(max_depth, min_depth)
else:
axes.set_ylim(data['DEPTH'].max(), data['DEPTH'].min())
colors = ['k', 'brown', 'r', 'purple', 'y', 'orange', 'c', 'gold', 'b',
'plum', 'navy', 'm', 'sienna', 'teal', 'g']
for i in range(len(total_logs)):
if i < len(logs):
# for non-resistivity, normal plot
colrs = np.random.choice(colors)
ax[i].minorticks_on()
ax[i].grid(b=True, which='major', color='black', linestyle='--')
ax[i].grid(b=True, which='minor', color='grey', linestyle=':')
ax[i].plot(data[total_logs[i]], data['DEPTH'], color=colrs)
ax[i].set_xlim(data[total_logs[i]].min(), data[total_logs[i]].max())
ax[i].set_title(total_logs[i], size=20)
ax[i].grid(b=True, which='major', color='black', linestyle='--')
ax[i].grid(b=True, which='minor', color='grey', linestyle=':')
colors.remove(colrs)
if logs[i] == 'NPHI':
ax[i].set_xlim(0.45, -0.15)
else:
# for resistivity, semilog plot
colrs = np.random.choice(colors)
ax[i].minorticks_on()
ax[i].grid(b=True, which='major', color='black', linestyle='--')
ax[i].grid(b=True, which='minor', color='grey', linestyle=':')
ax[i].semilogx(data[total_logs[i]], data['DEPTH'], color=colrs)
ax[i].set_xlim(0.2, 2500)
ax[i].set_title(total_logs[i], size=20)
ax[i].grid(b=True, which='major', color='black', linestyle='--')
ax[i].grid(b=True, which='minor', color='grey', linestyle=':')
colors.remove(colrs)
plt.tight_layout(1.1)
| [
"numpy.mean",
"lasio.read",
"matplotlib.ticker.LogLocator",
"numpy.where",
"numpy.random.choice",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.MultipleLocator",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"warnings.filterwarnings"
] | [((207, 230), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (220, 230), True, 'import matplotlib.pyplot as plt\n'), ((232, 265), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (255, 265), False, 'import warnings\n'), ((1213, 1229), 'lasio.read', 'lasio.read', (['path'], {}), '(path)\n', (1223, 1229), False, 'import lasio\n'), ((3829, 3891), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(6)', 'figsize': 'plotsize', 'sharey': '(False)'}), '(nrows=1, ncols=6, figsize=plotsize, sharey=False)\n', (3841, 3891), True, 'import matplotlib.pyplot as plt\n'), ((17246, 17288), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(2)', 'h_pad': '(10)', 'w_pad': '(2)'}), '(pad=2, h_pad=10, w_pad=2)\n', (17262, 17288), True, 'import matplotlib.pyplot as plt\n'), ((17967, 18029), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': 'plotsize', 'sharey': '"""all"""'}), "(nrows=1, ncols=3, figsize=plotsize, sharey='all')\n", (17979, 18029), True, 'import matplotlib.pyplot as plt\n'), ((25305, 25347), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(2)', 'h_pad': '(10)', 'w_pad': '(2)'}), '(pad=2, h_pad=10, w_pad=2)\n', (25321, 25347), True, 'import matplotlib.pyplot as plt\n'), ((26337, 26389), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': 'total', 'figsize': 'plotsize'}), '(nrows=1, ncols=total, figsize=plotsize)\n', (26349, 26389), True, 'import matplotlib.pyplot as plt\n'), ((28615, 28636), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', (['(1.1)'], {}), '(1.1)\n', (28631, 28636), True, 'import matplotlib.pyplot as plt\n'), ((2281, 2334), 'numpy.where', 'np.where', (["(dataframe['GR'] > 150)", '(150)', "dataframe['GR']"], {}), "(dataframe['GR'] > 150, 150, dataframe['GR'])\n", (2289, 2334), True, 'import numpy as np\n'), ((2366, 2415), 
'numpy.where', 'np.where', (["(dataframe['GR'] < 0)", '(0)', "dataframe['GR']"], {}), "(dataframe['GR'] < 0, 0, dataframe['GR'])\n", (2374, 2415), True, 'import numpy as np\n'), ((2482, 2539), 'numpy.where', 'np.where', (["(dataframe['NPHI'] > 0.5)", '(0.5)', "dataframe['NPHI']"], {}), "(dataframe['NPHI'] > 0.5, 0.5, dataframe['NPHI'])\n", (2490, 2539), True, 'import numpy as np\n'), ((2575, 2636), 'numpy.where', 'np.where', (["(dataframe['NPHI'] < -0.15)", '(-0.15)', "dataframe['NPHI']"], {}), "(dataframe['NPHI'] < -0.15, -0.15, dataframe['NPHI'])\n", (2583, 2636), True, 'import numpy as np\n'), ((3113, 3124), 'numpy.mean', 'np.mean', (['df'], {}), '(df)\n', (3120, 3124), True, 'import numpy as np\n'), ((7893, 7933), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (7910, 7933), True, 'import matplotlib.ticker as ticker\n'), ((8542, 8582), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (8559, 8582), True, 'import matplotlib.ticker as ticker\n'), ((9175, 9215), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (9192, 9215), True, 'import matplotlib.ticker as ticker\n'), ((9820, 9860), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (9837, 9860), True, 'import matplotlib.ticker as ticker\n'), ((10475, 10515), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (10492, 10515), True, 'import matplotlib.ticker as ticker\n'), ((11693, 11717), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(0.2)'], {}), '(0.2)\n', (11712, 11717), True, 'import matplotlib.pyplot as plt\n'), ((12336, 12360), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(0.4)'], {}), 
'(0.4)\n', (12355, 12360), True, 'import matplotlib.pyplot as plt\n'), ((20520, 20560), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (20537, 20560), True, 'import matplotlib.ticker as ticker\n'), ((21121, 21161), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (21138, 21161), True, 'import matplotlib.ticker as ticker\n'), ((21726, 21766), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (21743, 21766), True, 'import matplotlib.ticker as ticker\n'), ((22344, 22384), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (22361, 22384), True, 'import matplotlib.ticker as ticker\n'), ((22809, 22849), 'matplotlib.ticker.LogLocator', 'ticker.LogLocator', ([], {'base': '(10.0)', 'numticks': '(3)'}), '(base=10.0, numticks=3)\n', (22826, 22849), True, 'import matplotlib.ticker as ticker\n'), ((23595, 23619), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(0.2)'], {}), '(0.2)\n', (23614, 23619), True, 'import matplotlib.pyplot as plt\n'), ((24238, 24262), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(0.4)'], {}), '(0.4)\n', (24257, 24262), True, 'import matplotlib.pyplot as plt\n'), ((27168, 27192), 'numpy.random.choice', 'np.random.choice', (['colors'], {}), '(colors)\n', (27184, 27192), True, 'import numpy as np\n'), ((27997, 28021), 'numpy.random.choice', 'np.random.choice', (['colors'], {}), '(colors)\n', (28013, 28021), True, 'import numpy as np\n'), ((12818, 12842), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(0.4)'], {}), '(0.4)\n', (12837, 12842), True, 'import matplotlib.pyplot as plt\n'), ((24720, 24744), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(0.4)'], {}), '(0.4)\n', (24739, 24744), 
True, 'import matplotlib.pyplot as plt\n')] |
from kfp.components import InputPath, OutputPath, create_component_from_func
def xgboost_predict(
    data_path: InputPath('CSV'),  # Also supports LibSVM
    model_path: InputPath('XGBoostModel'),
    predictions_path: OutputPath('Predictions'),
    label_column: int = None,
):
    '''Make predictions using a trained XGBoost model.
    Args:
        data_path: Path for the feature data in CSV format.
        model_path: Path for the trained model in binary XGBoost format.
        predictions_path: Output path for the predictions.
        label_column: Column containing the label data.
    Annotations:
        author: <NAME> <<EMAIL>>
    '''
    # Imports live inside the function so KFP can serialize it as a
    # self-contained component.
    from pathlib import Path

    import numpy
    import pandas
    import xgboost

    # Load the feature table and, if a label column was given, drop it so
    # the model only sees features.
    features = pandas.read_csv(data_path)
    if label_column is not None:
        label_name = features.columns[label_column]
        features = features.drop(columns=[label_name])

    dmatrix = xgboost.DMatrix(data=features)

    booster = xgboost.Booster(model_file=model_path)
    scores = booster.predict(dmatrix)

    # Make sure the output directory exists before writing predictions.
    out_dir = Path(predictions_path).parent
    out_dir.mkdir(parents=True, exist_ok=True)
    numpy.savetxt(predictions_path, scores)
if __name__ == '__main__':
    # Compile the prediction function into a reusable Kubeflow Pipelines
    # component spec written to component.yaml.
    create_component_from_func(
        xgboost_predict,
        output_component_file='component.yaml',
        base_image='python:3.7',
        packages_to_install=['xgboost==1.1.1', 'pandas==1.0.5'],
        annotations={'author': '<NAME> <<EMAIL>>'},
    )
| [
"pandas.read_csv",
"pathlib.Path",
"kfp.components.create_component_from_func",
"kfp.components.InputPath",
"xgboost.Booster",
"numpy.savetxt",
"xgboost.DMatrix",
"kfp.components.OutputPath"
] | [((748, 774), 'pandas.read_csv', 'pandas.read_csv', (['data_path'], {}), '(data_path)\n', (763, 774), False, 'import pandas\n'), ((901, 925), 'xgboost.DMatrix', 'xgboost.DMatrix', ([], {'data': 'df'}), '(data=df)\n', (916, 925), False, 'import xgboost\n'), ((954, 992), 'xgboost.Booster', 'xgboost.Booster', ([], {'model_file': 'model_path'}), '(model_file=model_path)\n', (969, 992), False, 'import xgboost\n'), ((1114, 1158), 'numpy.savetxt', 'numpy.savetxt', (['predictions_path', 'predictions'], {}), '(predictions_path, predictions)\n', (1127, 1158), False, 'import numpy\n'), ((1192, 1415), 'kfp.components.create_component_from_func', 'create_component_from_func', (['xgboost_predict'], {'output_component_file': '"""component.yaml"""', 'base_image': '"""python:3.7"""', 'packages_to_install': "['xgboost==1.1.1', 'pandas==1.0.5']", 'annotations': "{'author': '<NAME> <<EMAIL>>'}"}), "(xgboost_predict, output_component_file=\n 'component.yaml', base_image='python:3.7', packages_to_install=[\n 'xgboost==1.1.1', 'pandas==1.0.5'], annotations={'author':\n '<NAME> <<EMAIL>>'})\n", (1218, 1415), False, 'from kfp.components import InputPath, OutputPath, create_component_from_func\n'), ((114, 130), 'kfp.components.InputPath', 'InputPath', (['"""CSV"""'], {}), "('CSV')\n", (123, 130), False, 'from kfp.components import InputPath, OutputPath, create_component_from_func\n'), ((172, 197), 'kfp.components.InputPath', 'InputPath', (['"""XGBoostModel"""'], {}), "('XGBoostModel')\n", (181, 197), False, 'from kfp.components import InputPath, OutputPath, create_component_from_func\n'), ((221, 246), 'kfp.components.OutputPath', 'OutputPath', (['"""Predictions"""'], {}), "('Predictions')\n", (231, 246), False, 'from kfp.components import InputPath, OutputPath, create_component_from_func\n'), ((1045, 1067), 'pathlib.Path', 'Path', (['predictions_path'], {}), '(predictions_path)\n', (1049, 1067), False, 'from pathlib import Path\n')] |
#training the NN
import sys
sys.path.append("F:/Riku_Ka/AudioTestCode")
import shutil
import os
from pydub import AudioSegment
import numpy as np
from keras import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import adam
from keras.models import load_model
import time
from keras.models import model_from_json
startTime = time.time()
# Produce the training data set: stack the per-file MFCC feature matrices
# (column 0 = class label, columns 1..13 = MFCC coefficients).
trainingDIR = "F:/Riku_Ka/Training_Dementia/Training Data Set"
modelDIR = "F:/Riku_Ka/Training_Dementia"
modelFILE = "Model_for Dementia1.json"
weightFILE = "Model_for Dementia1.h5"
MFCCfiles = os.listdir(trainingDIR)
matrixBlocks = []
for f in MFCCfiles:
    tempFile = trainingDIR + '/' + f
    print(tempFile)
    tempMatrix = np.loadtxt(tempFile)
    print(tempMatrix.shape)
    matrixBlocks.append(tempMatrix)
# Stack once at the end. Besides avoiding quadratic re-copying from the
# repeated np.vstack, this fixes a bug: the old np.zeros(shape=(1, 14))
# seed row stayed in the matrix and was trained on as a bogus sample with
# all-zero features and label 0.
trainingMatrix = np.vstack(matrixBlocks)
trainingDate = trainingMatrix[:, 1:]
print("The shape of training data matrix is: ")
print(trainingDate.shape)
trainingLabels = trainingMatrix[:, 0]
print("Time point 1 is " + str(time.time() - startTime))
# Fix NumPy's RNG so the weight initialisation is reproducible, then build
# a small binary classifier over the 13 MFCC coefficients: 13 -> 64 -> 8 -> 1.
np.random.seed(10)
myModel = Sequential([
    Dense(units=64, input_dim=13, activation='relu'),
    Dense(units=8, activation='relu'),
    Dense(1, activation='sigmoid'),
])
myModel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
myModel.fit(x=trainingDate, y=trainingLabels, epochs=150, batch_size=32)
# Persist the architecture (JSON) and the weights (HDF5) separately.
with open(modelDIR + '/' + modelFILE, 'w') as json_file:
    json_file.write(myModel.to_json())
myModel.save_weights(modelDIR + '/' + weightFILE)
print("The trained model has been saved.")
print("Time point 3 is " + str(time.time() - startTime))
print("Time point 3 is " + str(time.time() - startTime))
#produce test data set.
testDIR = "F:/Riku_Ka/Training_Dementia/Test Data Set"
MFCCfiles = os.listdir(testDIR)
for f in MFCCfiles:
tempFile = testDIR + '/' + f
print(tempFile)
tempMatrix = np.loadtxt(tempFile)
testData = tempMatrix[:,1:]
testLabels = tempMatrix[:,0]
scores = myModel.evaluate(x=testData, y=testLabels)
print("The accuracy for file {0} is {1}".format(f,scores[1]*100))
print("Time point 2 is " + str(time.time() - startTime))
# Select files whose names contain the dementia site codes below.
import os, random, shutil
# NOTE(review): resDIR/dstDIR here point at the *Health* MFCC folders but
# dstDIR is overwritten a few lines further down — presumably leftovers
# from a copy-paste of the health-data script; confirm which paths are live.
resDIR = "F:/Riku_Ka/Training_Dementia/MFCC with Labels for Health"
dstDIR = "F:/Riku_Ka/Training_Dementia/Test Data Set"
import sys
sys.path.append("F:/Riku_Ka/AudioTestCode")
# NOTE(review): os and shutil are imported twice (harmless, but redundant).
import shutil
import os
from pydub import AudioSegment
import numpy as np
import FHC_Audio
# Raw audio recordings, organised in per-session subdirectories.
sourceDIR = "F:/Riku_Ka/PROMPT-UnzipAudioFile/音声データ_003"
# This reassignment makes dstDIR the MFCC output folder for dementia patients.
dstDIR = "F:/Riku_Ka/Training_Dementia/MFCC for Dementia-Pt only"
subDIRs = os.listdir(sourceDIR)
# Site-code prefixes for each diagnosis group; only the dementia codes are
# used below.
#listHealth = ['BH', 'KH', 'OH', 'PKH', 'POH', 'PTH', 'TH']
#listDepression = ['AD', 'BD', 'GD', 'KD', 'MD', 'ND', 'OD', 'PKD', 'POD', 'PTD', 'SD', 'TD']
listDementias = ['AC','GC','KC','OC','PKC','SC','TC']
print(listDementias)
# Walk every session directory and extract 13 MFCC coefficients from each
# patient ('Pt') recording whose filename carries a dementia site code.
for subdir in subDIRs:
    sessionDIR = sourceDIR + '/' + subdir
    for fname in os.listdir(sessionDIR):
        has_dementia_code = any(code in fname for code in listDementias)
        if not (has_dementia_code and 'Pt' in fname):
            continue
        audioFILE = sessionDIR + '/' + fname
        print("For the file {}: ".format(fname))
        try:
            featureMFCC = FHC_Audio.getMFCC4SingleFile(audioFILE, 13)
            featureFILE = dstDIR + '/' + os.path.splitext(fname)[0] + "_MFCC.txt"
            np.savetxt(featureFILE, featureMFCC)
        except ValueError:
            # Some recordings cannot be parameterised; report and move on.
            print("There is a ValueError.")
# Select a random third of the files as the test split and move them.
import os, random, shutil
sourceDIR = "F:/Riku_Ka/PROMPT-UnzipAudioFile/音声データ_003"
dstDIR = "F:/Riku_Ka/Training_Dementia/MFCC for Dementia-Pt only"
# NOTE(review): filenames are sampled from sourceDIR (raw audio session
# dirs) but the move below reads from resDIR (defined in the previous
# fragment) — these directories do not contain the same entries, so the
# moves will likely fail. Probably the listing should be of the directory
# actually being moved from; confirm the intended source.
files = os.listdir(sourceDIR)
# One third of the data is held out (integer truncation).
numOfTest = int(len(files)/3)
# NOTE(review): despite the name, `trainingSamples` holds the *test*-sized
# sample (numOfTest entries).
trainingSamples = random.sample(files, numOfTest)
for sample in trainingSamples:
    shutil.move(resDIR + '/' + sample, dstDIR + '/' + sample)
# Prepend a label column of ones to every MFCC matrix (the destination
# directory name indicates these are the dementia-class samples) and save
# each result under a '1_' filename prefix.
resDIR = "F:/Riku_Ka/Training_Dementia/MFCC for Dementia-Pt only"
dstDIR = "F:/Riku_Ka/Training_Dementia/MFCC with Labels for Dementia"
for fname in os.listdir(resDIR):
    features = np.loadtxt(resDIR + '/' + fname)
    rows, cols = features.shape
    print("The size of {0} is {1} * {2}".format(fname, rows, cols))
    labelColumn = np.ones((rows, 1))
    labelled = np.hstack((labelColumn, features))
    np.savetxt(dstDIR + '/1_' + fname, labelled)
| [
"keras.Sequential",
"random.sample",
"os.listdir",
"numpy.ones",
"shutil.move",
"numpy.hstack",
"os.path.splitext",
"numpy.zeros",
"numpy.random.seed",
"numpy.vstack",
"time.time",
"keras.layers.Dense",
"numpy.savetxt",
"numpy.loadtxt",
"FHC_Audio.getMFCC4SingleFile",
"sys.path.append"... | [((30, 73), 'sys.path.append', 'sys.path.append', (['"""F:/Riku_Ka/AudioTestCode"""'], {}), "('F:/Riku_Ka/AudioTestCode')\n", (45, 73), False, 'import sys\n'), ((371, 382), 'time.time', 'time.time', ([], {}), '()\n', (380, 382), False, 'import time\n'), ((620, 643), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 14)'}), '(shape=(1, 14))\n', (628, 643), True, 'import numpy as np\n'), ((658, 681), 'os.listdir', 'os.listdir', (['trainingDIR'], {}), '(trainingDIR)\n', (668, 681), False, 'import os, random, shutil\n'), ((1109, 1127), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (1123, 1127), True, 'import numpy as np\n'), ((1141, 1153), 'keras.Sequential', 'Sequential', ([], {}), '()\n', (1151, 1153), False, 'from keras import Sequential\n'), ((1887, 1906), 'os.listdir', 'os.listdir', (['testDIR'], {}), '(testDIR)\n', (1897, 1906), False, 'import os, random, shutil\n'), ((2499, 2542), 'sys.path.append', 'sys.path.append', (['"""F:/Riku_Ka/AudioTestCode"""'], {}), "('F:/Riku_Ka/AudioTestCode')\n", (2514, 2542), False, 'import sys\n'), ((2780, 2801), 'os.listdir', 'os.listdir', (['sourceDIR'], {}), '(sourceDIR)\n', (2790, 2801), False, 'import os, random, shutil\n'), ((3844, 3865), 'os.listdir', 'os.listdir', (['sourceDIR'], {}), '(sourceDIR)\n', (3854, 3865), False, 'import os, random, shutil\n'), ((3916, 3947), 'random.sample', 'random.sample', (['files', 'numOfTest'], {}), '(files, numOfTest)\n', (3929, 3947), False, 'import os, random, shutil\n'), ((4290, 4308), 'os.listdir', 'os.listdir', (['resDIR'], {}), '(resDIR)\n', (4300, 4308), False, 'import os, random, shutil\n'), ((780, 800), 'numpy.loadtxt', 'np.loadtxt', (['tempFile'], {}), '(tempFile)\n', (790, 800), True, 'import numpy as np\n'), ((852, 891), 'numpy.vstack', 'np.vstack', (['(trainingMatrix, tempMatrix)'], {}), '((trainingMatrix, tempMatrix))\n', (861, 891), True, 'import numpy as np\n'), ((1169, 1217), 'keras.layers.Dense', 'Dense', ([], {'units': '(64)', 
'input_dim': '(13)', 'activation': '"""relu"""'}), "(units=64, input_dim=13, activation='relu')\n", (1174, 1217), False, 'from keras.layers import Dense, Activation\n'), ((1231, 1264), 'keras.layers.Dense', 'Dense', ([], {'units': '(8)', 'activation': '"""relu"""'}), "(units=8, activation='relu')\n", (1236, 1264), False, 'from keras.layers import Dense, Activation\n'), ((1279, 1309), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1284, 1309), False, 'from keras.layers import Dense, Activation\n'), ((2001, 2021), 'numpy.loadtxt', 'np.loadtxt', (['tempFile'], {}), '(tempFile)\n', (2011, 2021), True, 'import numpy as np\n'), ((3110, 3129), 'os.listdir', 'os.listdir', (['tempDIR'], {}), '(tempDIR)\n', (3120, 3129), False, 'import os, random, shutil\n'), ((3985, 4042), 'shutil.move', 'shutil.move', (["(resDIR + '/' + sample)", "(dstDIR + '/' + sample)"], {}), "(resDIR + '/' + sample, dstDIR + '/' + sample)\n", (3996, 4042), False, 'import os, random, shutil\n'), ((4383, 4403), 'numpy.loadtxt', 'np.loadtxt', (['tempFile'], {}), '(tempFile)\n', (4393, 4403), True, 'import numpy as np\n'), ((4503, 4518), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (4510, 4518), True, 'import numpy as np\n'), ((4535, 4566), 'numpy.hstack', 'np.hstack', (['(newCol, tempMatrix)'], {}), '((newCol, tempMatrix))\n', (4544, 4566), True, 'import numpy as np\n'), ((4605, 4635), 'numpy.savetxt', 'np.savetxt', (['newFile', 'newMatrix'], {}), '(newFile, newMatrix)\n', (4615, 4635), True, 'import numpy as np\n'), ((1078, 1089), 'time.time', 'time.time', ([], {}), '()\n', (1087, 1089), False, 'import time\n'), ((1763, 1774), 'time.time', 'time.time', ([], {}), '()\n', (1772, 1774), False, 'import time\n'), ((2263, 2274), 'time.time', 'time.time', ([], {}), '()\n', (2272, 2274), False, 'import time\n'), ((3363, 3405), 'FHC_Audio.getMFCC4SingleFile', 'FHC_Audio.getMFCC4SingleFile', (['tempFILE', '(13)'], {}), '(tempFILE, 13)\n', 
(3391, 3405), False, 'import FHC_Audio\n'), ((3505, 3541), 'numpy.savetxt', 'np.savetxt', (['featureFILE', 'featureMFCC'], {}), '(featureFILE, featureMFCC)\n', (3515, 3541), True, 'import numpy as np\n'), ((3452, 3471), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (3468, 3471), False, 'import os, random, shutil\n')] |
"""
TextCNN Model for Sentiment Analysis
===============================================
This example shows how to use convolutional neural networks (textCNN)
for sentiment analysis on various datasets.
<NAME>. (2014). Convolutional neural networks for sentence classification.
arXiv preprint arXiv:1408.5882.
"""
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import time
import random
import numpy as np
import mxnet as mx
from mxnet import nd, gluon, autograd
from mxnet.gluon.data import DataLoader
import process_data
import text_cnn
# Fix all random seeds (numpy, python, mxnet) for reproducible runs.
np.random.seed(3435)
random.seed(3435)
mx.random.seed(3435)
# Command-line configuration for the training script.
parser = argparse.ArgumentParser(description='Sentiment analysis with the textCNN model on\
various datasets.')
parser.add_argument('--data_name', choices=['MR', 'SST-1', 'SST-2', 'Subj', 'TREC'], default='MR',
                    help='name of the data set')
parser.add_argument('--model_mode', choices=['rand', 'static', 'non-static', 'multichannel'],
                    default='multichannel', help='Variants of the textCNN model (see the paper:\
                    Convolutional Neural Networks for Sentence Classification).')
parser.add_argument('--lr', type=float, default=2.5E-3,
                    help='initial learning rate')
parser.add_argument('--epochs', type=int, default=20,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=50, metavar='N',
                    help='batch size')
parser.add_argument('--dropout', type=float, default=.5,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--log-interval', type=int, default=30, metavar='N',
                    help='report interval')
parser.add_argument('--save-prefix', type=str, default='sa-model',
                    help='path to save the final model')
parser.add_argument('--gpu', type=int, default=None,
                    help='id of the gpu to use. Set it to empty means to use cpu.')
args = parser.parse_args()
print(args)
# Select the compute context: CPU by default, the requested GPU otherwise.
if args.gpu is None:
    print('Use cpu')
    context = mx.cpu()
else:
    print('Use gpu%d' % args.gpu)
    context = mx.gpu(args.gpu)
# MR and Subj return only a training portion from load_dataset; the other
# datasets also return a predefined test split (see the __main__ block).
if args.data_name == 'MR' or args.data_name == 'Subj':
    vocab, max_len, output_size, train_dataset, train_data_lengths \
        = process_data.load_dataset(args.data_name)
else:
    vocab, max_len, output_size, train_dataset, train_data_lengths, \
    test_dataset, test_data_lengths = process_data.load_dataset(args.data_name)
model = text_cnn.model(args.dropout, vocab, args.model_mode, output_size)
print(model)
loss = gluon.loss.SoftmaxCrossEntropyLoss()
def evaluate(net, dataloader):
    """Compute average loss and accuracy of ``net`` over ``dataloader``.

    Returns
    -------
    (float, float)
        Average cross-entropy loss per sample and classification accuracy.
    """
    sum_L = 0.0
    n_samples = 0
    n_correct = 0
    tic = time.time()
    print('Begin Testing...')
    for batch_id, (data, label) in enumerate(dataloader):
        # Batches come in NTC layout; transpose to the layout the net expects.
        data = mx.nd.transpose(data.as_in_context(context))
        label = label.as_in_context(context)
        output = net(data)
        L = loss(output, label)
        pred = nd.argmax(output, axis=1)
        sum_L += L.sum().asscalar()
        n_samples += label.shape[0]
        n_correct += (pred.astype('int') == label).sum().asscalar()
        if (batch_id + 1) % args.log_interval == 0:
            print('[Batch {}/{}] elapsed {:.2f} s'.format(
                batch_id + 1, len(dataloader), time.time() - tic))
            tic = time.time()
    return sum_L / float(n_samples), n_correct / float(n_samples)
def train(net, train_data, test_data):
    """Train textCNN model for sentiment analysis.

    Shuffles `train_data`, holds out 10% of it for validation, trains for
    `args.epochs` epochs, and finally evaluates on `test_data`.
    Returns the test accuracy.
    """
    start_pipeline_time = time.time()
    # Initialize the network parameters and build the trainer.
    net, trainer = text_cnn.init(net, vocab, args.model_mode, context, args.lr)
    # 90/10 train/validation split after an in-place shuffle.
    random.shuffle(train_data)
    sp = int(len(train_data)*0.9)
    train_dataloader = DataLoader(dataset=train_data[:sp],
                                  batch_size=args.batch_size,
                                  shuffle=True)
    val_dataloader = DataLoader(dataset=train_data[sp:],
                                batch_size=args.batch_size,
                                shuffle=False)
    test_dataloader = DataLoader(dataset=test_data,
                                 batch_size=args.batch_size,
                                 shuffle=False)
    # Training/Testing.
    best_val_acc = 0
    for epoch in range(args.epochs):
        # Epoch training stats.
        start_epoch_time = time.time()
        epoch_L = 0.0
        epoch_sent_num = 0
        epoch_wc = 0
        # Log interval training stats.
        start_log_interval_time = time.time()
        log_interval_wc = 0
        log_interval_sent_num = 0
        log_interval_L = 0.0
        for i, (data, label) in enumerate(train_dataloader):
            data = mx.nd.transpose(data.as_in_context(context))
            label = label.as_in_context(context)
            # Word count per batch; every padded sentence has length max_len.
            wc = max_len
            log_interval_wc += wc
            epoch_wc += wc
            log_interval_sent_num += data.shape[1]
            epoch_sent_num += data.shape[1]
            with autograd.record():
                output = net(data)
                L = loss(output, label).mean()
            L.backward()
            # Update parameter.
            trainer.step(1)
            log_interval_L += L.asscalar()
            epoch_L += L.asscalar()
            if (i + 1) % args.log_interval == 0:
                print('[Epoch %d Batch %d/%d] avg loss %g, throughput %gK wps' % (
                    epoch, i + 1, len(train_dataloader),
                    log_interval_L / log_interval_sent_num,
                    log_interval_wc / 1000 / (time.time() - start_log_interval_time)))
                # Clear log interval training stats.
                start_log_interval_time = time.time()
                log_interval_wc = 0
                log_interval_sent_num = 0
                log_interval_L = 0
        end_epoch_time = time.time()
        # Validate on the held-out split after every epoch.
        val_avg_L, val_acc = evaluate(net, val_dataloader)
        print('[Epoch %d] train avg loss %g, '
              'test acc %.4f, test avg loss %g, throughput %gK wps' % (
                  epoch, epoch_L / epoch_sent_num,
                  val_acc, val_avg_L,
                  epoch_wc / 1000 / (end_epoch_time - start_epoch_time)))
        if val_acc >= best_val_acc:
            print('Observed Improvement.')
            best_val_acc = val_acc
    # Final evaluation on the test split.
    test_avg_L, test_acc = evaluate(net, test_dataloader)
    print('Test loss %g, test acc %.4f'%(test_avg_L, test_acc))
    print('Total time cost %.2fs'%(time.time()-start_pipeline_time))
    return test_acc
def k_fold_cross_valid(k, net, all_dataset):
    """Run k-fold cross validation and print the mean test accuracy.

    Parameters
    ----------
    k : int
        Number of folds.
    net : network
        The textCNN model passed through to ``train``.
    all_dataset : list
        Full dataset; it is shuffled in place before folding. Any
        remainder samples beyond ``k * (len(all_dataset) // k)`` are
        never used as test data (same as the original behavior).
    """
    test_acc = []
    fold_size = len(all_dataset) // k
    random.shuffle(all_dataset)
    # Bug fix: iterate over all k folds instead of the hard-coded
    # ``range(10)``, so the function honors its ``k`` parameter.
    for test_i in range(k):
        test_data = all_dataset[test_i * fold_size: (test_i + 1) * fold_size]
        train_data = all_dataset[: test_i * fold_size] + all_dataset[(test_i + 1) * fold_size:]
        print(len(train_data), len(test_data))
        test_acc.append(train(net, train_data, test_data))
    print(sum(test_acc) / k)
if __name__ == '__main__':
    # MR and Subj are loaded without a predefined test split (see the
    # dataset-loading branch above), so they are scored with 10-fold cross
    # validation; the remaining datasets train once against their test set.
    if args.data_name != 'MR' and args.data_name != 'Subj':
        train(model, train_dataset, test_dataset)
    else:
        k_fold_cross_valid(10, model, train_dataset)
| [
"mxnet.gluon.loss.SoftmaxCrossEntropyLoss",
"mxnet.nd.argmax",
"mxnet.autograd.record",
"random.shuffle",
"argparse.ArgumentParser",
"mxnet.cpu",
"text_cnn.init",
"random.seed",
"mxnet.gpu",
"numpy.random.seed",
"mxnet.random.seed",
"mxnet.gluon.data.DataLoader",
"process_data.load_dataset",... | [((1317, 1337), 'numpy.random.seed', 'np.random.seed', (['(3435)'], {}), '(3435)\n', (1331, 1337), True, 'import numpy as np\n'), ((1338, 1355), 'random.seed', 'random.seed', (['(3435)'], {}), '(3435)\n', (1349, 1355), False, 'import random\n'), ((1356, 1376), 'mxnet.random.seed', 'mx.random.seed', (['(3435)'], {}), '(3435)\n', (1370, 1376), True, 'import mxnet as mx\n'), ((1387, 1530), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sentiment analysis with the textCNN model on various datasets."""'}), "(description=\n 'Sentiment analysis with the textCNN model on various datasets.'\n )\n", (1410, 1530), False, 'import argparse\n'), ((3280, 3345), 'text_cnn.model', 'text_cnn.model', (['args.dropout', 'vocab', 'args.model_mode', 'output_size'], {}), '(args.dropout, vocab, args.model_mode, output_size)\n', (3294, 3345), False, 'import text_cnn\n'), ((3367, 3403), 'mxnet.gluon.loss.SoftmaxCrossEntropyLoss', 'gluon.loss.SoftmaxCrossEntropyLoss', ([], {}), '()\n', (3401, 3403), False, 'from mxnet import nd, gluon, autograd\n'), ((2862, 2870), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (2868, 2870), True, 'import mxnet as mx\n'), ((2925, 2941), 'mxnet.gpu', 'mx.gpu', (['args.gpu'], {}), '(args.gpu)\n', (2931, 2941), True, 'import mxnet as mx\n'), ((3073, 3114), 'process_data.load_dataset', 'process_data.load_dataset', (['args.data_name'], {}), '(args.data_name)\n', (3098, 3114), False, 'import process_data\n'), ((3229, 3270), 'process_data.load_dataset', 'process_data.load_dataset', (['args.data_name'], {}), '(args.data_name)\n', (3254, 3270), False, 'import process_data\n'), ((3587, 3598), 'time.time', 'time.time', ([], {}), '()\n', (3596, 3598), False, 'import time\n'), ((4518, 4529), 'time.time', 'time.time', ([], {}), '()\n', (4527, 4529), False, 'import time\n'), ((4549, 4609), 'text_cnn.init', 'text_cnn.init', (['net', 'vocab', 'args.model_mode', 'context', 'args.lr'], {}), '(net, vocab, 
args.model_mode, context, args.lr)\n', (4562, 4609), False, 'import text_cnn\n'), ((4614, 4640), 'random.shuffle', 'random.shuffle', (['train_data'], {}), '(train_data)\n', (4628, 4640), False, 'import random\n'), ((4698, 4775), 'mxnet.gluon.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_data[:sp]', 'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(dataset=train_data[:sp], batch_size=args.batch_size, shuffle=True)\n', (4708, 4775), False, 'from mxnet.gluon.data import DataLoader\n'), ((4865, 4943), 'mxnet.gluon.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_data[sp:]', 'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dataset=train_data[sp:], batch_size=args.batch_size, shuffle=False)\n', (4875, 4943), False, 'from mxnet.gluon.data import DataLoader\n'), ((5030, 5102), 'mxnet.gluon.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_data', 'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dataset=test_data, batch_size=args.batch_size, shuffle=False)\n', (5040, 5102), False, 'from mxnet.gluon.data import DataLoader\n'), ((7581, 7608), 'random.shuffle', 'random.shuffle', (['all_dataset'], {}), '(all_dataset)\n', (7595, 7608), False, 'import random\n'), ((3859, 3884), 'mxnet.nd.argmax', 'nd.argmax', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (3868, 3884), False, 'from mxnet import nd, gluon, autograd\n'), ((5310, 5321), 'time.time', 'time.time', ([], {}), '()\n', (5319, 5321), False, 'import time\n'), ((5465, 5476), 'time.time', 'time.time', ([], {}), '()\n', (5474, 5476), False, 'import time\n'), ((6787, 6798), 'time.time', 'time.time', ([], {}), '()\n', (6796, 6798), False, 'import time\n'), ((4264, 4275), 'time.time', 'time.time', ([], {}), '()\n', (4273, 4275), False, 'import time\n'), ((5941, 5958), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (5956, 5958), False, 'from mxnet import nd, gluon, autograd\n'), ((6637, 6648), 'time.time', 'time.time', ([], {}), '()\n', (6646, 6648), False, 
'import time\n'), ((7421, 7432), 'time.time', 'time.time', ([], {}), '()\n', (7430, 7432), False, 'import time\n'), ((4186, 4197), 'time.time', 'time.time', ([], {}), '()\n', (4195, 4197), False, 'import time\n'), ((6501, 6512), 'time.time', 'time.time', ([], {}), '()\n', (6510, 6512), False, 'import time\n')] |
from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
import re
from fairseq.data.datautils import utf8_to_uxxxx, uxxxx_to_utf8
import cv2
from fairseq.data import FairseqDataset
import json
import lmdb
import logging
LOG = logging.getLogger(__name__)
class OcrLmdbDataset(FairseqDataset):
    """Takes a text file as input and binarizes it in memory at instantiation.
    Original lines are also kept in memory

    Reads line-image samples from an LMDB database described by
    ``<data_dir>/desc.json`` and groups them into width buckets so that
    batches can be padded to a small set of fixed widths.
    """

    def __init__(
        self, split, data_dir, dictionary, transforms, image_height, max_allowed_width,
    ):
        # `split` selects which section of desc.json to read (e.g. train/valid).
        # `transforms` is a callable applied to each decoded line image.
        LOG.info("...OcrLmdbDataset %s", data_dir)
        self.data_dir = data_dir
        self.split = split
        self.dictionary = dictionary
        self.preprocess = transforms
        self.image_height = image_height
        self.max_allowed_width = max_allowed_width
        with open(os.path.join(self.data_dir, "desc.json"), "r") as fh:
            self.data_desc = json.load(fh)
        # Token count of each transcription; used by fairseq for batching.
        self.sizes = []
        for entry in self.data_desc[self.split]:
            self.sizes.append(len(entry["trans"].split()))
        self.sizes = np.array(self.sizes)
        # Read-only LMDB handle for the encoded line images.
        self.lmdb_env = lmdb.Environment(
            os.path.join(self.data_dir, "line-images.lmdb"),
            map_size=1e6,
            readonly=True,
            lock=False,
        )
        self.lmdb_txn = self.lmdb_env.begin(buffers=True)
        # Width buckets (in pixels after rescaling to image_height).
        self.size_group_limits = [150, 200, 300, 350, 450, 600, np.inf]
        self.size_group_keys = self.size_group_limits
        self.size_groups = dict()
        self.size_groups_dict = dict()
        for cur_limit in self.size_group_limits:
            self.size_groups[cur_limit] = []
            self.size_groups_dict[cur_limit] = dict()
        # Assign each sample to the smallest bucket its normalized width
        # fits into; samples wider than max_allowed_width are dropped.
        for idx, entry in enumerate(self.data_desc[self.split]):
            width_orig, height_orig = entry["width"], entry["height"]
            normalized_width = width_orig * (self.image_height / height_orig)
            for cur_limit in self.size_group_limits:
                if (
                    normalized_width < cur_limit
                    and normalized_width < self.max_allowed_width
                ):
                    self.size_groups[cur_limit].append(idx)
                    self.size_groups_dict[cur_limit][idx] = 1
                    break
        # Now get final size (might have dropped large entries!)
        self.nentries = 0
        self.max_index = 0
        for cur_limit in self.size_group_limits:
            self.nentries += len(self.size_groups[cur_limit])
            if len(self.size_groups[cur_limit]) > 0:
                cur_max = max(self.size_groups[cur_limit])
                if cur_max > self.max_index:
                    self.max_index = cur_max
        print("...finished loading, size {}".format(self.nentries))
        print("count by group")
        total_group_cnt = 0
        for cur_limit in self.size_group_limits:
            print("group", cur_limit, len(self.size_groups[cur_limit]))
            total_group_cnt += len(self.size_groups[cur_limit])
        print("TOTAL...", total_group_cnt)

    def __getitem__(self, index):
        # Returns a dict with the decoded image tensor, its transcription
        # (as dictionary indices), and bucketing metadata.
        entry = self.data_desc[self.split][index]
        # Find the width bucket this sample was assigned to in __init__.
        max_width = 0
        for cur_limit in self.size_group_limits:
            if index in self.size_groups_dict[cur_limit]:
                max_width = cur_limit
                break
        group_id = max_width
        image_name = entry["id"]
        img_bytes = np.asarray(
            self.lmdb_txn.get(entry["id"].encode("ascii")), dtype=np.uint8
        )
        line_image = cv2.imdecode(img_bytes, cv2.IMREAD_COLOR)  # -1)
        # Do a check for RGBA images; if found get rid of alpha channel
        if len(line_image.shape) == 3 and line_image.shape[2] == 4:
            line_image = cv2.cvtColor(line_image, cv2.COLOR_BGRA2BGR)
        line_image = self.preprocess(line_image)
        # Sanity check: make sure width@30px lh is long enough not to crash our model; we pad to at least 15px wide
        # Need to do this and change the "real" image size so that pack_padded doens't complain
        if line_image.size(2) < 15:
            line_image_ = torch.ones(
                line_image.size(0), line_image.size(1), 15)
            line_image_[:, :, : line_image.size(2)] = line_image
            line_image = line_image_
        # Add padding up to max-width, so that we have consistent size for cudnn.benchmark to work with
        original_width = line_image.size(2)
        original_height = line_image.size(1)
        # Map each uxxxx-encoded character token to its dictionary index.
        transcription = []
        for char in entry["trans"].split():
            transcription.append(self.dictionary.index(char))
        src_metadata = {
            "target": transcription,
            # "target_len": len(transcription),
            "uxxx_trans": entry["trans"],
            "utf8_trans": uxxxx_to_utf8(entry["trans"]),
            "width": original_width,
            "height": original_height,
            "group": group_id,
            "image_name": image_name,
            "image": line_image,
            "id": index,
        }
        return src_metadata

    def __len__(self):
        # Number of samples kept after width filtering, not the raw desc count.
        return self.nentries
| [
"logging.getLogger",
"fairseq.data.datautils.uxxxx_to_utf8",
"os.path.join",
"numpy.array",
"cv2.imdecode",
"cv2.cvtColor",
"json.load"
] | [((278, 305), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (295, 305), False, 'import logging\n'), ((1134, 1154), 'numpy.array', 'np.array', (['self.sizes'], {}), '(self.sizes)\n', (1142, 1154), True, 'import numpy as np\n'), ((3594, 3635), 'cv2.imdecode', 'cv2.imdecode', (['img_bytes', 'cv2.IMREAD_COLOR'], {}), '(img_bytes, cv2.IMREAD_COLOR)\n', (3606, 3635), False, 'import cv2\n'), ((966, 979), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (975, 979), False, 'import json\n'), ((1210, 1257), 'os.path.join', 'os.path.join', (['self.data_dir', '"""line-images.lmdb"""'], {}), "(self.data_dir, 'line-images.lmdb')\n", (1222, 1257), False, 'import os\n'), ((3808, 3852), 'cv2.cvtColor', 'cv2.cvtColor', (['line_image', 'cv2.COLOR_BGRA2BGR'], {}), '(line_image, cv2.COLOR_BGRA2BGR)\n', (3820, 3852), False, 'import cv2\n'), ((4859, 4888), 'fairseq.data.datautils.uxxxx_to_utf8', 'uxxxx_to_utf8', (["entry['trans']"], {}), "(entry['trans'])\n", (4872, 4888), False, 'from fairseq.data.datautils import utf8_to_uxxxx, uxxxx_to_utf8\n'), ((883, 923), 'os.path.join', 'os.path.join', (['self.data_dir', '"""desc.json"""'], {}), "(self.data_dir, 'desc.json')\n", (895, 923), False, 'import os\n')] |
import numpy as np
import pytest
from lagom.core.transform import Clip
from lagom.core.transform import Centralize
from lagom.core.transform import Normalize
from lagom.core.transform import Standardize
from lagom.core.transform import ExpFactorCumSum
from lagom.core.transform import RunningMeanStd
from lagom.core.transform import RankTransform
class TestTransform(object):
    """Unit tests for the lagom.core.transform helpers.

    Each transform is exercised with plain scalars, numpy scalars,
    tuples, lists and 1-dim ndarrays; >1-dim ndarrays must raise
    ValueError, and scalar input to the array-only transforms must
    raise AssertionError.
    """
    def test_clip(self):
        clip = Clip()

        # Test scalar
        assert clip(x=2, a_min=0, a_max=1) == 1
        assert clip(x=0.5, a_min=0, a_max=1) == 0.5
        assert clip(x=-1, a_min=0, a_max=1) == 0

        # Test numpy scalar
        assert clip(x=np.array(2), a_min=0, a_max=1) == 1
        assert clip(x=np.array(0.5), a_min=0, a_max=1) == 0.5
        assert clip(x=np.array(-1), a_min=0, a_max=1) == 0

        #
        # Test vector
        #
        def _test_vec(x):
            assert np.alltrue(clip(x=x, a_min=2, a_max=3) == [2, 2, 3, 3])

        # Tuple
        a = (1, 2, 3, 4)
        _test_vec(a)

        # List
        b = [1, 2, 3, 4]
        _test_vec(b)

        # ndarray
        c = np.array([1, 2, 3, 4])
        _test_vec(c)

        #
        # Test exceptions
        #
        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3, 4]])
        with pytest.raises(ValueError):
            clip(x=d, a_min=2, a_max=3)

    def test_centralize(self):
        centralize = Centralize()

        # Test scalar
        assert centralize(x=1) == 1
        assert centralize(x=0) == 0
        assert centralize(x=2) == 2
        assert centralize(x=1, mean=1) == 1
        assert centralize(x=0, mean=1) == 0
        assert centralize(x=2, mean=1) == 2

        # Test numpy scalar
        assert centralize(x=np.array(1)) == 1
        assert centralize(x=np.array(0)) == 0
        assert centralize(x=np.array(2)) == 2
        assert centralize(x=np.array(1), mean=1) == 1
        assert centralize(x=np.array(0), mean=1) == 0
        assert centralize(x=np.array(2), mean=1) == 2

        #
        # Test vector
        #
        def _test_vec(x):
            assert np.alltrue(centralize(x=x) == [-1.5, -0.5, 0.5, 1.5])
            assert np.alltrue(centralize(x=x, mean=1) == [0, 1, 2, 3])

        # Tuple
        a = (1, 2, 3, 4)
        _test_vec(a)

        # List
        b = [1, 2, 3, 4]
        _test_vec(b)

        # ndarray
        c = np.array([1, 2, 3, 4])
        _test_vec(c)

        #
        # Test exceptions
        #
        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3, 4]])
        with pytest.raises(ValueError):
            centralize(x=d)

    def test_normalize(self):
        normalize = Normalize(eps=1.1920929e-07)

        # Test scalar
        assert normalize(x=-1) == 0
        assert normalize(x=0.5) == 0.5
        assert normalize(x=2) == 1
        assert normalize(x=-1, min_val=0, max_val=1) == 0
        assert normalize(x=0.5, min_val=0, max_val=1) == 0.5
        assert normalize(x=2, min_val=0, max_val=1) == 1

        # Test numpy scalar
        assert normalize(x=np.array(-1)) == 0
        assert normalize(x=np.array(0.5)) == 0.5
        assert normalize(x=np.array(2)) == 1
        assert normalize(x=np.array(-1), min_val=0, max_val=1) == 0
        assert normalize(x=np.array(0.5), min_val=0, max_val=1) == 0.5
        assert normalize(x=np.array(2), min_val=0, max_val=1) == 1

        #
        # Test vector
        #
        def _test_vec(x):
            assert np.allclose(normalize(x=x),
                               [0. , 0.33333332, 0.66666664, 0.99999996])
            assert np.allclose(normalize(x=x, min_val=0, max_val=1),
                               [0.99999988, 1.99999976, 2.99999964, 3.99999952])

        # Tuple
        a = (1, 2, 3, 4)
        _test_vec(a)

        # List
        b = [1, 2, 3, 4]
        _test_vec(b)

        # ndarray
        c = np.array([1, 2, 3, 4])
        _test_vec(c)

        #
        # Test exceptions
        #
        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3, 4]])
        with pytest.raises(ValueError):
            normalize(x=d)

    def test_standardize(self):
        standardize = Standardize(eps=1.1920929e-07)

        # Test scalar
        assert standardize(x=-1) == -1
        assert standardize(x=0) == 0
        assert standardize(x=1) == 1
        assert standardize(x=-1, mean=0, std=1) == -1
        assert standardize(x=0, mean=0, std=1) == 0
        assert standardize(x=1, mean=0, std=1) == 1

        # Test numpy scalar
        assert standardize(x=np.array(-1)) == -1
        assert standardize(x=np.array(0)) == 0
        assert standardize(x=np.array(1)) == 1
        assert standardize(x=np.array(-1), mean=0, std=1) == -1
        assert standardize(x=np.array(0), mean=0, std=1) == 0
        assert standardize(x=np.array(1), mean=0, std=1) == 1

        #
        # Test vector
        #
        def _test_vec(x):
            assert np.allclose(standardize(x=x),
                               [-1.34164064, -0.44721355, 0.44721355, 1.34164064])
            assert np.allclose(standardize(x=x, mean=0, std=1),
                               [0.99999988, 1.99999976, 2.99999964, 3.99999952])

        # Tuple
        a = (1, 2, 3, 4)
        _test_vec(a)

        # List
        b = [1, 2, 3, 4]
        _test_vec(b)

        # ndarray
        c = np.array([1, 2, 3, 4])
        _test_vec(c)

        #
        # Test exceptions
        #
        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3, 4]])
        with pytest.raises(ValueError):
            standardize(x=d)

    def test_expfactorcumsum(self):
        expfactorcumsum = ExpFactorCumSum(alpha=0.1)

        #
        # Test vector
        #
        def _test_vec(x):
            assert np.allclose(expfactorcumsum(x=x),
                               [1.23, 2.3, 3.0])

        # Tuple
        a = (1, 2, 3)
        _test_vec(a)

        # List
        b = [1, 2, 3]
        _test_vec(b)

        # ndarray
        c = np.array([1, 2, 3])
        _test_vec(c)

        #
        # Test exceptions
        #
        # Scalar is not allowed
        with pytest.raises(AssertionError):
            expfactorcumsum(x=1)

        # ndarray more than 1-dim is not allowed
        d = np.array([[1, 2, 3]])
        with pytest.raises(ValueError):
            expfactorcumsum(x=d)

    def test_runningmeanstd(self):
        def _test_moments(runningmeanstd, x):
            assert np.allclose(runningmeanstd.mu, np.mean(x))
            assert np.allclose(runningmeanstd.sigma, np.std(x))

        a = [1, 2, 3, 4]

        # Scalar
        runningmeanstd = RunningMeanStd()
        [runningmeanstd(i) for i in a]
        _test_moments(runningmeanstd=runningmeanstd, x=a)

        # Vector
        runningmeanstd = RunningMeanStd()
        runningmeanstd(a)
        _test_moments(runningmeanstd=runningmeanstd, x=a)

        # n-dim array
        b = np.array([[1, 10, 100], [2, 20, 200], [3, 30, 300], [4, 40, 400]])
        runningmeanstd = RunningMeanStd()
        runningmeanstd(b)
        assert np.allclose(runningmeanstd.mu, b.mean(0))
        assert np.allclose(runningmeanstd.sigma, b.std(0))

    def test_rank_transform(self):
        rank_transform = RankTransform()

        # List
        a = [3, 14, 1]
        assert np.allclose(rank_transform(a, centered=False), [1, 2, 0])
        assert np.allclose(rank_transform(a), [0, 0.5, -0.5])

        # ndarray
        b = np.array([3, 14, 1])
        assert np.allclose(rank_transform(b, centered=False), [1, 2, 0])
        assert np.allclose(rank_transform(b), [0, 0.5, -0.5])

        #
        # Test exceptions
        #
        # Scalar is not allowed
        with pytest.raises(AssertionError):
            rank_transform(5)

        # ndarray more than 1-dim is not allowed
        c = np.array([[3, 14, 1]])
        with pytest.raises(ValueError):
            rank_transform(c) | [
"numpy.mean",
"lagom.core.transform.ExpFactorCumSum",
"lagom.core.transform.Normalize",
"lagom.core.transform.Clip",
"lagom.core.transform.Centralize",
"lagom.core.transform.RunningMeanStd",
"numpy.array",
"lagom.core.transform.RankTransform",
"pytest.raises",
"numpy.std",
"lagom.core.transform.... | [((421, 427), 'lagom.core.transform.Clip', 'Clip', ([], {}), '()\n', (425, 427), False, 'from lagom.core.transform import Clip\n'), ((1156, 1178), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1164, 1178), True, 'import numpy as np\n'), ((1316, 1340), 'numpy.array', 'np.array', (['[[1, 2, 3, 4]]'], {}), '([[1, 2, 3, 4]])\n', (1324, 1340), True, 'import numpy as np\n'), ((1482, 1494), 'lagom.core.transform.Centralize', 'Centralize', ([], {}), '()\n', (1492, 1494), False, 'from lagom.core.transform import Centralize\n'), ((2522, 2544), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2530, 2544), True, 'import numpy as np\n'), ((2682, 2706), 'numpy.array', 'np.array', (['[[1, 2, 3, 4]]'], {}), '([[1, 2, 3, 4]])\n', (2690, 2706), True, 'import numpy as np\n'), ((2834, 2862), 'lagom.core.transform.Normalize', 'Normalize', ([], {'eps': '(1.1920929e-07)'}), '(eps=1.1920929e-07)\n', (2843, 2862), False, 'from lagom.core.transform import Normalize\n'), ((4118, 4140), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (4126, 4140), True, 'import numpy as np\n'), ((4278, 4302), 'numpy.array', 'np.array', (['[[1, 2, 3, 4]]'], {}), '([[1, 2, 3, 4]])\n', (4286, 4302), True, 'import numpy as np\n'), ((4433, 4463), 'lagom.core.transform.Standardize', 'Standardize', ([], {'eps': '(1.1920929e-07)'}), '(eps=1.1920929e-07)\n', (4444, 4463), False, 'from lagom.core.transform import Standardize\n'), ((5699, 5721), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (5707, 5721), True, 'import numpy as np\n'), ((5859, 5883), 'numpy.array', 'np.array', (['[[1, 2, 3, 4]]'], {}), '([[1, 2, 3, 4]])\n', (5867, 5883), True, 'import numpy as np\n'), ((6024, 6050), 'lagom.core.transform.ExpFactorCumSum', 'ExpFactorCumSum', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (6039, 6050), False, 'from lagom.core.transform import ExpFactorCumSum\n'), ((6409, 6428), 'numpy.array', 'np.array', 
(['[1, 2, 3]'], {}), '([1, 2, 3])\n', (6417, 6428), True, 'import numpy as np\n'), ((6684, 6705), 'numpy.array', 'np.array', (['[[1, 2, 3]]'], {}), '([[1, 2, 3]])\n', (6692, 6705), True, 'import numpy as np\n'), ((7068, 7084), 'lagom.core.transform.RunningMeanStd', 'RunningMeanStd', ([], {}), '()\n', (7082, 7084), False, 'from lagom.core.transform import RunningMeanStd\n'), ((7225, 7241), 'lagom.core.transform.RunningMeanStd', 'RunningMeanStd', ([], {}), '()\n', (7239, 7241), False, 'from lagom.core.transform import RunningMeanStd\n'), ((7361, 7427), 'numpy.array', 'np.array', (['[[1, 10, 100], [2, 20, 200], [3, 30, 300], [4, 40, 400]]'], {}), '([[1, 10, 100], [2, 20, 200], [3, 30, 300], [4, 40, 400]])\n', (7369, 7427), True, 'import numpy as np\n'), ((7453, 7469), 'lagom.core.transform.RunningMeanStd', 'RunningMeanStd', ([], {}), '()\n', (7467, 7469), False, 'from lagom.core.transform import RunningMeanStd\n'), ((7681, 7696), 'lagom.core.transform.RankTransform', 'RankTransform', ([], {}), '()\n', (7694, 7696), False, 'from lagom.core.transform import RankTransform\n'), ((7918, 7938), 'numpy.array', 'np.array', (['[3, 14, 1]'], {}), '([3, 14, 1])\n', (7926, 7938), True, 'import numpy as np\n'), ((8309, 8331), 'numpy.array', 'np.array', (['[[3, 14, 1]]'], {}), '([[3, 14, 1]])\n', (8317, 8331), True, 'import numpy as np\n'), ((1354, 1379), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1367, 1379), False, 'import pytest\n'), ((2720, 2745), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2733, 2745), False, 'import pytest\n'), ((4316, 4341), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4329, 4341), False, 'import pytest\n'), ((5897, 5922), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5910, 5922), False, 'import pytest\n'), ((6550, 6579), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (6563, 6579), False, 'import 
pytest\n'), ((6719, 6744), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6732, 6744), False, 'import pytest\n'), ((8174, 8203), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (8187, 8203), False, 'import pytest\n'), ((8345, 8370), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8358, 8370), False, 'import pytest\n'), ((6923, 6933), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (6930, 6933), True, 'import numpy as np\n'), ((6988, 6997), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (6994, 6997), True, 'import numpy as np\n'), ((667, 678), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (675, 678), True, 'import numpy as np\n'), ((725, 738), 'numpy.array', 'np.array', (['(0.5)'], {}), '(0.5)\n', (733, 738), True, 'import numpy as np\n'), ((787, 799), 'numpy.array', 'np.array', (['(-1)'], {}), '(-1)\n', (795, 799), True, 'import numpy as np\n'), ((1840, 1851), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1848, 1851), True, 'import numpy as np\n'), ((1886, 1897), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (1894, 1897), True, 'import numpy as np\n'), ((1932, 1943), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (1940, 1943), True, 'import numpy as np\n'), ((1987, 1998), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1995, 1998), True, 'import numpy as np\n'), ((2041, 2052), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (2049, 2052), True, 'import numpy as np\n'), ((2095, 2106), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (2103, 2106), True, 'import numpy as np\n'), ((3253, 3265), 'numpy.array', 'np.array', (['(-1)'], {}), '(-1)\n', (3261, 3265), True, 'import numpy as np\n'), ((3299, 3312), 'numpy.array', 'np.array', (['(0.5)'], {}), '(0.5)\n', (3307, 3312), True, 'import numpy as np\n'), ((3348, 3359), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (3356, 3359), True, 'import numpy as np\n'), ((3402, 3414), 'numpy.array', 'np.array', 
(['(-1)'], {}), '(-1)\n', (3410, 3414), True, 'import numpy as np\n'), ((3470, 3483), 'numpy.array', 'np.array', (['(0.5)'], {}), '(0.5)\n', (3478, 3483), True, 'import numpy as np\n'), ((3541, 3552), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (3549, 3552), True, 'import numpy as np\n'), ((4841, 4853), 'numpy.array', 'np.array', (['(-1)'], {}), '(-1)\n', (4849, 4853), True, 'import numpy as np\n'), ((4890, 4901), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (4898, 4901), True, 'import numpy as np\n'), ((4937, 4948), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (4945, 4948), True, 'import numpy as np\n'), ((4993, 5005), 'numpy.array', 'np.array', (['(-1)'], {}), '(-1)\n', (5001, 5005), True, 'import numpy as np\n'), ((5057, 5068), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (5065, 5068), True, 'import numpy as np\n'), ((5119, 5130), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (5127, 5130), True, 'import numpy as np\n')] |
import numpy as np
from ipywidgets import widgets
import matplotlib.pyplot as plt
from itk import binary_dilate_image_filter,binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter
class OpenCloseWidget():
    """Interactive demo of binary morphological opening and closing.

    Builds a small binary test image and shows it together with
    Open/Close/Reset buttons; each button applies the matching ITK
    filter to the current image and refreshes the matplotlib view.
    """

    def __init__(self):
        canvas = np.zeros((50, 50), dtype=np.uint8)
        canvas[13, 13] = 255
        canvas[20:30, 20:23] = 255
        canvas[30, 21] = 255
        canvas[31:40, 20:23] = 255
        self.image = canvas
        # Keep a handle on the pristine array so reset() can restore it.
        self.image_copy = canvas
        open_button = widgets.Button(description='Open')
        open_button.on_click(self.opening)
        close_button = widgets.Button(description='Close')
        close_button.on_click(self.closing)
        reset_button = widgets.Button(description='Reset Image')
        reset_button.on_click(self.reset)
        display(widgets.HBox([open_button, close_button, reset_button]))
        self.fig = plt.figure(figsize=(5, 5))
        self.img_obj = plt.imshow(self.image, origin='lower')
        plt.clim((0, 255))

    def opening(self, event):
        """Apply binary morphological opening to the image and redraw."""
        filtered = binary_morphological_opening_image_filter(GetImageFromArray(self.image))
        self.image = GetArrayFromImage(filtered)
        self.img_obj.set_data(self.image)

    def closing(self, event):
        """Apply binary morphological closing to the image and redraw."""
        filtered = binary_morphological_closing_image_filter(GetImageFromArray(self.image))
        self.image = GetArrayFromImage(filtered)
        self.img_obj.set_data(self.image)

    def reset(self, event):
        """Restore the untouched demo image."""
        self.image = self.image_copy
        self.img_obj.set_data(self.image_copy)
class DilateErodeWidget():
    """Interactive demo of binary dilation and erosion.

    Builds a small binary test image and shows it together with
    Dilate/Erode/Reset buttons; each button applies the matching ITK
    filter to the current image and refreshes the matplotlib view.
    """

    def __init__(self):
        canvas = np.zeros((50, 50), dtype=np.uint8)
        canvas[13, 13] = 255
        canvas[20:30, 20:23] = 255
        canvas[30, 21] = 255
        canvas[31:40, 20:23] = 255
        self.image = canvas
        # Keep a handle on the pristine array so reset() can restore it.
        self.image_copy = canvas
        dilate_button = widgets.Button(description='Dilate')
        dilate_button.on_click(self.dilate)
        erode_button = widgets.Button(description='Erode')
        erode_button.on_click(self.erode)
        reset_button = widgets.Button(description='Reset Image')
        reset_button.on_click(self.reset)
        display(widgets.HBox([dilate_button, erode_button, reset_button]))
        self.fig = plt.figure(figsize=(5, 5))
        self.img_obj = plt.imshow(self.image, origin='lower')
        plt.clim((0, 255))

    def dilate(self, event):
        """Binary-dilate the current image and redraw."""
        filtered = binary_dilate_image_filter(GetImageFromArray(self.image))
        self.image = GetArrayFromImage(filtered)
        self.img_obj.set_data(self.image)

    def erode(self, event):
        """Binary-erode the current image and redraw."""
        filtered = binary_erode_image_filter(GetImageFromArray(self.image))
        self.image = GetArrayFromImage(filtered)
        self.img_obj.set_data(self.image)

    def reset(self, event):
        """Restore the untouched demo image."""
        self.image = self.image_copy
        self.img_obj.set_data(self.image_copy)
class Drawer():
    """Interactive paint canvas: left mouse button paints, right button erases,
    buttons apply ITK binary morphology (dilate/erode/open/close) or reset.

    Parameters
    ----------
    paint_width : int
        Half-width of the square brush; 0 paints a single pixel.
    paint_value : int
        Pixel value written by the left mouse button (default 255).
    erase_value : int
        Pixel value written by the right mouse button (default 0).
    """
    def __init__(self, paint_width=1, paint_value=255, erase_value=0):
        self.drawing = False
        self.paint_width = paint_width
        self.paint_value = paint_value
        self.erase_value = erase_value
        self.image = self.create_image()
        dilate_button = widgets.Button(description='Dilate')
        erode_button = widgets.Button(description='Erode')
        open_button = widgets.Button(description='Open')
        close_button = widgets.Button(description='Close')
        reset_button = widgets.Button(description='Reset Image')
        dilate_button.on_click(self.dilate)
        erode_button.on_click(self.erode)
        open_button.on_click(self.opening)
        close_button.on_click(self.closing)
        reset_button.on_click(self.reset)
        display(widgets.HBox([dilate_button, erode_button, open_button, close_button, reset_button]))
        self.fig = plt.figure(figsize=(5, 5))
        self.img_obj = plt.imshow(self.image, origin='lower')
        plt.clim((0, 255))
        plt.show()
        self.fig.canvas.mpl_connect('button_press_event', self.onclick)
        self.fig.canvas.mpl_connect('button_release_event', self.onrelease)
        self.fig.canvas.mpl_connect('motion_notify_event', self.onmove)
    def create_image(self):
        """Return the initial blank 100x100 uint8 canvas."""
        return np.zeros((100, 100), dtype=np.uint8)
    def dilate(self, event):
        """Binary-dilate the current image and refresh the display."""
        itk_image = GetImageFromArray(self.image)
        self.image = GetArrayFromImage(binary_dilate_image_filter(itk_image))
        self.img_obj.set_data(self.image)
    def erode(self, event):
        """Binary-erode the current image and refresh the display."""
        itk_image = GetImageFromArray(self.image)
        self.image = GetArrayFromImage(binary_erode_image_filter(itk_image))
        self.img_obj.set_data(self.image)
    def reset(self, event):
        """Replace the canvas with a fresh blank image."""
        self.image = self.create_image()
        self.img_obj.set_data(self.image)
    def opening(self, event):
        """Apply binary morphological opening and refresh the display."""
        itk_image = GetImageFromArray(self.image)
        self.image = GetArrayFromImage(binary_morphological_opening_image_filter(itk_image))
        self.img_obj.set_data(self.image)
    def closing(self, event):
        """Apply binary morphological closing and refresh the display."""
        itk_image = GetImageFromArray(self.image)
        self.image = GetArrayFromImage(binary_morphological_closing_image_filter(itk_image))
        self.img_obj.set_data(self.image)
    def _paint_from_event(self, event):
        """Paint or erase at the mouse-event location; shared by click and drag.

        Bug fix: matplotlib sets event.xdata/ydata to None when the cursor is
        outside the axes, which previously raised TypeError in int().
        """
        if event.xdata is None or event.ydata is None:
            return
        if event.button == 1:
            self.draw_point(int(event.xdata), int(event.ydata), self.paint_value)
        elif event.button == 3:
            self.draw_point(int(event.xdata), int(event.ydata), self.erase_value)
    def onclick(self, event):
        """Start drawing and paint the first point."""
        self.drawing = True
        self._paint_from_event(event)
    def onmove(self, event):
        """Continue painting while the mouse moves (draw_point checks self.drawing)."""
        self._paint_from_event(event)
    def draw_point(self, ix, iy, value):
        """Write `value` into a square brush centred on (ix, iy), if drawing."""
        if self.drawing == True:
            if self.paint_width == 0:
                self.image[iy, ix] = value
                self.img_obj._A.data[iy, ix] = value
            else:
                # Bug fix: clamp lower bounds to 0 — a negative slice start
                # wraps around in Python and paints the wrong region (or
                # nothing) near the left/bottom edges.
                y0 = max(0, iy - self.paint_width)
                x0 = max(0, ix - self.paint_width)
                y1 = iy + self.paint_width
                x1 = ix + self.paint_width
                self.image[y0:y1, x0:x1] = value
                # Update the displayed array in place as well (private
                # matplotlib attribute, kept from the original implementation).
                self.img_obj._A.data[y0:y1, x0:x1] = value
            plt.draw()
    def onrelease(self, event):
        """Stop drawing when the mouse button is released."""
        self.drawing = False
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.clim",
"matplotlib.pyplot.draw",
"itk.binary_erode_image_filter",
"itk.binary_morphological_opening_image_filter",
"ipywidgets.widgets.Button",
"numpy.zeros",
"matplotlib.pyplot.figure",
"ipywidgets.widgets.HBox",
"itk.GetImageFromArray",
"itk.binar... | [((386, 420), 'numpy.zeros', 'np.zeros', (['(50, 50)'], {'dtype': 'np.uint8'}), '((50, 50), dtype=np.uint8)\n', (394, 420), True, 'import numpy as np\n'), ((626, 667), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Reset Image"""'}), "(description='Reset Image')\n", (640, 667), False, 'from ipywidgets import widgets\n'), ((690, 724), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Open"""'}), "(description='Open')\n", (704, 724), False, 'from ipywidgets import widgets\n'), ((748, 783), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Close"""'}), "(description='Close')\n", (762, 783), False, 'from ipywidgets import widgets\n'), ((1016, 1042), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1026, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1103), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.image'], {'origin': '"""lower"""'}), "(self.image, origin='lower')\n", (1075, 1103), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1130), 'matplotlib.pyplot.clim', 'plt.clim', (['(0, 255)'], {}), '((0, 255))\n', (1120, 1130), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1210), 'itk.GetImageFromArray', 'GetImageFromArray', (['self.image'], {}), '(self.image)\n', (1198, 1210), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((1402, 1431), 'itk.GetImageFromArray', 'GetImageFromArray', (['self.image'], {}), '(self.image)\n', (1419, 1431), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((1754, 1788), 'numpy.zeros', 'np.zeros', 
(['(50, 50)'], {'dtype': 'np.uint8'}), '((50, 50), dtype=np.uint8)\n', (1762, 1788), True, 'import numpy as np\n'), ((1994, 2035), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Reset Image"""'}), "(description='Reset Image')\n", (2008, 2035), False, 'from ipywidgets import widgets\n'), ((2060, 2096), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Dilate"""'}), "(description='Dilate')\n", (2074, 2096), False, 'from ipywidgets import widgets\n'), ((2120, 2155), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Erode"""'}), "(description='Erode')\n", (2134, 2155), False, 'from ipywidgets import widgets\n'), ((2389, 2415), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2399, 2415), True, 'import matplotlib.pyplot as plt\n'), ((2438, 2476), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.image'], {'origin': '"""lower"""'}), "(self.image, origin='lower')\n", (2448, 2476), True, 'import matplotlib.pyplot as plt\n'), ((2485, 2503), 'matplotlib.pyplot.clim', 'plt.clim', (['(0, 255)'], {}), '((0, 255))\n', (2493, 2503), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2582), 'itk.GetImageFromArray', 'GetImageFromArray', (['self.image'], {}), '(self.image)\n', (2570, 2582), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((2760, 2789), 'itk.GetImageFromArray', 'GetImageFromArray', (['self.image'], {}), '(self.image)\n', (2777, 2789), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((3340, 3376), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': 
'"""Dilate"""'}), "(description='Dilate')\n", (3354, 3376), False, 'from ipywidgets import widgets\n'), ((3400, 3435), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Erode"""'}), "(description='Erode')\n", (3414, 3435), False, 'from ipywidgets import widgets\n'), ((3458, 3492), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Open"""'}), "(description='Open')\n", (3472, 3492), False, 'from ipywidgets import widgets\n'), ((3516, 3551), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Close"""'}), "(description='Close')\n", (3530, 3551), False, 'from ipywidgets import widgets\n'), ((3575, 3616), 'ipywidgets.widgets.Button', 'widgets.Button', ([], {'description': '"""Reset Image"""'}), "(description='Reset Image')\n", (3589, 3616), False, 'from ipywidgets import widgets\n'), ((3955, 3981), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (3965, 3981), True, 'import matplotlib.pyplot as plt\n'), ((4004, 4042), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.image'], {'origin': '"""lower"""'}), "(self.image, origin='lower')\n", (4014, 4042), True, 'import matplotlib.pyplot as plt\n'), ((4051, 4069), 'matplotlib.pyplot.clim', 'plt.clim', (['(0, 255)'], {}), '((0, 255))\n', (4059, 4069), True, 'import matplotlib.pyplot as plt\n'), ((4088, 4098), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4096, 4098), True, 'import matplotlib.pyplot as plt\n'), ((4377, 4413), 'numpy.zeros', 'np.zeros', (['(100, 100)'], {'dtype': 'np.uint8'}), '((100, 100), dtype=np.uint8)\n', (4385, 4413), True, 'import numpy as np\n'), ((4463, 4492), 'itk.GetImageFromArray', 'GetImageFromArray', (['self.image'], {}), '(self.image)\n', (4480, 4492), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, 
label_image_to_shape_label_map_filter\n'), ((4670, 4699), 'itk.GetImageFromArray', 'GetImageFromArray', (['self.image'], {}), '(self.image)\n', (4687, 4699), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((4986, 5015), 'itk.GetImageFromArray', 'GetImageFromArray', (['self.image'], {}), '(self.image)\n', (5003, 5015), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((5207, 5236), 'itk.GetImageFromArray', 'GetImageFromArray', (['self.image'], {}), '(self.image)\n', (5224, 5236), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((939, 994), 'ipywidgets.widgets.HBox', 'widgets.HBox', (['[open_button, close_button, reset_button]'], {}), '([open_button, close_button, reset_button])\n', (951, 994), False, 'from ipywidgets import widgets\n'), ((1250, 1302), 'itk.binary_morphological_opening_image_filter', 'binary_morphological_opening_image_filter', (['itk_image'], {}), '(itk_image)\n', (1291, 1302), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((1471, 1523), 'itk.binary_morphological_closing_image_filter', 'binary_morphological_closing_image_filter', (['itk_image'], {}), '(itk_image)\n', (1512, 1523), False, 'from itk import binary_dilate_image_filter, 
binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((2310, 2367), 'ipywidgets.widgets.HBox', 'widgets.HBox', (['[dilate_button, erode_button, reset_button]'], {}), '([dilate_button, erode_button, reset_button])\n', (2322, 2367), False, 'from ipywidgets import widgets\n'), ((2622, 2659), 'itk.binary_dilate_image_filter', 'binary_dilate_image_filter', (['itk_image'], {}), '(itk_image)\n', (2648, 2659), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((2829, 2865), 'itk.binary_erode_image_filter', 'binary_erode_image_filter', (['itk_image'], {}), '(itk_image)\n', (2854, 2865), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((3850, 3938), 'ipywidgets.widgets.HBox', 'widgets.HBox', (['[dilate_button, erode_button, open_button, close_button, reset_button]'], {}), '([dilate_button, erode_button, open_button, close_button,\n reset_button])\n', (3862, 3938), False, 'from ipywidgets import widgets\n'), ((4532, 4569), 'itk.binary_dilate_image_filter', 'binary_dilate_image_filter', (['itk_image'], {}), '(itk_image)\n', (4558, 4569), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((4739, 4775), 'itk.binary_erode_image_filter', 'binary_erode_image_filter', (['itk_image'], {}), '(itk_image)\n', (4764, 4775), False, 'from itk import 
binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((5055, 5107), 'itk.binary_morphological_opening_image_filter', 'binary_morphological_opening_image_filter', (['itk_image'], {}), '(itk_image)\n', (5096, 5107), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((5276, 5328), 'itk.binary_morphological_closing_image_filter', 'binary_morphological_closing_image_filter', (['itk_image'], {}), '(itk_image)\n', (5317, 5328), False, 'from itk import binary_dilate_image_filter, binary_morphological_closing_image_filter, binary_morphological_opening_image_filter, binary_erode_image_filter, GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter\n'), ((6485, 6495), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (6493, 6495), True, 'import matplotlib.pyplot as plt\n')] |
import random
import numpy as np
import scipy
import time
import json
import os
import pdb
import pickle
import pandas
from progressbar import *
from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot
from keras.models import Sequential, load_model, Model
from keras.optimizers import RMSprop, Adam, SGD
from keras import backend as K
from keras import regularizers
from keras.utils.np_utils import to_categorical
from utils import convnet_vgg, convnet_mod, convnet_ori, convnet_com
def softmax(x):
    """Numerically stable softmax: shift by the maximum before exponentiating,
    then normalise so the entries sum to one."""
    exps = np.exp(x - np.max(x))
    return exps / np.sum(exps)
def makeFunc(x):
    """Return a closure selecting column `x` of a 2-D array.

    Binds `x` at creation time, so closures built inside a loop each keep
    their own column index (avoids the late-binding lambda pitfall).
    """
    def _pick_column(y):
        return y[:, x]
    return _pick_column
class BaseListenerNetwork(object):
    """Common persistence/training scaffolding for listener networks.

    Subclasses must implement initialize_model() (sets self.listener_model)
    and build_train_fn() (sets self.opt and self.train_fn); this base class
    handles saving/loading the model, its weights, the optimizer state, and
    an in-memory weight snapshot.
    """
    def __init__(self, modelname, optfilename, lr, entropy_coefficient, config_dict):
        self.modelname = modelname
        self.optfilename = optfilename
        self.lr = lr
        self.entropy_coefficient = entropy_coefficient
        assert config_dict, "config_dict does not exist"
        self.config = config_dict
        self.initialize_model()
        self.build_train_fn()
    def rebuild_train_fn(self, entropy_coefficient=None, lr=None):
        """Optionally override entropy weight and/or learning rate, then rebuild."""
        if entropy_coefficient:
            self.entropy_coefficient = entropy_coefficient
        if lr:
            self.lr = lr
        self.build_train_fn()
    def save(self):
        """Persist the full Keras model to self.modelname."""
        self.listener_model.save(self.modelname)
    def load(self):
        """Load the full Keras model from self.modelname."""
        self.listener_model = load_model(self.modelname)
    def save_weights(self):
        """Persist only the model weights to self.modelname."""
        self.listener_model.save_weights(self.modelname)
    def load_weights(self):
        """Load only the model weights from self.modelname."""
        self.listener_model.load_weights(self.modelname)
    def save_opt(self):
        """Pickle the optimizer's weight values so training can resume."""
        values = K.batch_get_value(self.opt.weights)
        with open(self.optfilename, 'wb') as fh:
            pickle.dump(values, fh)
    def load_opt(self):
        """Restore pickled optimizer weight values into self.opt."""
        with open(self.optfilename, 'rb') as fh:
            values = pickle.load(fh)
        self.opt.set_weights(values)
    def save_memory(self):
        """Snapshot the current model weights in memory."""
        self.memory_model_weights = self.listener_model.get_weights()
    def load_memory(self):
        """Restore the in-memory weight snapshot taken by save_memory()."""
        self.listener_model.set_weights(self.memory_model_weights)
class PaperListenerNetwork(BaseListenerNetwork):
    """Listener that scores each candidate by a dot product between a dense
    transform of the (fixed-length) speaker message and the candidate vector,
    then samples/argmaxes over a softmax of those scores.

    Trained with REINFORCE plus an entropy regulariser (see build_train_fn).
    """
    def __init__(self, modelname, optfilename, lr, entropy_coefficient, config_dict):
        super(PaperListenerNetwork, self).__init__(modelname, optfilename, lr, entropy_coefficient, config_dict)
        # Per-episode replay buffers, consumed by train_listener_policy_on_batch().
        self.batch_speaker_message = []
        self.batch_action = []
        self.batch_candidates = []
        self.batch_reward = []
    def initialize_model(self):
        """
        Build the Keras listener model (or load it if self.modelname exists).

        Inputs: message (bs, max_message_length) and candidates
        (bs, n_classes, speaker_input_dim). Outputs: [softmax over candidates,
        raw scores U, message embedding z, concatenated candidate vectors us].
        """
        if not os.path.exists(self.modelname):
            ## Define model
            t_input = Input(shape=(self.config['max_message_length'],)) #Speakers Message, shape(bs, max_message_length)
            c_inputs_all = Input(shape=(self.config['n_classes'], self.config['speaker_input_dim'])) #Candidates, shape(bs, n_class, speaker_input_dim)
            inputs = [t_input, c_inputs_all]
            # Embed the message into candidate space so it can be dotted with each candidate.
            z = Dense(self.config['speaker_input_dim'], activation='sigmoid')(t_input) #shape(bs, speaker_input_dim)
            ts = []
            us = []
            for _ in range(self.config['n_classes']):
                #c_input = Input(shape=(self.config['speaker_input_dim'],)) #shape(bs, speaker_input_dim)
                # makeFunc(_) binds the loop index so each Lambda selects its own candidate slice.
                c_input = Lambda(makeFunc(_))(c_inputs_all) #shape(bs, speaker_input_dim)
                #t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x), axis=1)))(add([t_trans, Lambda(lambda x: -x)(c_input)])) #shape(bs, 1)
                t = Dot(1, False)([z, c_input]) #shape(bs, 1)
                ts.append(t)
                us.append(c_input)
            U = concatenate(ts) #shape(bs, n_classes)
            us = concatenate(us)
            final_output = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
            #final_output = Dense(self.n_classes, activation='softmax', kernel_initializer='identity')(U)
            #final_output = Dense(self.n_classes, activation='softmax')(U)
            #f1 = Dense(50)(U)
            #f2 = Lambda(lambda x: K.square(x))(f1)
            #final_output = Dense(self.n_classes, activation='softmax')(f2)
            self.listener_model = Model(inputs=inputs, outputs=[final_output, U, z, us])
            #self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
        else:
            self.load()
            #check!!!
    def build_train_fn(self):
        """
        Compile the REINFORCE update as a Keras backend function.

        Loss per sample: -log pi(a) * reward + entropy_coefficient * entropy,
        where entropy here is sum(p * log p) (i.e. negative entropy; a larger
        coefficient pushes the policy towards lower entropy).
        """
        #direct prob input!!!
        action_prob_placeholder = self.listener_model.output[0] #(bs, n_classes)
        action_onehot_placeholder = K.placeholder(shape=(None, self.config['n_classes']), name="action_onehot") #(bs, n_classes)
        reward_placeholder = K.placeholder(shape=(None,), name="reward") #(?)
        # Select the probability of the action actually taken.
        action_prob = K.sum(action_prob_placeholder * action_onehot_placeholder, axis=1)
        log_action_prob = K.log(action_prob)
        loss = - log_action_prob * reward_placeholder
        # 1e-10 guards against log(0).
        entropy = K.sum(action_prob_placeholder * K.log(action_prob_placeholder + 1e-10), axis=1)
        #entropy = K.sum(entropy)
        loss = loss + self.entropy_coefficient * entropy
        loss = K.mean(loss)
        self.opt = Adam(lr=self.lr)
        self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=loss)
        if os.path.exists(self.optfilename):
            self.load_opt()
        # NOTE(review): both outputs are the same tensor; the caller's second
        # return value is the loss again, not the entropy.
        self.train_fn = K.function(
            inputs = self.listener_model.input + [action_onehot_placeholder, reward_placeholder],
            outputs=[loss, loss], updates=self.updates)
    def reshape_message_candidates(self, speaker_message, candidates):
        """Add a batch dimension to a single (message, candidates) pair."""
        assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
        assert len(candidates.shape)==2 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==self.config['speaker_input_dim']
        speaker_message = np.expand_dims(speaker_message, axis=0) #shape(1, max_message_length)
        #X = [speaker_message] + [c.reshape([1,-1]) for c in candidates]
        X = [speaker_message, np.expand_dims(candidates, axis=0)]
        return X
    def sample_from_listener_policy(self, speaker_message, candidates):
        """
        Sample an action from the policy for one instance (no batch dimension).

        Returns (sampled action index, probability vector, raw scores U).
        """
        X = self.reshape_message_candidates(speaker_message, candidates)
        listener_output= self.listener_model.predict_on_batch(X)
        y, U, z = listener_output[:3]
        #us = listener_output[3]
        listener_probs = y
        listener_probs = np.squeeze(listener_probs) #shape(n_class)
        listener_action = np.random.choice(np.arange(self.config['n_classes']), p=listener_probs) #int
        U = np.squeeze(U)
        return listener_action, listener_probs, U
    def infer_from_listener_policy(self, speaker_message, candidates):
        """
        Greedy (argmax) action for one instance (no batch dimension).

        Returns (best action index, probability vector, raw scores U).
        """
        X = self.reshape_message_candidates(speaker_message, candidates)
        listener_output= self.listener_model.predict_on_batch(X)
        y, U, z = listener_output[:3]
        #us = listener_output[3]
        listener_probs = y
        listener_probs = np.squeeze(listener_probs) #shape(n_class)
        listener_action = np.argmax(listener_probs) #int
        U = np.squeeze(U)
        return listener_action, listener_probs, U
    def train_listener_policy_on_batch(self):
        """
        Run one REINFORCE update on the buffered batch, then clear the buffers.
        """
        action_onehot = to_categorical(self.batch_action, num_classes=self.config['n_classes'])
        #self.batch_candidates = np.array(self.batch_candidates).transpose([1, 0, 2]).tolist() #shape(num_classes, bs, speaker_input_dim)
        #self.batch_candidates = np.swapaxes(np.array(self.batch_candidates), 0, 1).tolist() #shape(num_classes, bs, speaker_input_dim)
        #self.batch_candidates = np.swapaxes(np.array(self.batch_candidates), 0, 1).astype('float32').tolist() #shape(num_classes, bs, speaker_input_dim)
        #self.batch_candidates = [np.array(_) for _ in self.batch_candidates]
        #_loss, _entropy = self.train_fn([self.batch_speaker_message] + self.batch_candidates + [action_onehot, self.batch_reward] )
        _loss, _entropy = self.train_fn([np.array(self.batch_speaker_message), self.batch_candidates, action_onehot, self.batch_reward] )
        #print("Listener loss: ", _loss)
        self.batch_speaker_message = [] #shape(bs, max_message_length)
        self.batch_action = [] #shape(bs)
        self.batch_candidates = [] #shape(bs, n_classes, speaker_input_dim)
        self.batch_reward = [] #shape(bs)
    def remember_listener_training_details(self, speaker_message, action, action_probs, target, candidates, reward):
        """
        Buffer one (message, action, candidates, reward) tuple for the next
        batch update. `action_probs` and `target` are accepted but unused here.
        """
        self.batch_speaker_message.append(speaker_message)
        self.batch_action.append(action)
        self.batch_candidates.append(candidates)
        self.batch_reward.append(reward)
class PaperListenerNetwork_rnn(PaperListenerNetwork):
    """Listener variant that encodes variable-length messages with an LSTM and
    scores candidates by negative squared Euclidean distance between the
    message embedding and a shared dense embedding of each candidate.
    """
    def reshape_message_candidates(self, speaker_message, candidates):
        """One-hot the (possibly shorter than max) message and add a batch axis."""
        #if not self.config['fixed_length']:
        #    assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
        #else:
        #    assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
        assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
        assert len(candidates.shape)==2 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==self.config['speaker_input_dim']
        speaker_message = np.expand_dims(to_categorical(speaker_message, self.config['alphabet_size']), axis=0) #shape(1, message_length, alphabet_size)
        #X = [speaker_message] + [c.reshape([1,-1]) for c in candidates]
        X = [speaker_message, np.expand_dims(candidates, axis=0)]
        return X
    def initialize_model(self):
        """
        Build the LSTM-based listener (or load it if self.modelname exists).

        Inputs: one-hot message (bs, T, alphabet_size) and candidates
        (bs, n_classes, speaker_input_dim). Outputs: [softmax, scores U,
        message embedding z, concatenated candidate embeddings us].
        """
        ## Define model
        if not os.path.exists(self.modelname):
            t_input = Input(shape=(None, self.config['alphabet_size'],)) #Speakers Message, shape(bs, message_length, alphabet_size)
            #c_inputs_all = Input(shape=(self.config['n_classes'], self.config['speaker_input_dim'])) #Candidates, shape(bs, n_classes, speaker_input_dim)
            c_inputs_all = Input(shape=(None, self.config['speaker_input_dim'])) #Candidates, shape(bs, n_classes, speaker_input_dim)
            inputs = [t_input, c_inputs_all]
            lstm = LSTM(self.config['listener_dim'], activation='tanh', return_sequences=False, return_state=True)
            # o is the final LSTM output; hidden/cell states are unused.
            o, sh, sc = lstm(t_input)
            z = Dense(self.config['listener_dim'], activation='sigmoid')(o) #shape(bs, listener_dim)
            ts = []
            us = []
            # Shared dense layer: all candidates are embedded with the same weights.
            u = Dense(self.config['listener_dim'], activation='sigmoid')
            for _ in range(self.config['n_classes']):
                #c_input = Input(shape=(self.config['speaker_input_dim'],)) #shape(bs, speaker_input_dim)
                c_input = Lambda(makeFunc(_))(c_inputs_all)
                uc = u(c_input)
                # Score = -||z - uc||^2 (negative squared Euclidean distance).
                t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x), axis=1)))(add([z, Lambda(lambda x: -x)(uc)])) #shape(bs, 1)
                #t = Dot(1, False)([z,uc]) #shape(bs, 1)
                ts.append(t)
                us.append(uc)
            U = concatenate(ts) #shape(bs, n_classes)
            us = concatenate(us)
            final_output = Lambda(lambda x: K.softmax(x))(U)
            #shape(bs, n_classes)
            self.listener_model = Model(inputs=inputs, outputs=[final_output, U, z, us])
            #self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
        else:
            self.load()
            #check!!!
    def set_updates(self):
        """Create the Adam optimizer and its update ops for self.loss."""
        self.opt = Adam(lr=self.lr)
        #adam = RMSprop(lr=self.lr)
        self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=self.loss)
        if os.path.exists(self.optfilename):
            self.load_opt()
    def build_train_fn(self):
        """
        Compile the REINFORCE update; like the parent but with a shape-agnostic
        action placeholder and optimizer creation delegated to set_updates().
        """
        #direct prob input!!!
        action_prob_placeholder = self.listener_model.output[0] #(bs, n_classes)
        #action_onehot_placeholder = K.placeholder(shape=(None, self.config['n_classes']), name="action_onehot") #(bs, n_classes)
        action_onehot_placeholder = K.placeholder(shape=(None, None), name="action_onehot") #(bs, n_classes)
        reward_placeholder = K.placeholder(shape=(None,), name="reward") #(?)
        action_prob = K.sum(action_prob_placeholder*action_onehot_placeholder, axis=1)
        log_action_prob = K.log(action_prob)
        loss = - log_action_prob*reward_placeholder
        # 1e-10 guards against log(0).
        entropy = K.sum(action_prob_placeholder * K.log(action_prob_placeholder + 1e-10), axis=1)
        #entropy = K.sum(entropy)
        loss = loss + self.entropy_coefficient * entropy
        loss = K.mean(loss)
        self.loss =loss
        self.set_updates()
        self.train_fn = K.function(
            inputs = self.listener_model.input + [action_onehot_placeholder, reward_placeholder],
            outputs=[loss, loss], updates=self.updates)
    def remember_listener_training_details(self, speaker_message, action, action_probs, target, candidates, reward):
        """
        Buffer one training tuple; pads the message to max_message_length with
        -1 before one-hot encoding so all buffered messages share a shape.
        # NOTE(review): to_categorical(-1) wraps to the last alphabet index —
        # the pad symbol aliases symbol alphabet_size-1; confirm intended.
        """
        #if not self.config['fixed_length']:
        toadd = self.config['max_message_length'] - len(speaker_message)
        for _ in range(toadd):
            speaker_message = np.append(speaker_message, -1)
        speaker_message = to_categorical(speaker_message, self.config['alphabet_size']) #shape(message_length, alphabet_size)
        self.batch_speaker_message.append(speaker_message)
        self.batch_action.append(action)
        self.batch_candidates.append(candidates)
        self.batch_reward.append(reward)
class PaperListenerNetwork_rnn_conv(PaperListenerNetwork_rnn):
    """LSTM listener whose candidates are raw images, embedded by a shared
    (optionally pretrained, optionally frozen) convolutional network.
    """
    def __init__(self, modelname, optfilename, lr, entropy_coefficient, pretrain_convmodel_file, traincnn, config):
        # Must be set before super().__init__ because it triggers initialize_model().
        self.pretrain_convmodel_file = pretrain_convmodel_file
        self.traincnn = traincnn   # if falsy, conv weights are excluded from the update
        super(PaperListenerNetwork_rnn_conv, self).__init__(modelname, optfilename, lr, entropy_coefficient, config)
    def initialize_model(self):
        """
        Build the conv+LSTM listener (or load it), then split trainable weights
        into conv vs. non-conv groups for set_updates().
        """
        if not os.path.exists(self.modelname):
            ## Define model
            self.conv_model = convnet_com(self.config['speaker_input_w'], self.config['speaker_input_h'], 3, preloadfile=self.pretrain_convmodel_file, name='conv_model_l')
            t_input = Input(shape=(None, self.config['alphabet_size'],)) #Speakers Message, shape(bs, message_length, alphabet_size)
            c_inputs_all = Input(shape=(self.config['n_classes'], self.config['speaker_input_w'], self.config['speaker_input_h'], 3), name='image_l') #Candidates, shape(bs, speaker_input_w, speaker_input_h, 3)
            inputs = [t_input, c_inputs_all]
            lstm = LSTM(self.config['listener_dim'], activation='tanh', return_sequences=False, return_state=True)
            # o is the final LSTM output; hidden/cell states are unused.
            o, sh, sc = lstm(t_input)
            z = Dense(self.config['listener_dim'], activation='sigmoid')(o) #shape(bs, listener_dim)
            #u = Dense(self.config['listener_dim'], activation='sigmoid',kernel_regularizer=regularizers.l2(0.01))
            # Shared dense layer applied to every candidate's conv features.
            u = Dense(self.config['listener_dim'], activation='sigmoid')
            ts = []
            us = []
            for _ in range(self.config['n_classes']):
                #c_input = Input(shape=(self.config['speaker_input_w'],self.config['speaker_input_h'],3)) #speaker_model.input[0], shape(bs, speaker_input_w, speaker_input_h, 3)
                #c_input = Lambda(lambda x: x[:, _])(c_inputs_all)
                c_input = Lambda(makeFunc(_))(c_inputs_all)
                conv_outputs = self.conv_model(c_input)
                uc = u(conv_outputs)
                # Score = -||z - uc||^2 (negative squared Euclidean distance).
                t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x),axis=1)))(add([z, Lambda(lambda x: -x)(uc)])) #shape(bs, 1)
                #t = Dot(1, False)([z,uc]) #shape(bs, 1)
                ts.append(t)
                us.append(uc)
            U = concatenate(ts) #shape(bs, n_classes)
            us = concatenate(us)
            final_output = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
            self.listener_model = Model(inputs=inputs, outputs=[final_output, U, z, us])
            #self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
        else:
            self.load()
            #check!!!
        # Recover the conv submodel by layer name (works for both branches).
        self.conv_model = [l for l in self.listener_model.layers if l.name=='conv_model_l'][0]
        #self.listener_model.layers[6].kernel_regularizer = None
        #self.internal_model = Model(inputs=self.listener_model.inputs, outputs=[self.listener_model.layers[7].get_output_at(_) for _ in range(2)] + [self.listener_model.layers[6].output, self.listener_model.layers[-2].output]) #dot
        #self.internal_model = Model(inputs=self.listener_model.inputs, outputs=[self.listener_model.layers[6].get_output_at(_) for _ in range(2)] + [self.listener_model.layers[7].output, self.listener_model.layers[-2].output]) #euc
        # Partition trainable weights so the optimizer can skip the conv net.
        self.trainable_weights_others = []
        self.trainable_weights_conv = []
        for layer in self.listener_model.layers:
            if layer.name!='conv_model_l':
                self.trainable_weights_others.extend(layer.trainable_weights)
            else:
                self.trainable_weights_conv.extend(layer.trainable_weights)
    def set_updates(self):
        """Create Adam updates, freezing the conv net unless self.traincnn."""
        self.opt = Adam(lr=self.lr)
        #self.opt = RMSprop(lr=self.lr)
        #opt = SGD(lr=self.lr, momentum=0.9, decay=1e-6, nesterov=True)
        if not self.traincnn:
            #self.updates = self.opt.get_updates(params=self.trainable_weights_others+self.trainable_weights_rnn, loss=self.loss)
            self.updates = self.opt.get_updates(params=self.trainable_weights_others, loss=self.loss)
        else:
            self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=self.loss)
        if os.path.exists(self.optfilename):
            self.load_opt()
    def reshape_message_candidates(self, speaker_message, candidates):
        """One-hot the message and add a batch axis; candidates are raw images
        of shape (n_classes, speaker_input_w, speaker_input_h, channels)."""
        #if not self.config['fixed_length']:
        #    assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
        #else:
        #    assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
        assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
        assert len(candidates.shape)==4 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==self.config['speaker_input_w'] and candidates.shape[2]==self.config['speaker_input_h']
        speaker_message = np.expand_dims(to_categorical(speaker_message, self.config['alphabet_size']), axis=0) #shape(1, ?, alphabet_size)
        X = [speaker_message, np.expand_dims(candidates, axis=0)]
        return X
'''
class PaperListenerNetwork_rnn_conv_color(PaperListenerNetwork_rnn):
def initialize_model(self):
"""
Batch input and output.
"""
if not os.path.exists(self.modelname):
## Define model
t_input = Input(shape=(None, self.config['alphabet_size'],)) #Speakers Message, shape(bs, message_length, alphabet_size)
c_inputs_all = Input(shape=(self.config['n_classes'], 8))
inputs = [t_input, c_inputs_all]
lstm = LSTM(self.config['listener_dim'], activation='tanh', return_sequences=False, return_state=True)
o, sh, sc = lstm(t_input)
z = Dense(self.config['listener_dim'], activation='sigmoid')(o) #shape(bs, listener_dim)
u = Dense(self.config['listener_dim'], activation='sigmoid')
ts = []
for _ in range(self.config['n_classes']):
#c_input = Input(shape=(self.config['speaker_input_w'],self.config['speaker_input_h'],3)) #speaker_model.input[0], shape(bs, speaker_input_w, speaker_input_h, 3)
#c_input = Lambda(lambda x: x[:, _])(c_inputs_all)
c_input = Lambda(makeFunc(_))(c_inputs_all)
#conv_outputs = conv_model(c_input)
#conv_outputs = c_input
uc = u(c_input)
t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x),axis=1)))(add([z, Lambda(lambda x: -x)(uc)])) #shape(bs, 1)
ts.append(t)
U = concatenate(ts) #shape(bs, n_classes)
final_output = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
self.listener_model = Model(inputs=inputs, outputs=[final_output, z, U])
#self.listener_model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=self.config['listener_lr']))
else:
self.load()
#check!!!
self.trainable_weights_rnn = self.listener_model.trainable_weights[:3]
self.trainable_weights_others = self.listener_model.trainable_weights[3:]
def set_updates(self):
self.opt = Adam(lr=self.lr)
#opt = RMSprop(lr=self.lr)
#opt = SGD(lr=self.lr, momentum=0.9, decay=1e-6, nesterov=True)
self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights, loss=self.loss)
if os.path.exists(self.optfilename):
self.load_opt()
def reshape_message_candidates(self, speaker_message, candidates):
#if not self.config['fixed_length']:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
#else:
# assert len(speaker_message.shape)==1 and speaker_message.shape[0]==self.config['max_message_length']
#pdb.set_trace()
assert len(speaker_message.shape)==1 and speaker_message.shape[0]<=self.config['max_message_length']
assert len(candidates.shape)==2 and candidates.shape[0]==self.config['n_classes'] and candidates.shape[1]==8
speaker_message = np.expand_dims(to_categorical(speaker_message, self.config['alphabet_size']), axis=0) #shape(1, ?, alphabet_size)
X = [speaker_message, np.expand_dims(candidates, axis=0)]
return X
class PaperListenerNetwork_direct(BaseListenerNetwork):
def __init__(self, modelname, config_dict):
assert False #TOMODIFY
super(PaperListenerNetwork_direct, self).__init__(modelname, config_dict)
self.batch_speaker_message = []
self.batch_action = []
self.batch_candidates = []
self.batch_reward = []
def initialize_model(self):
"""
Batch input and output.
"""
if not os.path.exists(self.modelname):
## Define model
## Speakers Message
t_input = Input(shape=(self.config['max_message_length'],)) #shape(bs, max_message_length)
t_trans = Dense(self.config['speaker_input_dim'],
#kernel_initializer=keras.initializers.Identity(gain=1.0),
#bias_initializer='zeros',
activation='sigmoid')(t_input) #shape(bs, speaker_input_dim)
inputs = [t_input]
ts = []
for _ in range(self.config['n_classes']):
c_input = Input(shape=(self.config['speaker_input_dim'],)) #shape(bs, speaker_input_dim)
t = Lambda(lambda x: K.expand_dims(K.sum(-K.square(x),axis=1)))(add([t_trans, Lambda(lambda x: -x)(c_input)])) #shape(bs, 1)
inputs.append(c_input)
ts.append(t)
U = concatenate(ts) #shape(bs, n_classes)
listener_probs = U
#listener_probs = Lambda(lambda x: K.softmax(x))(U) #shape(bs, n_classes)
listener_infer_action = Lambda(lambda x: K.argmax(x))(U) #shape(bs)
target_onehot_placeholder = Input(shape=(self.config['n_classes'],), name="action_onehot") #(bs, n_classes)
listener_prob_2 = dot([listener_probs, target_onehot_placeholder], axes=1)
listener_prob_2 = Lambda(lambda x:K.squeeze(x, axis=1))(listener_prob_2)
self.listener_model = Model(inputs=inputs + [target_onehot_placeholder], outputs=[listener_probs, listener_infer_action, t_trans, listener_prob_2])
else:
self.load()
#check!!!
def build_train_fn(self):
"""
Batch input and output.
"""
#direct prob input!!!
#reward_placeholder = K.placeholder(shape=(None,), name="reward") #(?)
action_prob = self.listener_model.output[3]
#loss = K.log(-action_prob)*reward_placeholder
#loss = - action_prob * reward_placeholder
loss = - action_prob
loss = K.mean(loss)
self.opt = Adam(lr=self.config['listener_lr'])
self.updates = self.opt.get_updates(params=self.listener_model.trainable_weights,loss=loss)
#if os.path.exists(self.optfilename):
# self.load_opt()
self.train_fn = K.function(
#inputs = self.listener_model.input + [reward_placeholder],
inputs = self.listener_model.input,
outputs=[loss, loss], updates=self.updates)
def sample_from_listener_policy(self, speaker_message, candidates):
"""
Input and output are all just one instance. No bs dimensize.
"""
X = self.reshape_message_candidates(speaker_message, candidates) + [np.zeros([1, self.config['n_classes']])]
listener_probs, listener_infer_action, _t_trans, _lp2 = self.listener_model.predict_on_batch(X)
listener_probs = np.squeeze(listener_probs) #shape(n_class)
#listener_probs = scipy.special.softmax(listener_probs)
listener_probs = softmax(listener_probs)
#pdb.set_trace() #???norm???
listener_action = np.random.choice(np.arange(self.config['n_classes']), p=listener_probs) #int
return listener_action, listener_probs
def infer_from_listener_policy(self, speaker_message, candidates):
"""
Input and output are all just one instance. No bs dimensize.
"""
X = self.reshape_message_candidates(speaker_message, candidates) + [np.zeros([1, self.config['n_classes']])]
listener_probs, listener_infer_action, _t_trans, _lp2 = self.listener_model.predict_on_batch(X)
listener_probs = np.squeeze(listener_probs) #shape(n_class)
listener_probs = softmax(listener_probs)
listener_action = np.squeeze(listener_infer_action).tolist() #int
return listener_action, listener_probs
def train_listener_policy_on_batch(self):
"""
Train as a batch. Loss is an float for a batch
"""
self.batch_candidates = np.array(self.batch_candidates).transpose([1, 0, 2]).tolist() #shape(num_classes, bs, speaker_input_dim
#_loss, _entropy = self.train_fn([self.batch_speaker_message] + self.batch_candidates + [self.batch_action, self.batch_reward] )
_loss, _entropy = self.train_fn([self.batch_speaker_message] + self.batch_candidates + [self.batch_action] )
#print("Listener loss: ", _loss)
self.batch_speaker_message = [] #shape(bs, max_message_length)
self.batch_action = [] #shape(bs, n_classes)
self.batch_candidates = [] #shape(bs, n_classes, speaker_input_dim)
self.batch_reward = [] #shape(bs)
def remember_listener_training_details(self, speaker_message, action, action_probs, target, candidates, reward):
"""
Inputs are just one instance. No bs dimensize.
"""
#action_onehot = np.zeros(self.config['n_classes'])
#action_onehot[action] = 1
action_onehot = np.ones(self.config['n_classes']) * np.all(target==candidates, axis=1)
self.batch_action.append(action_onehot)
self.batch_speaker_message.append(speaker_message)
self.batch_candidates.append(candidates)
self.batch_reward.append(reward)
''' | [
"keras.backend.sum",
"numpy.array",
"keras.layers.Dense",
"numpy.arange",
"os.path.exists",
"keras.backend.square",
"keras.backend.placeholder",
"keras.layers.Dot",
"numpy.max",
"keras.layers.LSTM",
"keras.backend.batch_get_value",
"keras.layers.concatenate",
"keras.models.Model",
"keras.o... | [((1362, 1388), 'keras.models.load_model', 'load_model', (['self.modelname'], {}), '(self.modelname)\n', (1372, 1388), False, 'from keras.models import Sequential, load_model, Model\n'), ((1633, 1668), 'keras.backend.batch_get_value', 'K.batch_get_value', (['symbolic_weights'], {}), '(symbolic_weights)\n', (1650, 1668), True, 'from keras import backend as K\n'), ((4340, 4415), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': "(None, self.config['n_classes'])", 'name': '"""action_onehot"""'}), "(shape=(None, self.config['n_classes']), name='action_onehot')\n", (4353, 4415), True, 'from keras import backend as K\n'), ((4457, 4500), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(None,)', 'name': '"""reward"""'}), "(shape=(None,), name='reward')\n", (4470, 4500), True, 'from keras import backend as K\n'), ((4523, 4589), 'keras.backend.sum', 'K.sum', (['(action_prob_placeholder * action_onehot_placeholder)'], {'axis': '(1)'}), '(action_prob_placeholder * action_onehot_placeholder, axis=1)\n', (4528, 4589), True, 'from keras import backend as K\n'), ((4611, 4629), 'keras.backend.log', 'K.log', (['action_prob'], {}), '(action_prob)\n', (4616, 4629), True, 'from keras import backend as K\n'), ((4865, 4877), 'keras.backend.mean', 'K.mean', (['loss'], {}), '(loss)\n', (4871, 4877), True, 'from keras import backend as K\n'), ((4894, 4910), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.lr'}), '(lr=self.lr)\n', (4898, 4910), False, 'from keras.optimizers import RMSprop, Adam, SGD\n'), ((5013, 5045), 'os.path.exists', 'os.path.exists', (['self.optfilename'], {}), '(self.optfilename)\n', (5027, 5045), False, 'import os\n'), ((5086, 5228), 'keras.backend.function', 'K.function', ([], {'inputs': '(self.listener_model.input + [action_onehot_placeholder, reward_placeholder])', 'outputs': '[loss, loss]', 'updates': 'self.updates'}), '(inputs=self.listener_model.input + [action_onehot_placeholder,\n reward_placeholder], outputs=[loss, 
loss], updates=self.updates)\n', (5096, 5228), True, 'from keras import backend as K\n'), ((5576, 5615), 'numpy.expand_dims', 'np.expand_dims', (['speaker_message'], {'axis': '(0)'}), '(speaker_message, axis=0)\n', (5590, 5615), True, 'import numpy as np\n'), ((6168, 6194), 'numpy.squeeze', 'np.squeeze', (['listener_probs'], {}), '(listener_probs)\n', (6178, 6194), True, 'import numpy as np\n'), ((6316, 6329), 'numpy.squeeze', 'np.squeeze', (['U'], {}), '(U)\n', (6326, 6329), True, 'import numpy as np\n'), ((6755, 6781), 'numpy.squeeze', 'np.squeeze', (['listener_probs'], {}), '(listener_probs)\n', (6765, 6781), True, 'import numpy as np\n'), ((6819, 6844), 'numpy.argmax', 'np.argmax', (['listener_probs'], {}), '(listener_probs)\n', (6828, 6844), True, 'import numpy as np\n'), ((6857, 6870), 'numpy.squeeze', 'np.squeeze', (['U'], {}), '(U)\n', (6867, 6870), True, 'import numpy as np\n'), ((7047, 7118), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['self.batch_action'], {'num_classes': "self.config['n_classes']"}), "(self.batch_action, num_classes=self.config['n_classes'])\n", (7061, 7118), False, 'from keras.utils.np_utils import to_categorical\n'), ((11147, 11163), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.lr'}), '(lr=self.lr)\n', (11151, 11163), False, 'from keras.optimizers import RMSprop, Adam, SGD\n'), ((11302, 11334), 'os.path.exists', 'os.path.exists', (['self.optfilename'], {}), '(self.optfilename)\n', (11316, 11334), False, 'import os\n'), ((11684, 11739), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(None, None)', 'name': '"""action_onehot"""'}), "(shape=(None, None), name='action_onehot')\n", (11697, 11739), True, 'from keras import backend as K\n'), ((11781, 11824), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(None,)', 'name': '"""reward"""'}), "(shape=(None,), name='reward')\n", (11794, 11824), True, 'from keras import backend as K\n'), ((11847, 11913), 'keras.backend.sum', 'K.sum', 
(['(action_prob_placeholder * action_onehot_placeholder)'], {'axis': '(1)'}), '(action_prob_placeholder * action_onehot_placeholder, axis=1)\n', (11852, 11913), True, 'from keras import backend as K\n'), ((11933, 11951), 'keras.backend.log', 'K.log', (['action_prob'], {}), '(action_prob)\n', (11938, 11951), True, 'from keras import backend as K\n'), ((12187, 12199), 'keras.backend.mean', 'K.mean', (['loss'], {}), '(loss)\n', (12193, 12199), True, 'from keras import backend as K\n'), ((12264, 12406), 'keras.backend.function', 'K.function', ([], {'inputs': '(self.listener_model.input + [action_onehot_placeholder, reward_placeholder])', 'outputs': '[loss, loss]', 'updates': 'self.updates'}), '(inputs=self.listener_model.input + [action_onehot_placeholder,\n reward_placeholder], outputs=[loss, loss], updates=self.updates)\n', (12274, 12406), True, 'from keras import backend as K\n'), ((12813, 12874), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['speaker_message', "self.config['alphabet_size']"], {}), "(speaker_message, self.config['alphabet_size'])\n", (12827, 12874), False, 'from keras.utils.np_utils import to_categorical\n'), ((16508, 16524), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.lr'}), '(lr=self.lr)\n', (16512, 16524), False, 'from keras.optimizers import RMSprop, Adam, SGD\n'), ((16991, 17023), 'os.path.exists', 'os.path.exists', (['self.optfilename'], {}), '(self.optfilename)\n', (17005, 17023), False, 'import os\n'), ((558, 567), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (564, 567), True, 'import numpy as np\n'), ((1716, 1745), 'pickle.dump', 'pickle.dump', (['weight_values', 'f'], {}), '(weight_values, f)\n', (1727, 1745), False, 'import pickle\n'), ((1833, 1847), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1844, 1847), False, 'import pickle\n'), ((2513, 2543), 'os.path.exists', 'os.path.exists', (['self.modelname'], {}), '(self.modelname)\n', (2527, 2543), False, 'import os\n'), ((2579, 2628), 'keras.layers.Input', 
'Input', ([], {'shape': "(self.config['max_message_length'],)"}), "(shape=(self.config['max_message_length'],))\n", (2584, 2628), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((2697, 2770), 'keras.layers.Input', 'Input', ([], {'shape': "(self.config['n_classes'], self.config['speaker_input_dim'])"}), "(shape=(self.config['n_classes'], self.config['speaker_input_dim']))\n", (2702, 2770), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((3454, 3469), 'keras.layers.concatenate', 'concatenate', (['ts'], {}), '(ts)\n', (3465, 3469), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((3501, 3516), 'keras.layers.concatenate', 'concatenate', (['us'], {}), '(us)\n', (3512, 3516), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((3925, 3979), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': '[final_output, U, z, us]'}), '(inputs=inputs, outputs=[final_output, U, z, us])\n', (3930, 3979), False, 'from keras.models import Sequential, load_model, Model\n'), ((5739, 5773), 'numpy.expand_dims', 'np.expand_dims', (['candidates'], {'axis': '(0)'}), '(candidates, axis=0)\n', (5753, 5773), True, 'import numpy as np\n'), ((6249, 6284), 'numpy.arange', 'np.arange', (["self.config['n_classes']"], {}), "(self.config['n_classes'])\n", (6258, 6284), True, 'import numpy as np\n'), ((9143, 9204), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['speaker_message', "self.config['alphabet_size']"], {}), "(speaker_message, self.config['alphabet_size'])\n", (9157, 9204), False, 'from keras.utils.np_utils import to_categorical\n'), ((9348, 9382), 'numpy.expand_dims', 'np.expand_dims', (['candidates'], {'axis': '(0)'}), '(candidates, axis=0)\n', (9362, 9382), True, 'import numpy as np\n'), ((9498, 9528), 'os.path.exists', 'os.path.exists', (['self.modelname'], {}), '(self.modelname)\n', (9512, 9528), False, 
'import os\n'), ((9544, 9593), 'keras.layers.Input', 'Input', ([], {'shape': "(None, self.config['alphabet_size'])"}), "(shape=(None, self.config['alphabet_size']))\n", (9549, 9593), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((9821, 9874), 'keras.layers.Input', 'Input', ([], {'shape': "(None, self.config['speaker_input_dim'])"}), "(shape=(None, self.config['speaker_input_dim']))\n", (9826, 9874), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((9981, 10080), 'keras.layers.LSTM', 'LSTM', (["self.config['listener_dim']"], {'activation': '"""tanh"""', 'return_sequences': '(False)', 'return_state': '(True)'}), "(self.config['listener_dim'], activation='tanh', return_sequences=False,\n return_state=True)\n", (9985, 10080), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((10237, 10293), 'keras.layers.Dense', 'Dense', (["self.config['listener_dim']"], {'activation': '"""sigmoid"""'}), "(self.config['listener_dim'], activation='sigmoid')\n", (10242, 10293), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((10722, 10737), 'keras.layers.concatenate', 'concatenate', (['ts'], {}), '(ts)\n', (10733, 10737), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((10769, 10784), 'keras.layers.concatenate', 'concatenate', (['us'], {}), '(us)\n', (10780, 10784), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((10896, 10950), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': '[final_output, U, z, us]'}), '(inputs=inputs, outputs=[final_output, U, z, us])\n', (10901, 10950), False, 'from keras.models import Sequential, load_model, Model\n'), ((12757, 12787), 'numpy.append', 'np.append', (['speaker_message', '(-1)'], {}), '(speaker_message, -1)\n', (12766, 12787), True, 'import numpy as np\n'), ((13546, 13576), 'os.path.exists', 
'os.path.exists', (['self.modelname'], {}), '(self.modelname)\n', (13560, 13576), False, 'import os\n'), ((13620, 13765), 'utils.convnet_com', 'convnet_com', (["self.config['speaker_input_w']", "self.config['speaker_input_h']", '(3)'], {'preloadfile': 'self.pretrain_convmodel_file', 'name': '"""conv_model_l"""'}), "(self.config['speaker_input_w'], self.config['speaker_input_h'],\n 3, preloadfile=self.pretrain_convmodel_file, name='conv_model_l')\n", (13631, 13765), False, 'from utils import convnet_vgg, convnet_mod, convnet_ori, convnet_com\n'), ((13778, 13827), 'keras.layers.Input', 'Input', ([], {'shape': "(None, self.config['alphabet_size'])"}), "(shape=(None, self.config['alphabet_size']))\n", (13783, 13827), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((13909, 14036), 'keras.layers.Input', 'Input', ([], {'shape': "(self.config['n_classes'], self.config['speaker_input_w'], self.config[\n 'speaker_input_h'], 3)", 'name': '"""image_l"""'}), "(shape=(self.config['n_classes'], self.config['speaker_input_w'], self\n .config['speaker_input_h'], 3), name='image_l')\n", (13914, 14036), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((14145, 14244), 'keras.layers.LSTM', 'LSTM', (["self.config['listener_dim']"], {'activation': '"""tanh"""', 'return_sequences': '(False)', 'return_state': '(True)'}), "(self.config['listener_dim'], activation='tanh', return_sequences=False,\n return_state=True)\n", (14149, 14244), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((14481, 14537), 'keras.layers.Dense', 'Dense', (["self.config['listener_dim']"], {'activation': '"""sigmoid"""'}), "(self.config['listener_dim'], activation='sigmoid')\n", (14486, 14537), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((15178, 15193), 'keras.layers.concatenate', 'concatenate', (['ts'], {}), '(ts)\n', (15189, 15193), False, 'from 
keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((15225, 15240), 'keras.layers.concatenate', 'concatenate', (['us'], {}), '(us)\n', (15236, 15240), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((15347, 15401), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': '[final_output, U, z, us]'}), '(inputs=inputs, outputs=[final_output, U, z, us])\n', (15352, 15401), False, 'from keras.models import Sequential, load_model, Model\n'), ((17720, 17781), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['speaker_message', "self.config['alphabet_size']"], {}), "(speaker_message, self.config['alphabet_size'])\n", (17734, 17781), False, 'from keras.utils.np_utils import to_categorical\n'), ((17844, 17878), 'numpy.expand_dims', 'np.expand_dims', (['candidates'], {'axis': '(0)'}), '(candidates, axis=0)\n', (17858, 17878), True, 'import numpy as np\n'), ((2871, 2932), 'keras.layers.Dense', 'Dense', (["self.config['speaker_input_dim']"], {'activation': '"""sigmoid"""'}), "(self.config['speaker_input_dim'], activation='sigmoid')\n", (2876, 2932), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((4726, 4764), 'keras.backend.log', 'K.log', (['(action_prob_placeholder + 1e-10)'], {}), '(action_prob_placeholder + 1e-10)\n', (4731, 4764), True, 'from keras import backend as K\n'), ((7775, 7811), 'numpy.array', 'np.array', (['self.batch_speaker_message'], {}), '(self.batch_speaker_message)\n', (7783, 7811), True, 'import numpy as np\n'), ((10115, 10171), 'keras.layers.Dense', 'Dense', (["self.config['listener_dim']"], {'activation': '"""sigmoid"""'}), "(self.config['listener_dim'], activation='sigmoid')\n", (10120, 10171), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((12046, 12084), 'keras.backend.log', 'K.log', (['(action_prob_placeholder + 1e-10)'], {}), '(action_prob_placeholder + 1e-10)\n', (12051, 12084), 
True, 'from keras import backend as K\n'), ((14279, 14335), 'keras.layers.Dense', 'Dense', (["self.config['listener_dim']"], {'activation': '"""sigmoid"""'}), "(self.config['listener_dim'], activation='sigmoid')\n", (14284, 14335), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((3357, 3370), 'keras.layers.Dot', 'Dot', (['(1)', '(False)'], {}), '(1, False)\n', (3360, 3370), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((3553, 3565), 'keras.backend.softmax', 'K.softmax', (['x'], {}), '(x)\n', (3562, 3565), True, 'from keras import backend as K\n'), ((10821, 10833), 'keras.backend.softmax', 'K.softmax', (['x'], {}), '(x)\n', (10830, 10833), True, 'from keras import backend as K\n'), ((15277, 15289), 'keras.backend.softmax', 'K.softmax', (['x'], {}), '(x)\n', (15286, 15289), True, 'from keras import backend as K\n'), ((10583, 10603), 'keras.layers.Lambda', 'Lambda', (['(lambda x: -x)'], {}), '(lambda x: -x)\n', (10589, 10603), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((15040, 15060), 'keras.layers.Lambda', 'Lambda', (['(lambda x: -x)'], {}), '(lambda x: -x)\n', (15046, 15060), False, 'from keras.layers import Input, Dense, LSTM, Lambda, concatenate, add, Dot\n'), ((10552, 10563), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (10560, 10563), True, 'from keras import backend as K\n'), ((15010, 15021), 'keras.backend.square', 'K.square', (['x'], {}), '(x)\n', (15018, 15021), True, 'from keras import backend as K\n')] |
import argparse
import torch
import random
from torchvision import utils
from torchvision import transforms
from sklearn.decomposition import FastICA
import numpy as np
import random
from PIL import Image, ImageDraw, ImageFont
from model import Generator
def ica_single_img(
        directions,
        ckpt,
        degree=5,
        channel_multiplier=2,
        latent=512,
        n_mlp=8,
        max_channel_size=512,
        size=256,
        truncation=0.7,
        device='cuda',
        full_model=False,
        initial_latent=None,
        resolution=64,
        start_component=0,
        end_component=None,
        num_of_columns=5,
        col=None,
        row=None,
        no_index=False,
        seed=None,
        need_PIL=False
):
    """Render a grid of images obtained by walking latent directions.

    Loads a StyleGAN2 generator from ``ckpt`` and, for each selected
    direction (column of ``directions``), generates images at latent codes
    ``latent + alpha * degree * direction`` for a symmetric range of
    ``alpha`` values, assembling everything into one grid.

    Parameters
    ----------
    directions : tensor whose columns are latent-space directions,
        shape (latent_dim, n_components).
    ckpt : path to the generator checkpoint (full model or a dict with
        a ``"g_ema"`` state dict, depending on ``full_model``).
    initial_latent : optional path to a projected latent (``torch.load``-able
        dict whose first entry holds ``'latent'`` and ``'noise'``); otherwise
        the generator's mean latent is used.
    row / col : optional single direction / single column to render; ``0``
        is a valid index (handled explicitly below).
    need_PIL : when True, also collect one PIL frame per generated image
        (for building a gif); requires ``row`` to be set.

    Returns
    -------
    (PIL.Image, list | None)
        The assembled grid image and, when ``need_PIL`` is True, the list of
        per-step PIL frames; otherwise ``None``.

    NOTE: ``truncation`` is currently unused by the body; it is kept for
    interface compatibility with the CLI.
    """
    if row is None and need_PIL:
        # The original `assert "<message>"` could never fire (a non-empty
        # string is truthy); raise explicitly instead.
        raise ValueError("If you need a gif, please select a row!")
    print("Loading checkpoints...")
    ckpt = torch.load(ckpt)
    if full_model:
        # Checkpoint is a fully serialized model object.
        state_dict = ckpt.state_dict()
        g = ckpt.to(device)
    else:
        # Checkpoint is a dict carrying the EMA generator weights.
        state_dict = ckpt["g_ema"]
        g = Generator(
            size, latent, n_mlp, channel_multiplier=channel_multiplier, max_channel_size=max_channel_size
        ).to(device)
    g.load_state_dict(state_dict)
    components = directions
    num_of_components = directions.shape[1]
    print("Generating images..")
    # `is not None` checks throughout: seed/row/col come from argparse as
    # ints, so 0 is a legitimate value that plain truthiness would drop.
    if seed is not None:
        torch.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        trunc = g.mean_latent(4)
    else:
        trunc = g.mean_latent(4096)
    w_plus = False
    if initial_latent is not None:
        # Start from a previously projected latent instead of the mean.
        proj = torch.load(initial_latent)
        key = list(proj.keys())
        latent = proj[key[0]]['latent'].detach().to(device)
        noise = proj[key[0]]['noise']
        if len(list(latent.shape)) == 2:
            # A 2-D (n_latent, dim) code means the W+ space.
            w_plus = True
    else:
        latent = trunc
        noise = None
    # Symmetric step multipliers around zero, e.g. -2..2 for five columns.
    alpha = range(-num_of_columns // 2 + 1, num_of_columns // 2 + 1)
    resize_transoform = transforms.Resize(resolution)
    to_pil_transoform = transforms.ToPILImage()
    to_tensor_transoform = transforms.ToTensor()
    directional_results = []
    if row is not None:
        d_range = range(num_of_components)[row:row + 1]
    else:
        d_range = range(num_of_components)[start_component:end_component]
    if need_PIL:
        PIL_list = []
    for d in d_range:
        # Small label tile ("i = <d>") prepended to each direction's row.
        txt = Image.new("RGB", (48, 48), (255, 255, 255))
        draw = ImageDraw.Draw(txt)
        draw.text((0, 20), "i = " + str(d), fill=(0, 0, 0))
        txt = txt.resize((resolution, resolution))
        txt = to_tensor_transoform(txt).to(device).unsqueeze(0)
        direction = degree * components[:, d].T
        if col is not None:
            imgs = []
            # NOTE(review): this slices the *component* range, yet `i`
            # indexes `alpha` (length num_of_columns); a col beyond
            # num_of_columns would raise IndexError — confirm intent.
            i_range = range(num_of_components)[col:col + 1]
        else:
            imgs = [txt]
            i_range = range(num_of_columns)
        if no_index:
            imgs = []
        for i in i_range:
            if w_plus:
                # W+ codes get the direction added to every layer's slot 0.
                target_latent = torch.unsqueeze(latent, 0).clone()
                target_latent[0] = target_latent[0] + alpha[i] * direction
            else:
                target_latent = latent + alpha[i] * direction
            img, _ = g(
                [target_latent],
                input_is_latent=True,
                noise=noise
            )
            img = resize_transoform(img)
            imgs += [img]
            if need_PIL:
                PIL_gird = utils.make_grid(
                    img[0, :, :, :],
                    pad_value=1,
                    normalize=True,
                    range=(-1, 1),
                    nrow=1,
                )
                PIL_img = to_pil_transoform(PIL_gird)
                PIL_list.append(PIL_img)
        final_image = torch.cat(imgs).unsqueeze(0)
        final_image = torch.transpose(final_image, 0, 1)
        directional_results += [final_image]
    # One extra column for the label tile unless a single column was asked for.
    if col is not None:
        nrow = 1
    else:
        nrow = num_of_columns + 1
    final_image = torch.cat(directional_results, 0)
    final_image = final_image.reshape(final_image.shape[0] * final_image.shape[1], final_image.shape[2],
                                      final_image.shape[3], final_image.shape[4])
    grid = utils.make_grid(
        final_image,
        pad_value=1,
        normalize=True,
        range=(-1, 1),
        nrow=nrow,
    )
    if need_PIL:
        pil_result = PIL_list
    else:
        pil_result = None
    return to_pil_transoform(grid), pil_result
if __name__ == "__main__":
    # Default deterministic seeding; --random_seed overrides inside the call.
    torch.manual_seed(1)
    np.random.seed(1)
    random.seed(1)
    torch.set_grad_enabled(False)

    parser = argparse.ArgumentParser(description="Apply closed form factorization")

    parser.add_argument(
        "-d",
        "--degree",
        type=float,
        default=4,
        help="scalar factors for moving latent vectors along eigenvector",
    )
    parser.add_argument(
        "--channel_multiplier",
        type=int,
        default=2,
        help='channel multiplier factor. config-f = 2, else = 1',
    )
    parser.add_argument(
        "--latent",
        type=int,
        default=512,
        help="demension of the latent",
    )
    parser.add_argument(
        "--n_mlp",
        type=int,
        default=8,
        help="n_mlp",
    )
    parser.add_argument(
        "--max_channel_size",
        type=int,
        default=512,
        help="max channel size",
    )
    parser.add_argument("--ckpt", type=str, required=True, help="stylegan2 checkpoints")
    parser.add_argument(
        "--size", type=int, default=256, help="output image size of the generator"
    )
    parser.add_argument(
        "--truncation", type=float, default=0.7, help="truncation factor"
    )
    parser.add_argument(
        "--device", type=str, default="cuda", help="device to run the model"
    )
    parser.add_argument('--full_model', default=False, action='store_true')
    parser.add_argument("--initial_latent", type=str, required=False, default=None)
    parser.add_argument("--resolution", type=int, default=64, help="resolution")
    parser.add_argument("--start_component", type=int, default=0, help="start_component")
    parser.add_argument("--end_component", type=int, default=None, help="end_component")
    parser.add_argument("--num_of_columns", type=int, default=5, help="num_of_columns")
    parser.add_argument("--col", type=int, default=None, help="column")
    parser.add_argument("--row", type=int, default=None, help="row")
    parser.add_argument('--no_index', default=False, action='store_true')
    parser.add_argument("--random_seed", type=int, default=None, help="random seed")
    parser.add_argument("--factor", type=str, default=None, required=True, help="factor")
    parser.add_argument('--gif', default=False, action='store_true')
    parser.add_argument('--prename', default=None, type=str)

    args = parser.parse_args()

    directions = torch.load(args.factor)["eigvec"].to(args.device)

    grid, pil_result = ica_single_img(
        directions,
        args.ckpt,
        degree=args.degree,
        channel_multiplier=args.channel_multiplier,
        latent=args.latent,
        n_mlp=args.n_mlp,
        max_channel_size=args.max_channel_size,
        size=args.size,
        truncation=args.truncation,
        full_model=args.full_model,
        initial_latent=args.initial_latent,
        resolution=args.resolution,
        start_component=args.start_component,
        end_component=args.end_component,
        num_of_columns=args.num_of_columns,
        col=args.col,
        row=args.row,
        no_index=args.no_index,
        seed=args.random_seed,
        need_PIL=args.gif,
    )

    # Output name: <prename><row>[<seed>]
    if args.prename:
        if args.random_seed:
            nm = args.prename + str(args.row) + str(args.random_seed)
        else:
            nm = args.prename + str(args.row)
    else:
        nm = str(args.row)
    grid.save(nm + ".png")

    # Only assemble the gif when frames were actually produced.  Previously
    # this ran unconditionally and crashed with a TypeError (pil_result is
    # None) whenever --gif was not given.
    if args.gif and pil_result:
        # Append the reversed sequence so the animation loops back smoothly.
        pil_result = pil_result + list(reversed(pil_result))
        pil_result[0].save(nm + '.gif',
                           save_all=True,
                           append_images=pil_result[1:],
                           duration=100,
                           loop=0)
| [
"torch.manual_seed",
"torchvision.transforms.ToPILImage",
"torch.set_grad_enabled",
"argparse.ArgumentParser",
"PIL.Image.new",
"torch.load",
"torch.unsqueeze",
"random.seed",
"torch.transpose",
"PIL.ImageDraw.Draw",
"model.Generator",
"numpy.random.seed",
"torchvision.transforms.Resize",
... | [((895, 911), 'torch.load', 'torch.load', (['ckpt'], {}), '(ckpt)\n', (905, 911), False, 'import torch\n'), ((1988, 2017), 'torchvision.transforms.Resize', 'transforms.Resize', (['resolution'], {}), '(resolution)\n', (2005, 2017), False, 'from torchvision import transforms\n'), ((2042, 2065), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (2063, 2065), False, 'from torchvision import transforms\n'), ((2093, 2114), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2112, 2114), False, 'from torchvision import transforms\n'), ((4001, 4034), 'torch.cat', 'torch.cat', (['directional_results', '(0)'], {}), '(directional_results, 0)\n', (4010, 4034), False, 'import torch\n'), ((4235, 4322), 'torchvision.utils.make_grid', 'utils.make_grid', (['final_image'], {'pad_value': '(1)', 'normalize': '(True)', 'range': '(-1, 1)', 'nrow': 'nrow'}), '(final_image, pad_value=1, normalize=True, range=(-1, 1),\n nrow=nrow)\n', (4250, 4322), False, 'from torchvision import utils\n'), ((4532, 4552), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (4549, 4552), False, 'import torch\n'), ((4557, 4574), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (4571, 4574), True, 'import numpy as np\n'), ((4579, 4593), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (4590, 4593), False, 'import random\n'), ((4598, 4627), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (4620, 4627), False, 'import torch\n'), ((4641, 4711), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Apply closed form factorization"""'}), "(description='Apply closed form factorization')\n", (4664, 4711), False, 'import argparse\n'), ((1360, 1383), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1377, 1383), False, 'import torch\n'), ((1392, 1412), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1406, 1412), True, 'import 
numpy as np\n'), ((1421, 1438), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1432, 1438), False, 'import random\n'), ((1577, 1603), 'torch.load', 'torch.load', (['initial_latent'], {}), '(initial_latent)\n', (1587, 1603), False, 'import torch\n'), ((2376, 2419), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(48, 48)', '(255, 255, 255)'], {}), "('RGB', (48, 48), (255, 255, 255))\n", (2385, 2419), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2435, 2454), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['txt'], {}), '(txt)\n', (2449, 2454), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3828, 3862), 'torch.transpose', 'torch.transpose', (['final_image', '(0)', '(1)'], {}), '(final_image, 0, 1)\n', (3843, 3862), False, 'import torch\n'), ((1055, 1163), 'model.Generator', 'Generator', (['size', 'latent', 'n_mlp'], {'channel_multiplier': 'channel_multiplier', 'max_channel_size': 'max_channel_size'}), '(size, latent, n_mlp, channel_multiplier=channel_multiplier,\n max_channel_size=max_channel_size)\n', (1064, 1163), False, 'from model import Generator\n'), ((3455, 3543), 'torchvision.utils.make_grid', 'utils.make_grid', (['img[0, :, :, :]'], {'pad_value': '(1)', 'normalize': '(True)', 'range': '(-1, 1)', 'nrow': '(1)'}), '(img[0, :, :, :], pad_value=1, normalize=True, range=(-1, 1),\n nrow=1)\n', (3470, 3543), False, 'from torchvision import utils\n'), ((3777, 3792), 'torch.cat', 'torch.cat', (['imgs'], {}), '(imgs)\n', (3786, 3792), False, 'import torch\n'), ((6924, 6947), 'torch.load', 'torch.load', (['args.factor'], {}), '(args.factor)\n', (6934, 6947), False, 'import torch\n'), ((3009, 3035), 'torch.unsqueeze', 'torch.unsqueeze', (['latent', '(0)'], {}), '(latent, 0)\n', (3024, 3035), False, 'import torch\n')] |
"""This module models the water level graphs to an appropiate degree polynomial.
"""
import matplotlib
import numpy as np
def polyfit(dates, levels, p):
    """Fit a degree-p polynomial to water levels over time.

    Returns the fitted ``np.poly1d`` object together with the time-axis
    offset (the matplotlib date number of the first sample) that must be
    subtracted from a date number before evaluating the polynomial.
    """
    # matplotlib represents dates as floats; shift so the first sample is t = 0
    time_axis = matplotlib.dates.date2num(dates)
    shift = time_axis[0]
    # coefficients of the best-fit polynomial of degree p
    coefficients = np.polyfit(time_axis - shift, levels, p)
    return np.poly1d(coefficients), shift
def floodwarning(station, dates, levels, p):
    """Classify flood risk at *station* as 'severe', 'high', 'moderate' or 'low'.

    A degree-*p* polynomial is fitted to the recent levels; its derivative
    evaluated at the first sample (assumed to be the latest reading,
    ``levels[0]`` / ``dates[0]``) tells whether the water is rising, and the
    latest level is compared against the station's typical range.

    NOTE(review): the original "near highest"/"near lowest" comparisons were
    inverted relative to their comments (``abs(l - high) > abs(l - low)`` is
    true when the level is closer to the *low* end), and the sequence of
    independent ``if`` statements let a rising above-range level be
    downgraded from 'severe' to 'moderate'.  The classification below follows
    the commented intent.
    """
    datenums = matplotlib.dates.date2num(dates)
    p_coeff = np.polyfit(datenums - datenums[0], levels, p)
    poly = np.poly1d(p_coeff)
    polyd1 = np.polyder(poly)
    low, high = station.typical_range
    latest_level = levels[0]
    d1_latest = polyd1(0)
    rising = d1_latest > 0
    falling = d1_latest < 0
    # closer to the top of the typical range than to the bottom
    closer_to_high = abs(latest_level - high) < abs(latest_level - low)
    if not (rising or falling):
        # flat trend: no strong signal either way
        fw = "moderate"
    elif latest_level > high:
        # above the typical range: severe if still rising
        fw = "severe" if rising else "high"
    elif latest_level < low:
        # below the typical range: low unless recovering
        fw = "moderate" if rising else "low"
    elif closer_to_high:
        # near the top of the range
        fw = "high" if rising else "moderate"
    else:
        # near the bottom of the range
        fw = "moderate" if rising else "low"
    return fw
| [
"matplotlib.dates.date2num",
"numpy.polyder",
"numpy.poly1d",
"numpy.polyfit"
] | [((204, 236), 'matplotlib.dates.date2num', 'matplotlib.dates.date2num', (['dates'], {}), '(dates)\n', (229, 236), False, 'import matplotlib\n'), ((316, 361), 'numpy.polyfit', 'np.polyfit', (['(datenums - datenums[0])', 'levels', 'p'], {}), '(datenums - datenums[0], levels, p)\n', (326, 361), True, 'import numpy as np\n'), ((441, 459), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (450, 459), True, 'import numpy as np\n'), ((548, 580), 'matplotlib.dates.date2num', 'matplotlib.dates.date2num', (['dates'], {}), '(dates)\n', (573, 580), False, 'import matplotlib\n'), ((595, 640), 'numpy.polyfit', 'np.polyfit', (['(datenums - datenums[0])', 'levels', 'p'], {}), '(datenums - datenums[0], levels, p)\n', (605, 640), True, 'import numpy as np\n'), ((652, 670), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (661, 670), True, 'import numpy as np\n'), ((684, 700), 'numpy.polyder', 'np.polyder', (['poly'], {}), '(poly)\n', (694, 700), True, 'import numpy as np\n')] |
import argparse
import csv
from collections import defaultdict
import numpy as np
import sys
def main(args):
    """Summarise MTurk annotation results: per-HIT score variance, annotator
    agreement counts, and score histograms, printed to stdout.

    ``args.mturk_results_file`` is a CSV with one row per assignment and
    boolean 'true'/'false' columns ``Answer.missing_info_i.i`` (i = 1..5)
    and ``Answer.usefulness_i.i`` (i = 0..5).
    """
    missing_info_scores = defaultdict(list)
    usefulness_scores = defaultdict(list)
    missing_info_score_histogram = defaultdict(int)
    usefulness_score_histogram = defaultdict(int)
    with open(args.mturk_results_file) as results_file:
        for row in csv.DictReader(results_file):
            hit_id = row['HITId']
            for score in range(1, 6):
                if row['Answer.missing_info_%d.%d' % (score, score)] == 'true':
                    missing_info_scores[hit_id].append(score)
                    missing_info_score_histogram[score] += 1
            for score in range(6):
                if row['Answer.usefulness_%d.%d' % (score, score)] == 'true':
                    usefulness_scores[hit_id].append(score)
                    usefulness_score_histogram[score] += 1
    missing_info_score_var = 0
    usefulness_score_var = 0
    missing_info_agreement = 0
    usefulness_agreement = 0
    for hit_id in usefulness_scores:
        missing_info_score_var += np.var(missing_info_scores[hit_id])
        usefulness_score_var += np.var(usefulness_scores[hit_id])
        # agreement: some score chosen by a majority (3+ of 5 for missing
        # info, 2+ for usefulness) of the annotators of this HIT
        if any(missing_info_scores[hit_id].count(s) > 2 for s in range(1, 6)):
            missing_info_agreement += 1
        if any(usefulness_scores[hit_id].count(s) > 1 for s in range(6)):
            usefulness_agreement += 1
    print('Missing info score variance %.2f' % (missing_info_score_var / len(missing_info_scores)))
    print('Usefulness score variance %.2f' % (usefulness_score_var / len(usefulness_scores)))
    print('Missing info agreement: %d out of %d' % (missing_info_agreement, len(missing_info_scores)))
    print('Usefulness agreement: %d out of %d' % (usefulness_agreement, len(usefulness_scores)))
    print('Missing info score histogram')
    print(missing_info_score_histogram)
    print('Usefulness score histogram')
    print(usefulness_score_histogram)
if __name__ == "__main__":
argparser = argparse.ArgumentParser(sys.argv[0])
argparser.add_argument("--mturk_results_file", type = str)
args = argparser.parse_args()
print(args)
main(args) | [
"numpy.var",
"csv.DictReader",
"collections.defaultdict",
"argparse.ArgumentParser"
] | [((136, 153), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (147, 153), False, 'from collections import defaultdict\n'), ((178, 195), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (189, 195), False, 'from collections import defaultdict\n'), ((231, 247), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (242, 247), False, 'from collections import defaultdict\n'), ((281, 297), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (292, 297), False, 'from collections import defaultdict\n'), ((2379, 2415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (2402, 2415), False, 'import argparse\n'), ((380, 412), 'csv.DictReader', 'csv.DictReader', (['csv_results_file'], {}), '(csv_results_file)\n', (394, 412), False, 'import csv\n'), ((1152, 1187), 'numpy.var', 'np.var', (['missing_info_scores[hit_id]'], {}), '(missing_info_scores[hit_id])\n', (1158, 1187), True, 'import numpy as np\n'), ((1220, 1253), 'numpy.var', 'np.var', (['usefulness_scores[hit_id]'], {}), '(usefulness_scores[hit_id])\n', (1226, 1253), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import argparse
from glob import iglob as glob
from functools import partial
from itertools import starmap
import h5py
import pandas as pd
import numpy as np
from numba import jit
from scipy import linalg
from dautil.util import map_parallel
IDX = pd.IndexSlice  # shorthand for MultiIndex slicing below
_REAL_TO_SIM = 1e-12  # scale factor applied to theory spectra; presumably physical -> simulation units, TODO confirm
__version__ = '0.2'
def get_inputs(df, df_theory, df_filter, f_modecoupling, l_common, spectrum, sub_split, null_split, compute_nl=False):
    '''``spectrum``: one of TT, EE, BB, TE, TB, EB

    Gather the per-case inputs for the spectra solver and return
    ``(M, F, df_Cl_sim, Cl_th)``:

    - ``M``: mode-coupling matrix for this spectrum, restricted to ``l_common``
    - ``F``: filter transfer function (real part), contiguous array
    - ``df_Cl_sim``: pseudo-spectra per simulation; when ``compute_nl`` the
      noise spectra ``Nl`` are packed into the imaginary part
    - ``Cl_th``: theory spectrum, or None when no theory column exists
      (anything other than TT/EE/BB/TE)
    '''
    df_Cl_sim = df.loc[IDX['Cl', spectrum, sub_split, null_split, :], l_common]
    # drop the (quantity, spectrum, sub-split, null-split) index levels, keeping
    # only the per-simulation index
    df_Cl_sim.reset_index(level=(0, 1, 2, 3), inplace=True, drop=True)
    if compute_nl:
        # carry Cl and Nl through the solver in one complex-valued DataFrame
        df_Nl_sim = df.loc[IDX['Nl', spectrum, sub_split, null_split, :], l_common]
        df_Nl_sim.reset_index(level=(0, 1, 2, 3), inplace=True, drop=True)
        df_Cl_sim += 1.j * df_Nl_sim
        del df_Nl_sim
    # HDF5 dataset named e.g. 'TTTT' restricted to the common multipoles
    M = f_modecoupling['{0}{0}'.format(spectrum)][:][l_common][:, l_common]
    # auto-spectra
    if spectrum[0] == spectrum[1]:
        F = df_filter.loc[IDX['{0}{0}'.format(spectrum), sub_split, null_split], l_common].values.real
    # cross-spectra
    else:
        # geometric mean of the two auto-spectrum transfer functions
        F = df_filter.loc[IDX['{0}{0}{0}{0}'.format(spectrum[0]), sub_split, null_split], l_common].values.real
        F *= df_filter.loc[IDX['{0}{0}{0}{0}'.format(spectrum[1]), sub_split, null_split], l_common].values.real
        F = np.sqrt(F)
    Cl_th = df_theory.loc[l_common, spectrum].values if spectrum in ('TT', 'EE', 'BB', 'TE') else None
    return M, np.ascontiguousarray(F), df_Cl_sim, Cl_th
def matrix_reduce(M, b):
    '''Coarsen matrix ``M`` by bin-width ``b``: each output entry is the sum of
    a ``b``-by-``b`` tile of ``M`` divided by ``b``.  ``b == 1`` returns ``M``
    unchanged.
    '''
    if b == 1:
        return M
    rows, cols = M.shape
    tiles = M.reshape(rows // b, b, cols // b, b)
    return tiles.sum(axis=(1, 3)) / b
def matrix_reduce_row(M, b):
    '''Coarsen only the rows of ``M`` by bin-width ``b``: each output row is
    the sum of ``b`` consecutive rows divided by ``b``.  ``b == 1`` returns
    ``M`` unchanged.
    '''
    if b == 1:
        return M
    rows, cols = M.shape
    return M.reshape(rows // b, b, cols).sum(axis=1) / b
def spectra_reduce(Cls, b):
    '''Coarsen the second axis of ``Cls`` by bin-width ``b``: each output entry
    is the sum of ``b`` consecutive columns divided by ``b`` (i.e. the bin
    mean).  ``b == 1`` returns ``Cls`` unchanged.
    '''
    if b == 1:
        return Cls
    rows, cols = Cls.shape
    return Cls.reshape(rows, cols // b, b).sum(axis=2) / b
@jit(nopython=True, nogil=True)
def solve_K_l(M, F, B_2):
    # K_l = diag(F) @ M @ diag(B_2): fold the filter transfer function (rows)
    # and squared beam window (columns) into the mode-coupling matrix.
    # numba-compiled (nopython, GIL released); reshape-based broadcasting is
    # used because it is supported in nopython mode.
    return F.reshape(-1, 1) * M * B_2.reshape(1, -1)
def solve(K_l, Cl_sim, bin_width, return_w=False):
    """Bin the kernel and pseudo-spectra, then solve ``K_b @ Cl = Cl_sim`` for
    the binned spectra.

    When ``return_w`` is set, also return the band-power window functions
    ``w_bl = K_b^{-1} P_bl K_ll``.
    """
    K_b = matrix_reduce(K_l, bin_width)
    binned_sim = spectra_reduce(Cl_sim, bin_width)
    Cl = linalg.solve(K_b, binned_sim.T).T
    if not return_w:
        return Cl
    w_bl = linalg.solve(K_b, matrix_reduce_row(K_l, bin_width))
    return Cl, w_bl
def solve_spectra(df_pseudo, df_theory, df_filter, f_modecoupling, B_2, bin_width, l_common, l_common_binned, spectrum, sub_split, null_split, compute_nl=False, return_w=False):
    '''Solve one (spectrum, sub-split, null-split) case for its binned spectra.

    Returns a DataFrame indexed like the per-simulation pseudo-spectra (plus a
    ('theory', 0) row when a theory spectrum exists) with one column per
    binned multipole; when ``return_w`` is set, also returns the band-power
    window functions ``w_bl``.
    '''
    M, F, df_Cl_sim, Cl_th = get_inputs(df_pseudo, df_theory, df_filter, f_modecoupling, l_common, spectrum, sub_split, null_split, compute_nl=compute_nl)
    K_l = solve_K_l(M, F, B_2)
    del M, F
    if Cl_th is not None:
        # append the kernel-convolved theory expectation as a pseudo 'theory' row
        df_Cl_sim.loc[IDX['theory', 0], :] = K_l @ Cl_th
        del Cl_th
    res = solve(K_l, df_Cl_sim.values, bin_width, return_w=return_w)
    del K_l
    if return_w:
        Cl, w_bl = res
    else:
        Cl = res
    df = pd.DataFrame(
        Cl,
        index=df_Cl_sim.index,
        columns=l_common_binned
    )
    if return_w:
        return df, w_bl
    else:
        return df
def main(pseudospectra, theory, filter_transfer, modecoupling, beam, bin_width, l_min, l_max, processes, compute_nl=False, return_w=False, l_boundary=600, l_lower=50, l_upper=4300):
    '''
    `l_boundary`: the pivot point of l bins. e.g. given a bin_width, 600 is always the boundary between 2 bins.
    `l_lower`: lowest l we trust. e.g. 50
    `l_upper`: highest l we trust. e.g. 4300, since F_TT can become negative above that.

    Load all inputs, solve every (spectrum, sub-split, null-split) case in
    the pseudo-spectra, and return a sorted DataFrame of binned spectra;
    when ``return_w`` is set, return ``(spectra, window)`` where ``window``
    holds the band-power window functions per case and bin.
    '''
    # snap l_min / l_max outward to the bin grid anchored at l_boundary
    if l_min < 0:
        l_min = l_boundary - (l_boundary - l_lower) // bin_width * bin_width
        print(f'auto l-min at {l_min}')
    if l_max < 0:
        l_max = l_boundary + (l_upper - l_boundary) // bin_width * bin_width
        print(f'auto l-max at {l_max}')
    l_common = np.arange(l_min, l_max)
    # bin centres (mean multipole per bin)
    l_common_binned = l_common.reshape(-1, bin_width).mean(axis=1)
    # Reading
    df_beam = pd.read_hdf(beam)['all']
    # squared beam window function interpolated onto the common multipoles
    B_2 = np.square(np.interp(l_common, df_beam.index, df_beam.values.real))
    del df_beam
    # pseudospectra may be a glob pattern; read all matches in parallel threads
    df = pd.concat(
        map_parallel(pd.read_hdf, glob(pseudospectra), mode='multithreading', processes=processes)
    )
    df_theory = pd.read_hdf(theory) * _REAL_TO_SIM
    df_filter = pd.read_hdf(filter_transfer)
    # mapping all cases: (spectrum, sub-split, null-split) combinations
    index = pd.MultiIndex.from_product(
        (
            df.index.levels[1],
            df.index.levels[2],
            df.index.levels[3]
        ),
        names=df.index.names[1:4]
    )
    if return_w:
        # window-function index carries an extra bin-centre level 'b'
        index_w = pd.MultiIndex.from_product(
            (
                df.index.levels[1],
                df.index.levels[2],
                df.index.levels[3],
                l_common_binned
            ),
            names=df.index.names[1:4] + ['b']
        )
    with h5py.File(modecoupling, 'r') as f:
        # solve each case sequentially; f (open HDF5 file) is shared read-only
        res = list(starmap(
            partial(solve_spectra, df, df_theory, df_filter, f, B_2, bin_width, l_common, l_common_binned, compute_nl=compute_nl, return_w=return_w),
            index
        ))
    if return_w:
        Cls, w_bls = list(map(list, zip(*res)))
    else:
        Cls = res
    df_spectra = pd.concat(
        Cls,
        keys=index
    )
    # free the large intermediates before building the outputs
    del l_common_binned, B_2, df, df_theory, df_filter
    df_spectra.index.names = index.names + df_spectra.index.names[-2:]
    del index
    df_spectra.sort_index(inplace=True)
    if return_w:
        df_window = pd.DataFrame(
            np.concatenate(w_bls, axis=0),
            index=index_w,
            columns=l_common
        )
        df_window.sort_index(inplace=True)
        return df_spectra, df_window
    else:
        return df_spectra
def cli():
    """Command-line entry point: parse options, run ``main`` and persist the
    resulting DataFrame(s) to the output HDF5 file."""
    parser = argparse.ArgumentParser(description="Obtain final spectra from pseudo-spectra, mode-coupling, filter transfer function, etc.")
    parser.add_argument('-o', '--output', required=True,
                        help='Output HDF5 filename.')
    parser.add_argument('--modecoupling', required=True,
                        help='Input modecoupling HDF5 file.')
    parser.add_argument('--pseudospectra', required=True,
                        help='Input pseudospectra DataFrame in HDF5. Can be a glob pattern.')
    parser.add_argument('--theory', required=True,
                        help='Input theory DataFrame in HDF5.')
    parser.add_argument('--beam', required=True,
                        help='Input beam DataFrame in HDF5.')
    parser.add_argument('--filter-transfer', required=True,
                        help='Input filter transfer function in HDF5.')
    parser.add_argument('--compute-nl', action='store_true',
                        help='If specified, compute Nl and store as the imaginary part of the spectra DataFrame.')
    parser.add_argument('--return-w', action='store_true',
                        help='If specified, return the band-power window function w.')
    parser.add_argument('--bin-width', default=300, type=int,
                        help='bin width. Default: 300')
    parser.add_argument('--l-min', type=int, default=-1,
                        help='Minimum l. Default: auto-calculated. Lowest value: 2.')
    parser.add_argument('--l-max', type=int, default=-1,
                        help='maximum l (exclusive). Default: auto-calculated. Highest value: 4901')
    parser.add_argument('-c', '--compress-level', default=9, type=int,
                        help='compress level of gzip algorithm. Default: 9.')
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s {}'.format(__version__))
    parser.add_argument('-p', '--processes', type=int, default=1,
                        help='No. of parallel processes.')
    args = parser.parse_args()
    result = main(
        args.pseudospectra,
        args.theory,
        args.filter_transfer,
        args.modecoupling,
        args.beam,
        args.bin_width,
        args.l_min,
        args.l_max,
        args.processes,
        compute_nl=args.compute_nl,
        return_w=args.return_w
    )
    if args.return_w:
        df_spectra, df_window = result
    else:
        df_spectra = result
    # identical storage options for both tables
    hdf_kwargs = dict(format='table', complevel=args.compress_level, fletcher32=True)
    df_spectra.to_hdf(args.output, 'spectra', mode='w', **hdf_kwargs)
    if args.return_w:
        # append the window functions to the same file
        df_window.to_hdf(args.output, 'window', mode='a', **hdf_kwargs)
if __name__ == "__main__":
cli()
| [
"pandas.MultiIndex.from_product",
"numpy.sqrt",
"argparse.ArgumentParser",
"numpy.arange",
"glob.iglob",
"scipy.linalg.solve",
"numpy.ascontiguousarray",
"h5py.File",
"numba.jit",
"functools.partial",
"numpy.interp",
"numpy.concatenate",
"pandas.DataFrame",
"pandas.concat",
"pandas.read_... | [((2369, 2399), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (2372, 2399), False, 'from numba import jit\n'), ((3486, 3550), 'pandas.DataFrame', 'pd.DataFrame', (['Cl'], {'index': 'df_Cl_sim.index', 'columns': 'l_common_binned'}), '(Cl, index=df_Cl_sim.index, columns=l_common_binned)\n', (3498, 3550), True, 'import pandas as pd\n'), ((4381, 4404), 'numpy.arange', 'np.arange', (['l_min', 'l_max'], {}), '(l_min, l_max)\n', (4390, 4404), True, 'import numpy as np\n'), ((4813, 4841), 'pandas.read_hdf', 'pd.read_hdf', (['filter_transfer'], {}), '(filter_transfer)\n', (4824, 4841), True, 'import pandas as pd\n'), ((4879, 4999), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['(df.index.levels[1], df.index.levels[2], df.index.levels[3])'], {'names': 'df.index.names[1:4]'}), '((df.index.levels[1], df.index.levels[2], df.\n index.levels[3]), names=df.index.names[1:4])\n', (4905, 4999), True, 'import pandas as pd\n'), ((5715, 5741), 'pandas.concat', 'pd.concat', (['Cls'], {'keys': 'index'}), '(Cls, keys=index)\n', (5724, 5741), True, 'import pandas as pd\n'), ((6249, 6385), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Obtain final spectra from pseudo-spectra, mode-coupling, filter transfer function, etc."""'}), "(description=\n 'Obtain final spectra from pseudo-spectra, mode-coupling, filter transfer function, etc.'\n )\n", (6272, 6385), False, 'import argparse\n'), ((1404, 1414), 'numpy.sqrt', 'np.sqrt', (['F'], {}), '(F)\n', (1411, 1414), True, 'import numpy as np\n'), ((1533, 1556), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['F'], {}), '(F)\n', (1553, 1556), True, 'import numpy as np\n'), ((2635, 2669), 'scipy.linalg.solve', 'linalg.solve', (['K_b', 'Cl_sim_binned.T'], {}), '(K_b, Cl_sim_binned.T)\n', (2647, 2669), False, 'from scipy import linalg\n'), ((2758, 2786), 'scipy.linalg.solve', 'linalg.solve', (['K_b', 'P_bl_K_ll'], {}), 
'(K_b, P_bl_K_ll)\n', (2770, 2786), False, 'from scipy import linalg\n'), ((4501, 4518), 'pandas.read_hdf', 'pd.read_hdf', (['beam'], {}), '(beam)\n', (4512, 4518), True, 'import pandas as pd\n'), ((4546, 4601), 'numpy.interp', 'np.interp', (['l_common', 'df_beam.index', 'df_beam.values.real'], {}), '(l_common, df_beam.index, df_beam.values.real)\n', (4555, 4601), True, 'import numpy as np\n'), ((4762, 4781), 'pandas.read_hdf', 'pd.read_hdf', (['theory'], {}), '(theory)\n', (4773, 4781), True, 'import pandas as pd\n'), ((5098, 5243), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['(df.index.levels[1], df.index.levels[2], df.index.levels[3], l_common_binned)'], {'names': "(df.index.names[1:4] + ['b'])"}), "((df.index.levels[1], df.index.levels[2], df.\n index.levels[3], l_common_binned), names=df.index.names[1:4] + ['b'])\n", (5124, 5243), True, 'import pandas as pd\n'), ((5361, 5389), 'h5py.File', 'h5py.File', (['modecoupling', '"""r"""'], {}), "(modecoupling, 'r')\n", (5370, 5389), False, 'import h5py\n'), ((4674, 4693), 'glob.iglob', 'glob', (['pseudospectra'], {}), '(pseudospectra)\n', (4678, 4693), True, 'from glob import iglob as glob\n'), ((6010, 6039), 'numpy.concatenate', 'np.concatenate', (['w_bls'], {'axis': '(0)'}), '(w_bls, axis=0)\n', (6024, 6039), True, 'import numpy as np\n'), ((5436, 5576), 'functools.partial', 'partial', (['solve_spectra', 'df', 'df_theory', 'df_filter', 'f', 'B_2', 'bin_width', 'l_common', 'l_common_binned'], {'compute_nl': 'compute_nl', 'return_w': 'return_w'}), '(solve_spectra, df, df_theory, df_filter, f, B_2, bin_width,\n l_common, l_common_binned, compute_nl=compute_nl, return_w=return_w)\n', (5443, 5576), False, 'from functools import partial\n')] |
import numpy as np
import os, sys
sys.path.insert(0, 'scripts/')  # make the local helper modules importable
import shutil
import kaldi_data as kd
import argparse

# Shuffle a (sorted) Kaldi-style data list and write the shuffled copy.
parser = argparse.ArgumentParser(description="Shuttling data", add_help=True)
parser.add_argument("--source", type=str, default="data/test_segments/utt2lang_sorted", help="source data")
parser.add_argument("--target", type=str, default="result_test_sorted.csv", help="target data")
parser.add_argument("--utt2spk", action='store_true', help="for utt2spk file")
parser.add_argument("--utt2lang", action='store_true', help="for utt2lang file")
args = parser.parse_known_args()[0]

SOURCE_FOLDER = args.source
TARGET_FOLDER = args.target

if not os.path.exists(TARGET_FOLDER):
    os.mkdir(TARGET_FOLDER)

wavlist, utt_label, spk_label = kd.read_data_list(SOURCE_FOLDER, utt2spk=args.utt2spk, utt2lang=args.utt2lang)

# BUG FIX: np.random.shuffle cannot shuffle a range object (immutable in
# Python 3); build a mutable index array first, then apply the same
# permutation to all three arrays so they stay aligned.
idx = np.arange(len(wavlist))
np.random.shuffle(idx)
wavlist = wavlist[idx]
utt_label = utt_label[idx]
spk_label = spk_label[idx]
kd.write_data(TARGET_FOLDER, wavlist, utt_label, spk_label)
| [
"os.path.exists",
"sys.path.insert",
"argparse.ArgumentParser",
"kaldi_data.read_data_list",
"os.mkdir",
"kaldi_data.write_data",
"numpy.random.shuffle"
] | [((33, 63), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""scripts/"""'], {}), "(0, 'scripts/')\n", (48, 63), False, 'import os, sys\n'), ((127, 195), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Shuttling data"""', 'add_help': '(True)'}), "(description='Shuttling data', add_help=True)\n", (150, 195), False, 'import argparse\n'), ((754, 832), 'kaldi_data.read_data_list', 'kd.read_data_list', (['SOURCE_FOLDER'], {'utt2spk': 'args.utt2spk', 'utt2lang': 'args.utt2lang'}), '(SOURCE_FOLDER, utt2spk=args.utt2spk, utt2lang=args.utt2lang)\n', (771, 832), True, 'import kaldi_data as kd\n'), ((860, 882), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (877, 882), True, 'import numpy as np\n'), ((961, 1020), 'kaldi_data.write_data', 'kd.write_data', (['TARGET_FOLDER', 'wavlist', 'utt_label', 'spk_label'], {}), '(TARGET_FOLDER, wavlist, utt_label, spk_label)\n', (974, 1020), True, 'import kaldi_data as kd\n'), ((660, 689), 'os.path.exists', 'os.path.exists', (['TARGET_FOLDER'], {}), '(TARGET_FOLDER)\n', (674, 689), False, 'import os, sys\n'), ((695, 718), 'os.mkdir', 'os.mkdir', (['TARGET_FOLDER'], {}), '(TARGET_FOLDER)\n', (703, 718), False, 'import os, sys\n')] |
import numpy as np
import pandas as pd
import patsy
# Data files: the 1980 census (QOB) extract and the full 1970/1980 census sample.
FILE_PATH_CENSUS80_EXTRACT = "data/QOB.txt"
FILE_PATH_FULL_CENSUS7080 = "data/NEW7080.dta"
def get_df_census80():
    """Load the 1980 census extract with readable column names.

    Also corrects AGEQ for the 1980 census, which stores it offset by 1900.
    """
    usecols = [0, 1, 3, 4, 5, 8, 9, 10, 11, 12, 15, 16, 17, 18, 19, 20, 23, 24, 26]
    names = [
        "AGE",
        "AGEQ",
        "EDUC",
        "ENOCENT",
        "ESOCENT",
        "LWKLYWGE",
        "MARRIED",
        "MIDATL",
        "MT",
        "NEWENG",
        "CENSUS",
        "STATE",
        "QOB",
        "RACE",
        "SMSA",
        "SOATL",
        "WNOCENT",
        "WSOCENT",
        "YOB",
    ]
    census = pd.read_csv(FILE_PATH_CENSUS80_EXTRACT, sep=" ", usecols=usecols, names=names)
    # correct AGEQ
    census.loc[census["CENSUS"] == 80, "AGEQ"] = census["AGEQ"] - 1900
    return census
def get_df_census70():
    """Load the 1970 census rows from the full 1970/1980 Stata sample,
    renaming the v* columns to readable names."""
    rename_map = {
        "v1": "AGE",
        "v2": "AGEQ",
        "v4": "EDUC",
        "v5": "ENOCENT",
        "v6": "ESOCENT",
        "v9": "LWKLYWGE",
        "v10": "MARRIED",
        "v11": "MIDATL",
        "v12": "MT",
        "v13": "NEWENG",
        "v16": "CENSUS",
        "v17": "STATE",
        "v18": "QOB",
        "v19": "RACE",
        "v20": "SMSA",
        "v21": "SOATL",
        "v24": "WNOCENT",
        "v25": "WSOCENT",
        "v27": "YOB",
    }
    df = pd.read_stata(FILE_PATH_FULL_CENSUS7080, columns=list(rename_map))
    df = df.rename(columns=rename_map)
    # keep only the 1970 census rows
    return df.loc[df["CENSUS"] == 70]
def get_df_census70_census_80():
    """Load the combined 1970/1980 census Stata sample, renaming the v*
    columns to readable names (no census-year filtering)."""
    rename_map = {
        "v1": "AGE",
        "v2": "AGEQ",
        "v4": "EDUC",
        "v5": "ENOCENT",
        "v6": "ESOCENT",
        "v9": "LWKLYWGE",
        "v10": "MARRIED",
        "v11": "MIDATL",
        "v12": "MT",
        "v13": "NEWENG",
        "v16": "CENSUS",
        "v17": "STATE",
        "v18": "QOB",
        "v19": "RACE",
        "v20": "SMSA",
        "v21": "SOATL",
        "v24": "WNOCENT",
        "v25": "WSOCENT",
        "v27": "YOB",
    }
    df = pd.read_stata(FILE_PATH_FULL_CENSUS7080, columns=list(rename_map))
    return df.rename(columns=rename_map)
def prepare_census_data(
    df,
    const=True,
    qob=True,
    yob=True,
    age=True,
    state_of_birth=False,
    qob_x_yob=False,
    qob_x_state=False,
):
    """Augment *df* with the regression columns selected by the flags.

    Interaction flags imply their base dummies (e.g. ``qob_x_yob`` forces
    both QOB and YOB dummies).  ``qob_x_state`` doubles as the list of
    states used to build the QOB x STATE interactions.
    """
    steps = (
        (const, add_constant),
        (qob or qob_x_yob or qob_x_state, add_quarter_of_birth_dummies),
        (yob or qob_x_yob, add_year_of_birth_dummies),
        (age, add_age_squared),
        (state_of_birth or qob_x_state, add_state_of_birth_dummies),
        (qob_x_yob, add_qob_yob_interactions),
    )
    for wanted, transform in steps:
        if wanted:
            df = transform(df)
    if qob_x_state:
        df = add_qob_state_interactions(df, qob_x_state)
    return df
def add_constant(df):
    """Attach a uint8 intercept column named CONST (all ones) to *df* in place."""
    df["CONST"] = np.ones(len(df), dtype=np.uint8)
    return df
def get_constant_name():
    """Column name of the intercept added by ``add_constant``, as a list."""
    return ["CONST"]
def add_quarter_of_birth_dummies(df):
    """Append one-hot quarter-of-birth columns (DUMMY_QOB_<q>) to *df*."""
    dummies = pd.get_dummies(df["QOB"], prefix="DUMMY_QOB")
    return pd.concat((df, dummies), axis=1)
def get_quarter_of_birth_dummy_names(start=1, end=3):
    """Names of the QOB dummy columns for quarters *start* .. *end* (inclusive)."""
    return ["DUMMY_QOB_%d" % quarter for quarter in range(start, end + 1)]
def add_year_of_birth_dummies(df):
    """Append one-hot dummies for the last digit of YOB (DUMMY_YOB_<d>) to *df*."""
    last_digit = df["YOB"] % 10
    return pd.concat((df, pd.get_dummies(last_digit, prefix="DUMMY_YOB")), axis=1)
def get_year_of_birth_dummy_names(start=0, end=8):
    """Names of the YOB-last-digit dummy columns for digits *start* .. *end* (inclusive)."""
    return ["DUMMY_YOB_%d" % digit for digit in range(start, end + 1)]
def add_age_squared(df):
    """Add AGESQ = AGEQ squared to *df* in place."""
    df["AGESQ"] = df["AGEQ"] ** 2
    return df
def get_age_control_names(ageq=True, agesq=True):
    """Names of the selected age control columns, in AGEQ, AGESQ order."""
    return [name for name, keep in (("AGEQ", ageq), ("AGESQ", agesq)) if keep]
def add_state_of_birth_dummies(df):
    """Append one-hot state-of-birth columns (DUMMY_STATE_<s>) to *df*."""
    dummies = pd.get_dummies(df["STATE"], prefix="DUMMY_STATE")
    return pd.concat((df, dummies), axis=1)
def get_state_of_birth_dummy_names(state_list):
    """Names of the state-of-birth dummy columns for the given states."""
    return ["DUMMY_STATE_%s" % state for state in state_list]
def get_state_list(df, rm_state=1):
    """Set of unique birth states in *df*, with *rm_state* removed.

    Raises KeyError if *rm_state* is not present (matching ``set.remove``).
    """
    states = set(df["STATE"])
    states.remove(rm_state)
    return states
def add_qob_yob_interactions(df):
    """Append uint8 QOB x YOB interaction dummies to *df* via a patsy design matrix.

    Assumes the DUMMY_QOB_* and DUMMY_YOB_* columns already exist in *df*
    (created by ``add_quarter_of_birth_dummies`` / ``add_year_of_birth_dummies``).
    """
    interact_qob_yob = patsy.dmatrix(
        " + ".join(get_qob_yob_interaction_names()), df, return_type="dataframe"
    )
    # patsy always adds an intercept column; drop it before concatenating
    interact_qob_yob.drop("Intercept", axis=1, inplace=True)
    return pd.concat((df, interact_qob_yob.astype(np.uint8)), axis=1)
def get_qob_yob_interaction_names(qob_start=1, qob_end=3, yob_start=0, yob_end=9):
    """Patsy-style names for every YOB x QOB dummy interaction.

    Ordered with quarter-of-birth as the outer loop, year digit inner.
    """
    names = []
    for qob in range(qob_start, qob_end + 1):
        for yob in range(yob_start, yob_end + 1):
            names.append("DUMMY_YOB_%d:DUMMY_QOB_%d" % (yob, qob))
    return names
def add_qob_state_interactions(df, state_list):
    """Append uint8 STATE x QOB interaction dummies for the given states to *df*.

    Assumes the DUMMY_QOB_* and DUMMY_STATE_* columns already exist in *df*.
    """
    interact_qob_state = patsy.dmatrix(
        " + ".join(get_qob_state_of_birth_interaction_names(state_list)),
        df,
        return_type="dataframe",
    )
    # patsy always adds an intercept column; drop it before concatenating
    interact_qob_state.drop("Intercept", axis=1, inplace=True)
    return pd.concat((df, interact_qob_state.astype(np.uint8)), axis=1)
def get_qob_state_of_birth_interaction_names(state_list):
    """Patsy-style names for every STATE x QOB dummy interaction (quarters 1-3 outer)."""
    return [
        "DUMMY_STATE_%s:DUMMY_QOB_%d" % (state, quarter)
        for quarter in range(1, 4)
        for state in state_list
    ]
def get_further_exogenous_regressors(race=True, smsa=True, married=True):
    """Names of the selected further exogenous regressors, in RACE, SMSA, MARRIED order."""
    flags = (("RACE", race), ("SMSA", smsa), ("MARRIED", married))
    return [name for name, keep in flags if keep]
def get_region_of_residence_dummies():
    """Names of the region-of-residence dummy columns present in the census data."""
    return ["NEWENG", "MIDATL", "ENOCENT", "WNOCENT", "SOATL", "ESOCENT", "WSOCENT", "MT"]
def get_education_name():
    """Column name of years of education, as a list."""
    return ["EDUC"]
def get_log_weekly_wage_name():
    """Column name of the log weekly wage, as a list."""
    return ["LWKLYWGE"]
def add_education_dummies(df):
    """Add degree-attainment indicator columns derived from years of education.

    Thresholds on EDUC: 12+ high school, 16+ college, 18+ master's, 20+ doctorate.
    """
    thresholds = {
        "DUMMY_HIGH_SCHOOL": 12,
        "DUMMY_COLLEGE": 16,
        "DUMMY_MASTER": 18,
        "DUMMY_DOCTOR": 20,
    }
    for column, years in thresholds.items():
        df[column] = [1 if x >= years else 0 for x in df["EDUC"]]
    return df
def add_detrended_educational_variables(df, educ_vars=("EDUC",)):
    """Add moving-average trend and detrended columns for education variables.

    For each variable in *educ_vars*, the per-(YOB, QOB) cohort mean is
    smoothed with a two-sided moving average (the two cohorts before and the
    two after, the cohort itself excluded; NaN near the boundaries).  The
    smoothed trend is stored in ``MV_AVG_<var>`` and the deviation from it
    in ``DTRND_<var>``.

    BUG FIX: the default was ``("EDUC")`` — a plain *string*, since a
    one-element tuple needs a trailing comma — so iterating it yielded the
    characters 'E', 'D', 'U', 'C' and raised a KeyError.  It is now a
    proper one-element tuple.
    """
    for ev in educ_vars:
        cohort_mean = df.groupby(["YOB", "QOB"])[ev].mean()
        # two-sided moving average: (sum of the 5-point window - centre) / 4
        mv_avg = (cohort_mean.rolling(5, center=True).sum() - cohort_mean) / 4
        for yob in set(df["YOB"]):
            for qob in set(df["QOB"]):
                df.loc[(df["YOB"] == yob) & (df["QOB"] == qob), f"MV_AVG_{ev}"] = mv_avg.loc[
                    (yob, qob)
                ]
        df[f"DTRND_{ev}"] = df[ev] - df[f"MV_AVG_{ev}"]
    return df
def two_sided_moving_average(x):
    """Average of the two neighbours on each side of every point, centre excluded.

    The first and last two entries have no full window and remain NaN.
    """
    smoothed = np.full_like(x, np.nan)
    for i in range(2, len(x) - 2):
        window_sum = x[i - 2] + x[i - 1] + x[i + 1] + x[i + 2]
        smoothed[i] = window_sum / 4
    return smoothed
| [
"pandas.get_dummies",
"pandas.read_stata",
"numpy.full_like",
"pandas.read_csv"
] | [((613, 698), 'pandas.read_csv', 'pd.read_csv', (['FILE_PATH_CENSUS80_EXTRACT'], {'sep': '""" """', 'usecols': 'cols', 'names': 'cols_names'}), "(FILE_PATH_CENSUS80_EXTRACT, sep=' ', usecols=cols, names=cols_names\n )\n", (624, 698), True, 'import pandas as pd\n'), ((1474, 1528), 'pandas.read_stata', 'pd.read_stata', (['FILE_PATH_FULL_CENSUS7080'], {'columns': 'cols'}), '(FILE_PATH_FULL_CENSUS7080, columns=cols)\n', (1487, 1528), True, 'import pandas as pd\n'), ((2321, 2375), 'pandas.read_stata', 'pd.read_stata', (['FILE_PATH_FULL_CENSUS7080'], {'columns': 'cols'}), '(FILE_PATH_FULL_CENSUS7080, columns=cols)\n', (2334, 2375), True, 'import pandas as pd\n'), ((6997, 7020), 'numpy.full_like', 'np.full_like', (['x', 'np.nan'], {}), '(x, np.nan)\n', (7009, 7020), True, 'import numpy as np\n'), ((3311, 3356), 'pandas.get_dummies', 'pd.get_dummies', (["df['QOB']"], {'prefix': '"""DUMMY_QOB"""'}), "(df['QOB'], prefix='DUMMY_QOB')\n", (3325, 3356), True, 'import pandas as pd\n'), ((3547, 3597), 'pandas.get_dummies', 'pd.get_dummies', (["(df['YOB'] % 10)"], {'prefix': '"""DUMMY_YOB"""'}), "(df['YOB'] % 10, prefix='DUMMY_YOB')\n", (3561, 3597), True, 'import pandas as pd\n'), ((4028, 4077), 'pandas.get_dummies', 'pd.get_dummies', (["df['STATE']"], {'prefix': '"""DUMMY_STATE"""'}), "(df['STATE'], prefix='DUMMY_STATE')\n", (4042, 4077), True, 'import pandas as pd\n')] |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from scipy.io.wavfile import read
from art.estimators.speech_recognition.speech_recognizer import SpeechRecognizerMixin
from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR
from art.estimators.tensorflow import TensorFlowV2Estimator
from art.utils import get_file
from tests.utils import ARTTestException
logger = logging.getLogger(__name__)  # module-level logger for these tests
class TestTensorFlowLingvoASR:
"""
Test the TensorFlowLingvoASR estimator.
"""
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
def test_is_subclass(self, art_warning):
try:
assert issubclass(TensorFlowLingvoASR, (SpeechRecognizerMixin, TensorFlowV2Estimator))
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
def test_implements_abstract_methods(self, art_warning):
try:
import tensorflow.compat.v1 as tf1
TensorFlowLingvoASR()
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
def test_load_model(self, art_warning):
try:
import tensorflow.compat.v1 as tf1
TensorFlowLingvoASR()
graph = tf1.get_default_graph()
assert graph.get_operations()
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
def test_create_decoder_input(self, art_warning, audio_batch_padded):
try:
import tensorflow.compat.v1 as tf1
test_input, test_mask_frequency = audio_batch_padded
test_target_dummy = np.array(["DUMMY"] * test_input.shape[0])
lingvo = TensorFlowLingvoASR()
decoder_input_tf = lingvo._create_decoder_input(lingvo._x_padded, lingvo._y_target, lingvo._mask_frequency)
decoder_input = lingvo._sess.run(
decoder_input_tf,
{
lingvo._x_padded: test_input,
lingvo._y_target: test_target_dummy,
lingvo._mask_frequency: test_mask_frequency,
},
)
assert set(decoder_input.keys()).issuperset({"src", "tgt", "sample_ids"})
assert set(decoder_input.src.keys()).issuperset({"src_inputs", "paddings"})
assert set(decoder_input.tgt.keys()).issuperset({"ids", "labels", "paddings", "weights"})
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
def test_create_log_mel_features(self, art_warning, audio_batch_padded):
try:
import tensorflow.compat.v1 as tf1
test_input, _ = audio_batch_padded
lingvo = TensorFlowLingvoASR()
features_tf = lingvo._create_log_mel_features(lingvo._x_padded)
features = lingvo._sess.run(features_tf, {lingvo._x_padded: test_input})
assert features.shape[2] == 80
assert len(features.shape) == 4
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
def test_pad_audio_input(self, art_warning):
try:
import tensorflow.compat.v1 as tf1
test_input = np.array([np.array([1]), np.array([2] * 480)], dtype=object)
test_mask = np.array([[True] + [False] * 479, [True] * 480])
test_output = np.array([[1] + [0] * 479, [2] * 480])
lingvo = TensorFlowLingvoASR()
output, mask, mask_freq = lingvo._pad_audio_input(test_input)
assert_array_equal(test_output, output)
assert_array_equal(test_mask, mask)
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
def test_predict_batch(self, art_warning, audio_batch_padded):
try:
import tensorflow.compat.v1 as tf1
test_input, test_mask_frequency = audio_batch_padded
test_target_dummy = np.array(["DUMMY"] * test_input.shape[0])
lingvo = TensorFlowLingvoASR()
feed_dict = {
lingvo._x_padded: test_input,
lingvo._y_target: test_target_dummy,
lingvo._mask_frequency: test_mask_frequency,
}
predictions = lingvo._sess.run(lingvo._predict_batch_op, feed_dict)
assert set(predictions.keys()).issuperset(
{
"target_ids",
"target_labels",
"target_weights",
"target_paddings",
"transcripts",
"topk_decoded",
"topk_ids",
"topk_lens",
"topk_scores",
"norm_wer_errors",
"norm_wer_words",
"utt_id",
}
)
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
def test_predict(self, art_warning, audio_data):
try:
import tensorflow.compat.v1 as tf1
test_input = audio_data
lingvo = TensorFlowLingvoASR()
predictions = lingvo.predict(test_input, batch_size=2)
assert predictions.shape == test_input.shape
assert isinstance(predictions[0], np.str_)
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
def test_loss_gradient_tensor(self, art_warning, audio_batch_padded):
try:
import tensorflow.compat.v1 as tf1
test_input, test_mask_frequency = audio_batch_padded
test_target_dummy = np.array(["DUMMY"] * test_input.shape[0])
lingvo = TensorFlowLingvoASR()
feed_dict = {
lingvo._x_padded: test_input,
lingvo._y_target: test_target_dummy,
lingvo._mask_frequency: test_mask_frequency,
}
loss_gradient = lingvo._sess.run(lingvo._loss_gradient_op, feed_dict)
assert test_input.shape == loss_gradient.shape
assert loss_gradient.sum() == 0.0
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_module("lingvo")
@pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
@pytest.mark.parametrize("batch_mode", [True, False])
def test_loss_gradient_batch_mode(self, art_warning, batch_mode, audio_data):
try:
import tensorflow.compat.v1 as tf1
test_input = audio_data
test_target = np.array(["This", "is", "a dummy", "a dummy"])
lingvo = TensorFlowLingvoASR()
if batch_mode:
gradients = lingvo._loss_gradient_per_batch(test_input, test_target)
else:
gradients = lingvo._loss_gradient_per_sequence(test_input, test_target)
gradients_abs_sum = np.array([np.abs(g).sum() for g in gradients], dtype=object)
# test shape, equal inputs have equal gradients, non-zero inputs have non-zero gradient sums
assert [x.shape for x in test_input] == [g.shape for g in gradients]
assert_allclose(np.abs(gradients[2]).sum(), np.abs(gradients[3]).sum(), rtol=1e-01)
assert_array_equal(gradients_abs_sum > 0, [False, True, True, True])
except ARTTestException as e:
art_warning(e)
class TestTensorFlowLingvoASRLibriSpeechSamples:
    """End-to-end checks of TensorFlowLingvoASR against real LibriSpeech audio.

    The WAV files are fetched on demand from the cleverhans repository;
    each ``samples`` entry maps a file name to its download URI and the
    reference transcript expected from the model.
    """

    # specify LibriSpeech samples for download and with transcriptions
    samples = {
        "3575-170457-0013.wav": {
            "uri": (
                "https://github.com/tensorflow/cleverhans/blob/6ef939059172901db582c7702eb803b7171e3db5/"
                "examples/adversarial_asr/LibriSpeech/test-clean/3575/170457/3575-170457-0013.wav?raw=true"
            ),
            "transcript": (
                "THE MORE SHE IS ENGAGED IN HER PROPER DUTIES THE LAST LEISURE WILL SHE HAVE FOR IT EVEN AS"
                " AN ACCOMPLISHMENT AND A RECREATION"
            ),
        },
        "5105-28241-0006.wav": {
            "uri": (
                "https://github.com/tensorflow/cleverhans/blob/6ef939059172901db582c7702eb803b7171e3db5/"
                "examples/adversarial_asr/LibriSpeech/test-clean/5105/28241/5105-28241-0006.wav?raw=true"
            ),
            "transcript": (
                "THE LOG AND THE COMPASS THEREFORE WERE ABLE TO BE CALLED UPON TO DO THE WORK OF THE SEXTANT WHICH"
                " HAD BECOME UTTERLY USELESS"
            ),
        },
        "2300-131720-0015.wav": {
            "uri": (
                "https://github.com/tensorflow/cleverhans/blob/6ef939059172901db582c7702eb803b7171e3db5/"
                "examples/adversarial_asr/LibriSpeech/test-clean/2300/131720/2300-131720-0015.wav?raw=true"
            ),
            "transcript": (
                "HE OBTAINED THE DESIRED SPEED AND LOAD WITH A FRICTION BRAKE ALSO REGULATOR OF SPEED BUT WAITED FOR AN"
                " INDICATOR TO VERIFY IT"
            ),
        },
    }
    @pytest.mark.skip_module("lingvo")
    @pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
    def test_predict(self, art_warning):
        """Download the samples and check the first predicted transcript."""
        try:
            import tensorflow.compat.v1 as tf1
            transcripts = list()
            audios = list()
            # Download each sample (cached by get_file) and collect the
            # waveform together with its reference transcript.
            for filename, sample in self.samples.items():
                file_path = get_file(filename, sample["uri"])
                _, audio = read(file_path)
                audios.append(audio)
                transcripts.append(sample["transcript"])
            # dtype=object because the audio sequences have different lengths.
            audio_batch = np.array(audios, dtype=object)
            lingvo = TensorFlowLingvoASR()
            prediction = lingvo.predict(audio_batch, batch_size=1)
            # Only the first transcript is checked here.
            assert prediction[0] == transcripts[0]
        except ARTTestException as e:
            art_warning(e)
    @pytest.mark.skip_module("lingvo")
    @pytest.mark.skip_framework("pytorch", "tensorflow1", "tensorflow2", "mxnet", "kerastf", "non_dl_frameworks")
    @pytest.mark.xfail(reason="Known issue that needs further investigation")
    def test_loss_gradient(self, art_warning):
        """Batch-mode and per-sequence gradients should agree (currently xfail)."""
        try:
            import tensorflow.compat.v1 as tf1
            transcripts = list()
            audios = list()
            for filename, sample in self.samples.items():
                file_path = get_file(filename, sample["uri"])
                _, audio = read(file_path)
                audios.append(audio)
                transcripts.append(sample["transcript"])
            audio_batch = np.array(audios, dtype=object)
            target_batch = np.array(transcripts)
            lingvo = TensorFlowLingvoASR()
            gradient_batch = lingvo._loss_gradient_per_batch(audio_batch, target_batch)
            gradient_sequence = lingvo._loss_gradient_per_sequence(audio_batch, target_batch)
            gradient_batch_sum = np.array([np.abs(gb).sum() for gb in gradient_batch], dtype=object)
            gradient_sequence_sum = np.array([np.abs(gs).sum() for gs in gradient_sequence], dtype=object)
            # test loss gradients per batch and per sequence are the same
            assert_allclose(gradient_sequence_sum, gradient_batch_sum, rtol=1e-05)
            # test gradient_batch, gradient_sequence and audios items have same shapes
            assert (
                [gb.shape for gb in gradient_batch]
                == [gs.shape for gs in gradient_sequence]
                == [a.shape for a in audios]
            )
        except ARTTestException as e:
            art_warning(e)
| [
"logging.getLogger",
"numpy.abs",
"art.utils.get_file",
"art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR",
"pytest.mark.xfail",
"numpy.testing.assert_allclose",
"pytest.mark.skip_module",
"pytest.mark.skip_framework",
"pytest.mark.parametrize",
"numpy.array",
"scipy.io.wa... | [((1677, 1704), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1694, 1704), False, 'import logging\n'), ((1804, 1837), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (1827, 1837), False, 'import pytest\n'), ((1843, 1955), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (1869, 1955), False, 'import pytest\n'), ((2180, 2213), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (2203, 2213), False, 'import pytest\n'), ((2219, 2331), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (2245, 2331), False, 'import pytest\n'), ((2555, 2588), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (2578, 2588), False, 'import pytest\n'), ((2594, 2706), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (2620, 2706), False, 'import pytest\n'), ((2999, 3032), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (3022, 3032), False, 'import pytest\n'), ((3038, 3150), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 
'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (3064, 3150), False, 'import pytest\n'), ((4236, 4269), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (4259, 4269), False, 'import pytest\n'), ((4275, 4387), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (4301, 4387), False, 'import pytest\n'), ((4932, 4965), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (4955, 4965), False, 'import pytest\n'), ((4971, 5083), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (4997, 5083), False, 'import pytest\n'), ((5703, 5736), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (5726, 5736), False, 'import pytest\n'), ((5742, 5854), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (5768, 5854), False, 'import pytest\n'), ((7044, 7077), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (7067, 7077), False, 'import pytest\n'), ((7083, 7195), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (7109, 7195), False, 
'import pytest\n'), ((7636, 7669), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (7659, 7669), False, 'import pytest\n'), ((7675, 7787), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (7701, 7787), False, 'import pytest\n'), ((8560, 8593), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (8583, 8593), False, 'import pytest\n'), ((8599, 8711), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (8625, 8711), False, 'import pytest\n'), ((8713, 8765), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""batch_mode"""', '[True, False]'], {}), "('batch_mode', [True, False])\n", (8736, 8765), False, 'import pytest\n'), ((11452, 11485), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (11475, 11485), False, 'import pytest\n'), ((11491, 11603), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', '"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (11517, 11603), False, 'import pytest\n'), ((12311, 12344), 'pytest.mark.skip_module', 'pytest.mark.skip_module', (['"""lingvo"""'], {}), "('lingvo')\n", (12334, 12344), False, 'import pytest\n'), ((12350, 12462), 'pytest.mark.skip_framework', 'pytest.mark.skip_framework', (['"""pytorch"""', '"""tensorflow1"""', '"""tensorflow2"""', '"""mxnet"""', '"""kerastf"""', 
'"""non_dl_frameworks"""'], {}), "('pytorch', 'tensorflow1', 'tensorflow2', 'mxnet',\n 'kerastf', 'non_dl_frameworks')\n", (12376, 12462), False, 'import pytest\n'), ((12464, 12536), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Known issue that needs further investigation"""'}), "(reason='Known issue that needs further investigation')\n", (12481, 12536), False, 'import pytest\n'), ((2462, 2483), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (2481, 2483), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((2820, 2841), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (2839, 2841), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((2862, 2885), 'tensorflow.compat.v1.get_default_graph', 'tf1.get_default_graph', ([], {}), '()\n', (2883, 2885), True, 'import tensorflow.compat.v1 as tf1\n'), ((3379, 3420), 'numpy.array', 'np.array', (["(['DUMMY'] * test_input.shape[0])"], {}), "(['DUMMY'] * test_input.shape[0])\n", (3387, 3420), True, 'import numpy as np\n'), ((3443, 3464), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (3462, 3464), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((4590, 4611), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (4609, 4611), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((5300, 5348), 'numpy.array', 'np.array', (['[[True] + [False] * 479, [True] * 480]'], {}), '([[True] + [False] * 479, [True] * 480])\n', (5308, 5348), True, 'import numpy as np\n'), ((5375, 5413), 'numpy.array', 'np.array', (['[[1] + [0] * 479, [2] * 480]'], {}), '([[1] + [0] * 479, [2] * 
480])\n', (5383, 5413), True, 'import numpy as np\n'), ((5436, 5457), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (5455, 5457), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((5544, 5583), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['test_output', 'output'], {}), '(test_output, output)\n', (5562, 5583), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((5596, 5631), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['test_mask', 'mask'], {}), '(test_mask, mask)\n', (5614, 5631), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((6076, 6117), 'numpy.array', 'np.array', (["(['DUMMY'] * test_input.shape[0])"], {}), "(['DUMMY'] * test_input.shape[0])\n", (6084, 6117), True, 'import numpy as np\n'), ((6140, 6161), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (6159, 6161), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((7364, 7385), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (7383, 7385), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((8016, 8057), 'numpy.array', 'np.array', (["(['DUMMY'] * test_input.shape[0])"], {}), "(['DUMMY'] * test_input.shape[0])\n", (8024, 8057), True, 'import numpy as np\n'), ((8080, 8101), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (8099, 8101), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((8971, 9017), 'numpy.array', 'np.array', (["['This', 'is', 'a dummy', 'a dummy']"], {}), "(['This', 'is', 'a dummy', 'a dummy'])\n", (8979, 9017), True, 'import numpy as np\n'), ((9040, 9061), 
'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (9059, 9061), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((9669, 9737), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['(gradients_abs_sum > 0)', '[False, True, True, True]'], {}), '(gradients_abs_sum > 0, [False, True, True, True])\n', (9687, 9737), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((12047, 12077), 'numpy.array', 'np.array', (['audios'], {'dtype': 'object'}), '(audios, dtype=object)\n', (12055, 12077), True, 'import numpy as np\n'), ((12100, 12121), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (12119, 12121), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((12990, 13020), 'numpy.array', 'np.array', (['audios'], {'dtype': 'object'}), '(audios, dtype=object)\n', (12998, 13020), True, 'import numpy as np\n'), ((13048, 13069), 'numpy.array', 'np.array', (['transcripts'], {}), '(transcripts)\n', (13056, 13069), True, 'import numpy as np\n'), ((13092, 13113), 'art.estimators.speech_recognition.tensorflow_lingvo.TensorFlowLingvoASR', 'TensorFlowLingvoASR', ([], {}), '()\n', (13111, 13113), False, 'from art.estimators.speech_recognition.tensorflow_lingvo import TensorFlowLingvoASR\n'), ((13592, 13662), 'numpy.testing.assert_allclose', 'assert_allclose', (['gradient_sequence_sum', 'gradient_batch_sum'], {'rtol': '(1e-05)'}), '(gradient_sequence_sum, gradient_batch_sum, rtol=1e-05)\n', (13607, 13662), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((11849, 11882), 'art.utils.get_file', 'get_file', (['filename', "sample['uri']"], {}), "(filename, sample['uri'])\n", (11857, 11882), False, 'from art.utils import get_file\n'), ((11910, 11925), 'scipy.io.wavfile.read', 'read', (['file_path'], {}), 
'(file_path)\n', (11914, 11925), False, 'from scipy.io.wavfile import read\n'), ((12792, 12825), 'art.utils.get_file', 'get_file', (['filename', "sample['uri']"], {}), "(filename, sample['uri'])\n", (12800, 12825), False, 'from art.utils import get_file\n'), ((12853, 12868), 'scipy.io.wavfile.read', 'read', (['file_path'], {}), '(file_path)\n', (12857, 12868), False, 'from scipy.io.wavfile import read\n'), ((5225, 5238), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (5233, 5238), True, 'import numpy as np\n'), ((5240, 5259), 'numpy.array', 'np.array', (['([2] * 480)'], {}), '([2] * 480)\n', (5248, 5259), True, 'import numpy as np\n'), ((9589, 9609), 'numpy.abs', 'np.abs', (['gradients[2]'], {}), '(gradients[2])\n', (9595, 9609), True, 'import numpy as np\n'), ((9617, 9637), 'numpy.abs', 'np.abs', (['gradients[3]'], {}), '(gradients[3])\n', (9623, 9637), True, 'import numpy as np\n'), ((9323, 9332), 'numpy.abs', 'np.abs', (['g'], {}), '(g)\n', (9329, 9332), True, 'import numpy as np\n'), ((13340, 13350), 'numpy.abs', 'np.abs', (['gb'], {}), '(gb)\n', (13346, 13350), True, 'import numpy as np\n'), ((13444, 13454), 'numpy.abs', 'np.abs', (['gs'], {}), '(gs)\n', (13450, 13454), True, 'import numpy as np\n')] |
import os
import imageio
import numpy as np
from metrics.tests_utils_cy import time_func
from metrics.ms_ssim_m.ms_ssim import ms_ssim
from metrics.ms_ssim_m.ms_ssim_cy import ms_ssim as ms_ssim_cy
from metrics.ms_ssim_m.ms_ssim_tf import MS_SSIM_TF

# Benchmark script: times three MS-SSIM implementations (pure Python,
# Cython, TensorFlow) on the same image pair.
# NOTE(review): joining '../..' onto __file__ itself relies on the OS
# normalizing "file.py/.." to the containing directory — it works, but
# os.path.dirname(__file__) would be the conventional spelling.
image = imageio.imread(os.path.join(__file__, '../..', 'corgi.jpg')).astype(np.int32)
# Perturb the reference image with uniform noise in [-5, 5).
noisy = (image + np.random.uniform(-5, 5, image.shape)).astype(np.int32)
obj = MS_SSIM_TF()
# Replicate both images into a batch of 4 (views, no copy).
image = np.broadcast_to(image, (4,) + image.shape)
noisy = np.broadcast_to(noisy, (4,) + noisy.shape)
time_func(ms_ssim, 'ms_ssim', image, noisy)
time_func(ms_ssim_cy, 'ms_ssim_cy cython', image, noisy)
time_func(obj.ms_ssim, 'ms_ssim_tf', image, noisy)
| [
"os.path.join",
"metrics.tests_utils_cy.time_func",
"numpy.random.uniform",
"numpy.broadcast_to",
"metrics.ms_ssim_m.ms_ssim_tf.MS_SSIM_TF"
] | [((418, 430), 'metrics.ms_ssim_m.ms_ssim_tf.MS_SSIM_TF', 'MS_SSIM_TF', ([], {}), '()\n', (428, 430), False, 'from metrics.ms_ssim_m.ms_ssim_tf import MS_SSIM_TF\n'), ((439, 481), 'numpy.broadcast_to', 'np.broadcast_to', (['image', '((4,) + image.shape)'], {}), '(image, (4,) + image.shape)\n', (454, 481), True, 'import numpy as np\n'), ((490, 532), 'numpy.broadcast_to', 'np.broadcast_to', (['noisy', '((4,) + noisy.shape)'], {}), '(noisy, (4,) + noisy.shape)\n', (505, 532), True, 'import numpy as np\n'), ((534, 577), 'metrics.tests_utils_cy.time_func', 'time_func', (['ms_ssim', '"""ms_ssim"""', 'image', 'noisy'], {}), "(ms_ssim, 'ms_ssim', image, noisy)\n", (543, 577), False, 'from metrics.tests_utils_cy import time_func\n'), ((578, 634), 'metrics.tests_utils_cy.time_func', 'time_func', (['ms_ssim_cy', '"""ms_ssim_cy cython"""', 'image', 'noisy'], {}), "(ms_ssim_cy, 'ms_ssim_cy cython', image, noisy)\n", (587, 634), False, 'from metrics.tests_utils_cy import time_func\n'), ((635, 685), 'metrics.tests_utils_cy.time_func', 'time_func', (['obj.ms_ssim', '"""ms_ssim_tf"""', 'image', 'noisy'], {}), "(obj.ms_ssim, 'ms_ssim_tf', image, noisy)\n", (644, 685), False, 'from metrics.tests_utils_cy import time_func\n'), ((276, 320), 'os.path.join', 'os.path.join', (['__file__', '"""../.."""', '"""corgi.jpg"""'], {}), "(__file__, '../..', 'corgi.jpg')\n", (288, 320), False, 'import os\n'), ((356, 393), 'numpy.random.uniform', 'np.random.uniform', (['(-5)', '(5)', 'image.shape'], {}), '(-5, 5, image.shape)\n', (373, 393), True, 'import numpy as np\n')] |
# Copyright 2019-2022 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-spots/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for expectation maximization cluster visualization"""
from itertools import combinations
import numpy as np
from scipy.spatial import distance
from tensorflow.python.platform import test
from deepcell_spots.cluster_vis import ca_to_adjacency_matrix, jitter
class TestClusterVis(test.TestCase):
    """Tests for expectation-maximization cluster visualization helpers."""

    def test_jitter(self):
        """jitter must preserve the array shape while perturbing the values."""
        coords = np.zeros((10, 2))
        size = 5
        noisy_coords = jitter(coords, size)
        self.assertEqual(np.shape(coords), np.shape(noisy_coords))
        # Compare the full arrays. The previous check compared the scalar
        # truth values coords.all() vs noisy_coords.all(), which does not
        # verify that any coordinate actually changed and fails spuriously
        # whenever a jittered coordinate happens to be exactly zero.
        self.assertFalse(np.array_equal(coords, noisy_coords))

    def test_ca_to_adjacency_matrix(self):
        """The adjacency matrix built from a cluster/annotator matrix is square."""
        num_clusters = 10
        num_annotators = 3
        ca_matrix = np.ones((num_clusters, num_annotators))
        A = ca_to_adjacency_matrix(ca_matrix)
        # Note: previously ca_matrix[0] was passed as assertEqual's `msg`
        # argument, which was misleading; the intended check is squareness.
        self.assertEqual(np.shape(A)[0], np.shape(A)[1])
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    test.main()
| [
"numpy.shape",
"numpy.ones",
"deepcell_spots.cluster_vis.ca_to_adjacency_matrix",
"numpy.zeros",
"deepcell_spots.cluster_vis.jitter",
"tensorflow.python.platform.test.main"
] | [((2101, 2112), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (2110, 2112), False, 'from tensorflow.python.platform import test\n'), ((1585, 1602), 'numpy.zeros', 'np.zeros', (['(10, 2)'], {}), '((10, 2))\n', (1593, 1602), True, 'import numpy as np\n'), ((1643, 1663), 'deepcell_spots.cluster_vis.jitter', 'jitter', (['coords', 'size'], {}), '(coords, size)\n', (1649, 1663), False, 'from deepcell_spots.cluster_vis import ca_to_adjacency_matrix, jitter\n'), ((1910, 1949), 'numpy.ones', 'np.ones', (['(num_clusters, num_annotators)'], {}), '((num_clusters, num_annotators))\n', (1917, 1949), True, 'import numpy as np\n'), ((1962, 1995), 'deepcell_spots.cluster_vis.ca_to_adjacency_matrix', 'ca_to_adjacency_matrix', (['ca_matrix'], {}), '(ca_matrix)\n', (1984, 1995), False, 'from deepcell_spots.cluster_vis import ca_to_adjacency_matrix, jitter\n'), ((1689, 1705), 'numpy.shape', 'np.shape', (['coords'], {}), '(coords)\n', (1697, 1705), True, 'import numpy as np\n'), ((1707, 1729), 'numpy.shape', 'np.shape', (['noisy_coords'], {}), '(noisy_coords)\n', (1715, 1729), True, 'import numpy as np\n'), ((2022, 2033), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (2030, 2033), True, 'import numpy as np\n'), ((2038, 2049), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (2046, 2049), True, 'import numpy as np\n')] |
import logging
import numpy as np
from ..Dataset import Dataset
def crop(jets, pileup=False):
    """Select jets inside a pt/mass window and weight them for flatness in pt.

    Args:
        jets: iterable of jet objects exposing ``pt``, ``mass`` and the
            binary class label ``y`` (0 or 1).
        pileup: if True, use the wider pileup selection window
            (pt in (300, 365), mass in (150, 220)) instead of the default
            (pt in (250, 300), mass in (50, 110)).

    Returns:
        ``(good_jets, bad_jets, w)``: the jets inside / outside the window,
        and per-good-jet weights that flatten the pt spectrum within each
        class.  Weights of each class sum to 1; positions belonging to the
        other class contribute 0 to that class's weight vector.
    """
    if pileup:
        logging.warning("pileup")
        pt_min, pt_max, m_min, m_max = 300, 365, 150, 220
    else:
        pt_min, pt_max, m_min, m_max = 250, 300, 50, 110

    good_jets = []
    bad_jets = []
    for j in jets:
        if pt_min < j.pt < pt_max and m_min < j.mass < m_max:
            good_jets.append(j)
        else:
            bad_jets.append(j)

    # Weights for flatness in pt, computed independently for each class.
    # BUG FIX: the previous implementation zipped the per-class inverse-pdf
    # weights against *all* good jets, so whenever the class labels were
    # interleaved the weights were assigned to the wrong jets (and trailing
    # class members silently got weight 0).
    w = np.zeros(len(good_jets))
    for label in (0, 1):
        w += _flatness_weights(good_jets, label, pt_min, pt_max)
    return good_jets, bad_jets, w


def _flatness_weights(good_jets, label, pt_min, pt_max):
    """Weights flattening the pt spectrum for one class.

    Returns an array aligned with ``good_jets``: jets with ``y == label``
    get 1/pdf(pt) weights normalized to sum to 1; other positions are 0.
    """
    class_jets = [jet for jet in good_jets if jet.y == label]
    pdf, edges = np.histogram([j.pt for j in class_jets], density=True, range=[pt_min, pt_max], bins=50)
    pts = [j.pt for j in class_jets]
    # Map each pt onto its histogram bin.  NOTE(review): a pt exactly on a
    # bin edge lands in the previous (possibly empty) bin, as before.
    indices = np.searchsorted(edges, pts) - 1
    inv_w = 1. / pdf[indices]
    inv_w /= inv_w.sum()
    weights = np.zeros(len(good_jets))
    class_weight = iter(inv_w)
    for i, jet in enumerate(good_jets):
        if jet.y == label:
            weights[i] = next(class_weight)
    return weights
def crop_dataset(dataset):
    """Split a Dataset into the kept (cropped-in) part and the discarded part.

    Returns a pair ``(kept, discarded)`` where ``kept`` carries the
    flatness-in-pt weights computed by :func:`crop`.
    """
    logging.info(dataset.subproblem)
    is_pileup = dataset.subproblem == 'pileup'
    good_jets, bad_jets, weights = crop(dataset.jets, is_pileup)
    discarded = Dataset(bad_jets)
    kept = Dataset(good_jets, weights)
    return kept, discarded
| [
"numpy.histogram",
"numpy.searchsorted",
"logging.warning",
"numpy.array",
"logging.info"
] | [((615, 653), 'numpy.array', 'np.array', (['[jet.y for jet in good_jets]'], {}), '([jet.y for jet in good_jets])\n', (623, 653), True, 'import numpy as np\n'), ((726, 813), 'numpy.histogram', 'np.histogram', (['[j.pt for j in jets_0]'], {'density': '(True)', 'range': '[pt_min, pt_max]', 'bins': '(50)'}), '([j.pt for j in jets_0], density=True, range=[pt_min, pt_max],\n bins=50)\n', (738, 813), True, 'import numpy as np\n'), ((1141, 1228), 'numpy.histogram', 'np.histogram', (['[j.pt for j in jets_1]'], {'density': '(True)', 'range': '[pt_min, pt_max]', 'bins': '(50)'}), '([j.pt for j in jets_1], density=True, range=[pt_min, pt_max],\n bins=50)\n', (1153, 1228), True, 'import numpy as np\n'), ((1552, 1584), 'logging.info', 'logging.info', (['dataset.subproblem'], {}), '(dataset.subproblem)\n', (1564, 1584), False, 'import logging\n'), ((154, 179), 'logging.warning', 'logging.warning', (['"""pileup"""'], {}), "('pileup')\n", (169, 179), False, 'import logging\n'), ((857, 884), 'numpy.searchsorted', 'np.searchsorted', (['edges', 'pts'], {}), '(edges, pts)\n', (872, 884), True, 'import numpy as np\n'), ((1272, 1299), 'numpy.searchsorted', 'np.searchsorted', (['edges', 'pts'], {}), '(edges, pts)\n', (1287, 1299), True, 'import numpy as np\n')] |
from pathlib import Path
import numpy
import sys
from keras.layers import Dense
from keras.models import load_model, Sequential
from numpy import loadtxt
import pickle
import os
from sklearn.externals import joblib
# Number of features in each input plan vector fed to the models below.
featureVectorSize = 251
def wider_deep_model():
    """Build and compile the wider/deeper Keras regression network.

    Returns:
        A compiled Sequential model mapping a feature vector of length
        ``featureVectorSize`` to a single regression output (MSE loss,
        Adam optimizer).
    """
    hidden_units = featureVectorSize + 20
    model = Sequential()
    model.add(Dense(hidden_units, input_dim=featureVectorSize, kernel_initializer='normal', activation='relu'))
    model.add(Dense(55, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
def main():
    """Load a trained model, predict runtimes for input vectors, write estimates.

    Command line: ``[model] [input_file] [output_file]``
      * model: ``"nn"`` (default) or ``"forest"``
      * input_file: whitespace-separated feature vectors
        (default ``~/.rheem/mlModelVectors.txt``)
      * output_file: destination for one integer estimate per line
        (default ``~/.rheem/estimates.txt``)
    """
    currentDirPath = os.path.dirname(os.path.realpath(__file__))
    dirPath = str(Path.home())
    model = "nn"
    if len(sys.argv) >= 2:
        model = sys.argv[1]
    if len(sys.argv) >= 3:
        inputFile = loadtxt(sys.argv[2], comments="#", delimiter=" ", unpack=False)
    else:
        inputFile = loadtxt(os.path.join(dirPath, ".rheem", "mlModelVectors.txt"), comments="#", delimiter=" ", unpack=False)
    size = 251
    start = 0
    if inputFile.ndim == 1:
        # A single sample is loaded as a 1-D vector; reshape to one row.
        inputFile = numpy.reshape(inputFile, (-1, inputFile.size))
    x_test = inputFile[:, 0:size]
    # y_test is unused by the prediction below, but kept so malformed input
    # files without a label column still fail early, as before.
    y_test = inputFile[start:, size]
    # Load the requested model from disk.
    if model == "forest":
        filename = os.path.join(currentDirPath, "model-forest.sav")
        print("Loading model: " + filename)
        with open(filename, 'rb') as model_file:
            model = pickle.load(model_file)
    elif model == "nn":
        filename = os.path.join(currentDirPath, 'nn.pkl')
        print("Loading model: " + filename)
        # Load the sklearn pipeline first, then restore its Keras model.
        model = joblib.load(filename)
        model.named_steps['mlp'].model = load_model(os.path.join(currentDirPath, 'keras_model.h5'))
    # fix random seed for reproducibility
    seed = 7
    numpy.random.seed(seed)
    prediction = model.predict(x_test)
    # BUG FIX: the output path was previously passed through loadtxt(),
    # which tried to parse the *path argument* as a numeric data file.
    if len(sys.argv) >= 4:
        saveLocation = sys.argv[3]
    else:
        saveLocation = os.path.join(dirPath, ".rheem", "estimates.txt")
    # Remove any stale result first (os._exists was a private CPython
    # helper; use the public os.path.exists instead).
    if os.path.exists(saveLocation):
        os.remove(saveLocation)
    with open(saveLocation, "w") as text_file:
        if prediction.ndim == 0:
            # Scalar prediction: write the single estimate.
            text_file.write("%d" % prediction)
            text_file.write("\n")
        else:
            for num in range(0, prediction.size):
                text_file.write("%d" % prediction[num])
                text_file.write("\n")
    print("estimation done!")
# Script entry point.
if __name__ == "__main__":
    main()
| [
"numpy.reshape",
"os._exists",
"pathlib.Path.home",
"sklearn.externals.joblib.load",
"os.path.join",
"keras.models.Sequential",
"os.path.realpath",
"numpy.random.seed",
"keras.layers.Dense",
"numpy.loadtxt",
"os.remove"
] | [((299, 311), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (309, 311), False, 'from keras.models import load_model, Sequential\n'), ((2280, 2303), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (2297, 2303), False, 'import numpy\n'), ((3314, 3338), 'os._exists', 'os._exists', (['saveLocation'], {}), '(saveLocation)\n', (3324, 3338), False, 'import os\n'), ((326, 436), 'keras.layers.Dense', 'Dense', (['(featureVectorSize + 20)'], {'input_dim': 'featureVectorSize', 'kernel_initializer': '"""normal"""', 'activation': '"""relu"""'}), "(featureVectorSize + 20, input_dim=featureVectorSize,\n kernel_initializer='normal', activation='relu')\n", (331, 436), False, 'from keras.layers import Dense\n'), ((446, 503), 'keras.layers.Dense', 'Dense', (['(55)'], {'kernel_initializer': '"""normal"""', 'activation': '"""relu"""'}), "(55, kernel_initializer='normal', activation='relu')\n", (451, 503), False, 'from keras.layers import Dense\n'), ((519, 556), 'keras.layers.Dense', 'Dense', (['(1)'], {'kernel_initializer': '"""normal"""'}), "(1, kernel_initializer='normal')\n", (524, 556), False, 'from keras.layers import Dense\n'), ((834, 860), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (850, 860), False, 'import os\n'), ((881, 892), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (890, 892), False, 'from pathlib import Path\n'), ((1014, 1077), 'numpy.loadtxt', 'loadtxt', (['sys.argv[2]'], {'comments': '"""#"""', 'delimiter': '""" """', 'unpack': '(False)'}), "(sys.argv[2], comments='#', delimiter=' ', unpack=False)\n", (1021, 1077), False, 'from numpy import loadtxt\n'), ((1370, 1416), 'numpy.reshape', 'numpy.reshape', (['inputFile', '(-1, inputFile.size)'], {}), '(inputFile, (-1, inputFile.size))\n', (1383, 1416), False, 'import numpy\n'), ((1743, 1791), 'os.path.join', 'os.path.join', (['currentDirPath', '"""model-forest.sav"""'], {}), "(currentDirPath, 'model-forest.sav')\n", (1755, 1791), False, 'import 
os\n'), ((3141, 3204), 'numpy.loadtxt', 'loadtxt', (['sys.argv[3]'], {'comments': '"""#"""', 'delimiter': '""" """', 'unpack': '(False)'}), "(sys.argv[3], comments='#', delimiter=' ', unpack=False)\n", (3148, 3204), False, 'from numpy import loadtxt\n'), ((3238, 3286), 'os.path.join', 'os.path.join', (['dirPath', '""".rheem"""', '"""estimates.txt"""'], {}), "(dirPath, '.rheem', 'estimates.txt')\n", (3250, 3286), False, 'import os\n'), ((3349, 3372), 'os.remove', 'os.remove', (['saveLocation'], {}), '(saveLocation)\n', (3358, 3372), False, 'import os\n'), ((1116, 1169), 'os.path.join', 'os.path.join', (['dirPath', '""".rheem"""', '"""mlModelVectors.txt"""'], {}), "(dirPath, '.rheem', 'mlModelVectors.txt')\n", (1128, 1169), False, 'import os\n'), ((1926, 1964), 'os.path.join', 'os.path.join', (['currentDirPath', '"""nn.pkl"""'], {}), "(currentDirPath, 'nn.pkl')\n", (1938, 1964), False, 'import os\n'), ((2057, 2078), 'sklearn.externals.joblib.load', 'joblib.load', (['filename'], {}), '(filename)\n', (2068, 2078), False, 'from sklearn.externals import joblib\n'), ((2170, 2216), 'os.path.join', 'os.path.join', (['currentDirPath', '"""keras_model.h5"""'], {}), "(currentDirPath, 'keras_model.h5')\n", (2182, 2216), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, DeterministicVariable, MultivariateNormalVariable
from brancher import inference
import brancher.functions as BF
N_itr = 250
N_smpl = 1000
optimizer = "SGD"
lr = 0.001 #0.0001
# Probabilistic model #
T = 30
dt = 0.1
driving_noise = 0.1
measure_noise = 0.15
x0 = NormalVariable(0., driving_noise, 'x0')
y0 = NormalVariable(x0, measure_noise, 'y0')
x = [x0]
y = [y0]
x_names = ["x0"]
y_names = ["y0"]
y_range = [t for t in range(T) if (t < 0 or t > 28)]
for t in range(1, T):
x_names.append("x{}".format(t))
x.append(NormalVariable(x[t - 1], np.sqrt(dt)*driving_noise, x_names[t]))
if t in y_range:
y_name = "y{}".format(t)
y_names.append(y_name)
y.append(NormalVariable(x[t], measure_noise, y_name))
AR_model = ProbabilisticModel(x + y)
# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[yt].data) for yt in y]
ground_truth = [float(data[xt].data) for xt in x]
#true_b = data[b].data
#print("The true coefficient is: {}".format(float(true_b)))
# Observe data #
[yt.observe(data[yt][:, 0, :]) for yt in y]
# get time series
#plt.plot([data[xt][:, 0, :] for xt in x])
#plt.scatter(y_range, time_series, c="k")
#plt.show()
# Structured variational distribution #
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]
Qlambda = [RootVariable(-1., 'x0_lambda', learnable=True)]
for t in range(1, T):
if t in y_range:
l = 0.
else:
l = 1.
Qx_mean.append(RootVariable(0, x_names[t] + "_mean", learnable=True))
Qlambda.append(RootVariable(l, x_names[t] + "_lambda", learnable=True))
Qx.append(NormalVariable(BF.sigmoid(Qlambda[t])*Qx[t - 1] + (1 - BF.sigmoid(Qlambda[t]))*Qx_mean[t],
np.sqrt(dt)*driving_noise, x_names[t], learnable=True))
variational_posterior = ProbabilisticModel(Qx)
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=N_itr,
number_samples=N_smpl,
optimizer=optimizer,
lr=lr)
loss_list1 = AR_model.diagnostics["loss curve"]
samples_PE = AR_model.posterior_model.get_sample(1000)
#
# # Plot posterior
# #from brancher.visualizations import plot_density
# #plot_density(AR_model.posterior_model, variables=["x0", "x1"])
#
# # ELBO
# N_ELBO_smpl = 5000
# ELBO1 = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
# print("PE: {}".format(ELBO1))
#
# # Statistics
# posterior_samples1 = AR_model._get_posterior_sample(2000)
# #b_posterior_samples1 = posterior_samples1[b].detach().numpy().flatten()
# #b_mean1 = np.mean(b_posterior_samples1)
# #b_sd1 = np.sqrt(np.var(b_posterior_samples1))
#
# x_mean1 = []
# lower_bound1 = []
# upper_bound1 = []
# for xt in x:
# x_posterior_samples1 = posterior_samples1[xt].detach().numpy().flatten()
# mean1 = np.mean(x_posterior_samples1)
# sd1 = np.sqrt(np.var(x_posterior_samples1))
# x_mean1.append(mean1)
# lower_bound1.append(mean1 - sd1)
# upper_bound1.append(mean1 + sd1)
# #print("The estimated coefficient is: {} +- {}".format(b_mean1, b_sd1))
# # Two subplots, unpack the axes array immediately
# f, (ax1, ax2, ax3) = plt.subplots(1, 3)
# ax1.plot(range(T), x_mean1, color="b", label="PE")
# ax1.scatter(y_range, time_series, color="k")
# ax1.plot(range(T), ground_truth, color="k", ls ="--", lw=1.5)
# ax1.fill_between(range(T), lower_bound1, upper_bound1, color="b", alpha=0.25)
# ax1.set_title("Time series")
# ax2.plot(np.array(loss_list1), color="b")
# ax2.set_title("Convergence")
# ax2.set_xlabel("Iteration")
# ax3.hist(b_posterior_samples1, 25, color="b", alpha=0.25)
# ax3.set_title("Posterior samples (b)")
# ax3.set_xlim(0, 1)
# plt.show()
# Mean-field variational distribution #
#Qb = BetaVariable(8., 1., "b", learnable=True)
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
for t in range(1, T):
Qx.append(NormalVariable(0, 2., x_names[t], learnable=True))
variational_posterior = ProbabilisticModel(Qx)
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
number_iterations=N_itr,
number_samples=N_smpl,
optimizer=optimizer,
lr=lr)
loss_list2 = AR_model.diagnostics["loss curve"]
#Plot posterior
from brancher.visualizations import plot_density
plot_density(AR_model.posterior_model, variables=["x0", "x1"])
# ELBO
ELBO2 = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
print("MF: {}".format(ELBO2))
#
# samples_MF = AR_model.posterior_model.get_sample(1000)
#
# # Statistics
# posterior_samples2 = AR_model._get_posterior_sample(2000)
# #b_posterior_samples2 = posterior_samples2[b].detach().numpy().flatten()
# #b_mean2 = np.mean(b_posterior_samples2)
# #b_sd2 = np.sqrt(np.var(b_posterior_samples2))
#
# x_mean2 = []
# lower_bound2 = []
# upper_bound2 = []
# for xt in x:
# x_posterior_samples2 = posterior_samples2[xt].detach().numpy().flatten()
# mean2 = np.mean(x_posterior_samples2)
# sd2 = np.sqrt(np.var(x_posterior_samples2))
# x_mean2.append(mean2)
# lower_bound2.append(mean2 - sd2)
# upper_bound2.append(mean2 + sd2)
#print("The estimated coefficient is: {} +- {}".format(b_mean2, b_sd2))
# # Multivariate normal variational distribution #
# QV = MultivariateNormalVariable(loc=np.zeros((T,)),
# covariance_matrix=2*np.identity(T),
# learnable=True)
# #Qb = BetaVariable(8., 1., "b", learnable=True)
# Qx = [NormalVariable(QV[0], 0.1, 'x0', learnable=True)]
#
# for t in range(1, T):
# Qx.append(NormalVariable(QV[t], 0.1, x_names[t], learnable=True))
# variational_posterior = ProbabilisticModel(Qx)
# AR_model.set_posterior_model(variational_posterior)
#
# # Inference #
# inference.perform_inference(AR_model,
# number_iterations=N_itr,
# number_samples=N_smpl,
# optimizer=optimizer,
# lr=lr)
#
# loss_list3 = AR_model.diagnostics["loss curve"]
#
# # Plot posterior
# #plot_density(AR_model.posterior_model, variables=["x0", "x1"])
#
# # ELBO
# ELBO3 = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
# print("MN: {}".format(ELBO3))
#
# samples_MN = AR_model.posterior_model.get_sample(1000)
#
# # Statistics
# posterior_samples3 = AR_model._get_posterior_sample(2000)
# #b_posterior_samples3 = posterior_samples3[b].detach().numpy().flatten()
# #b_mean3 = np.mean(b_posterior_samples3)
# #b_sd3 = np.sqrt(np.var(b_posterior_samples3))
#
# x_mean3 = []
# lower_bound3 = []
# upper_bound3 = []
# for xt in x:
# x_posterior_samples3 = posterior_samples3[xt].detach().numpy().flatten()
# mean3 = np.mean(x_posterior_samples3)
# sd3 = np.sqrt(np.var(x_posterior_samples3))
# x_mean3.append(mean3)
# lower_bound3.append(mean3 - sd3)
# upper_bound3.append(mean3 + sd3)
# #print("The estimated coefficient is: {} +- {}".format(b_mean3, b_sd3))
#
# # Structured NN distribution #
# latent_size = 10
# hidden_size = 10
# #Qb = BetaVariable(8., 1., "b", learnable=True)
# Qepsilon = NormalVariable(np.zeros((10,1)), np.ones((10,)), 'epsilon', learnable=True)
# W1 = RootVariable(np.random.normal(0, 0.1, (hidden_size, latent_size)), "W1", learnable=True)
# W2 = RootVariable(np.random.normal(0, 0.1, (T, hidden_size)), "W2", learnable=True)
# pre_x = BF.matmul(W2, BF.sigmoid(BF.matmul(W1, Qepsilon)))
# Qx = []
# for t in range(0, T):
# pre_x_t = DeterministicVariable(pre_x[t], "x{}_mean".format(t), learnable=True)
# Qx.append(NormalVariable(pre_x_t, 1., x_names[t], learnable=True))
# variational_posterior = ProbabilisticModel(Qx)
# AR_model.set_posterior_model(variational_posterior)
#
# # Inference #
# inference.perform_inference(AR_model,
# number_iterations=N_itr,
# number_samples=N_smpl,
# optimizer=optimizer,
# lr=lr)
#
# loss_list4 = AR_model.diagnostics["loss curve"]
#plot_density(AR_model.posterior_model, variables=["x0", "x1"])
#plt.show()
## ELBO
#ELBO4 = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
#print("NN: {}".format(ELBO4))
#
# samples_NN = AR_model.posterior_model.get_sample(1000)
#
# # Statistics
# posterior_samples4 = AR_model._get_posterior_sample(2000)
# #b_posterior_samples4 = posterior_samples4[b].detach().numpy().flatten()
# #b_mean4 = np.mean(b_posterior_samples4)
# #b_sd4 = np.sqrt(np.var(b_posterior_samples2))
#
# x_mean4 = []
# lower_bound4 = []
# upper_bound4 = []
# for xt in x:
# x_posterior_samples4 = posterior_samples4[xt].detach().numpy().flatten()
# mean4 = np.mean(x_posterior_samples4)
# sd4 = np.sqrt(np.var(x_posterior_samples4))
# x_mean4.append(mean4)
# lower_bound4.append(mean4 - sd4)
# upper_bound4.append(mean4 + sd4)
# #print("The estimated coefficient is: {} +- {}".format(b_mean4, b_sd4))
#
# # Densities
# from brancher.visualizations import plot_multiple_samples
# #plot_multiple_samples([samples_PE, samples_MF, samples_NN], variables=["x0", "x1"], labels=["PE","MF", "NN"])
# #plot_multiple_samples([samples_PE, samples_MF, samples_MN, samples_NN], variables=["x0", "x1"], labels=["PE","MF", "MN", "NN"])
# plot_multiple_samples([samples_PE, samples_NN], variables=["x0", "x1"], labels=["PE", "NN"])
# plt.show()
#
# # Two subplots, unpack the axes array immediately
# f, (ax1, ax2) = plt.subplots(1, 2)
# ax1.plot(range(T), x_mean1, color="b", label="PE")
# ax1.plot(range(T), x_mean2, color="r", label="MF")
# ax1.plot(range(T), x_mean3, color="g", label="MV")
# ax1.plot(range(T), x_mean4, color="m", label="NN")
# #ax1.scatter(y_range, time_series, color="k")
# ax1.plot(range(T), ground_truth, color="k", ls ="--", lw=1.5)
# ax1.fill_between(range(T), lower_bound1, upper_bound1, color="b", alpha=0.25)
# ax1.fill_between(range(T), lower_bound2, upper_bound2, color="r", alpha=0.25)
# ax1.fill_between(range(T), lower_bound3, upper_bound3, color="g", alpha=0.25)
# ax1.fill_between(range(T), lower_bound4, upper_bound4, color="m", alpha=0.25)
# ax1.set_title("Time series")
# ax2.plot(np.array(loss_list1), color="b")
# ax2.plot(np.array(loss_list2), color="r")
# ax2.plot(np.array(loss_list3), color="g")
# ax2.plot(np.array(loss_list4), color="m")
# ax2.set_title("Convergence")
# ax2.set_xlabel("Iteration")
# plt.show()
| [
"brancher.visualizations.plot_density",
"numpy.sqrt",
"brancher.functions.sigmoid",
"brancher.variables.ProbabilisticModel",
"brancher.variables.RootVariable",
"brancher.standard_variables.NormalVariable",
"brancher.inference.perform_inference"
] | [((452, 492), 'brancher.standard_variables.NormalVariable', 'NormalVariable', (['(0.0)', 'driving_noise', '"""x0"""'], {}), "(0.0, driving_noise, 'x0')\n", (466, 492), False, 'from brancher.standard_variables import NormalVariable, DeterministicVariable, MultivariateNormalVariable\n'), ((497, 536), 'brancher.standard_variables.NormalVariable', 'NormalVariable', (['x0', 'measure_noise', '"""y0"""'], {}), "(x0, measure_noise, 'y0')\n", (511, 536), False, 'from brancher.standard_variables import NormalVariable, DeterministicVariable, MultivariateNormalVariable\n'), ((937, 962), 'brancher.variables.ProbabilisticModel', 'ProbabilisticModel', (['(x + y)'], {}), '(x + y)\n', (955, 962), False, 'from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\n'), ((2045, 2067), 'brancher.variables.ProbabilisticModel', 'ProbabilisticModel', (['Qx'], {}), '(Qx)\n', (2063, 2067), False, 'from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\n'), ((2135, 2252), 'brancher.inference.perform_inference', 'inference.perform_inference', (['AR_model'], {'number_iterations': 'N_itr', 'number_samples': 'N_smpl', 'optimizer': 'optimizer', 'lr': 'lr'}), '(AR_model, number_iterations=N_itr,\n number_samples=N_smpl, optimizer=optimizer, lr=lr)\n', (2162, 2252), False, 'from brancher import inference\n'), ((4261, 4283), 'brancher.variables.ProbabilisticModel', 'ProbabilisticModel', (['Qx'], {}), '(Qx)\n', (4279, 4283), False, 'from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\n'), ((4351, 4468), 'brancher.inference.perform_inference', 'inference.perform_inference', (['AR_model'], {'number_iterations': 'N_itr', 'number_samples': 'N_smpl', 'optimizer': 'optimizer', 'lr': 'lr'}), '(AR_model, number_iterations=N_itr,\n number_samples=N_smpl, optimizer=optimizer, lr=lr)\n', (4378, 4468), False, 'from brancher import inference\n'), ((4692, 4754), 'brancher.visualizations.plot_density', 'plot_density', 
(['AR_model.posterior_model'], {'variables': "['x0', 'x1']"}), "(AR_model.posterior_model, variables=['x0', 'x1'])\n", (4704, 4754), False, 'from brancher.visualizations import plot_density\n'), ((1435, 1481), 'brancher.standard_variables.NormalVariable', 'NormalVariable', (['(0.0)', '(1.0)', '"""x0"""'], {'learnable': '(True)'}), "(0.0, 1.0, 'x0', learnable=True)\n", (1449, 1481), False, 'from brancher.standard_variables import NormalVariable, DeterministicVariable, MultivariateNormalVariable\n'), ((1492, 1536), 'brancher.variables.RootVariable', 'RootVariable', (['(0.0)', '"""x0_mean"""'], {'learnable': '(True)'}), "(0.0, 'x0_mean', learnable=True)\n", (1504, 1536), False, 'from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\n'), ((1548, 1595), 'brancher.variables.RootVariable', 'RootVariable', (['(-1.0)', '"""x0_lambda"""'], {'learnable': '(True)'}), "(-1.0, 'x0_lambda', learnable=True)\n", (1560, 1595), False, 'from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\n'), ((4103, 4149), 'brancher.standard_variables.NormalVariable', 'NormalVariable', (['(0.0)', '(1.0)', '"""x0"""'], {'learnable': '(True)'}), "(0.0, 1.0, 'x0', learnable=True)\n", (4117, 4149), False, 'from brancher.standard_variables import NormalVariable, DeterministicVariable, MultivariateNormalVariable\n'), ((1700, 1753), 'brancher.variables.RootVariable', 'RootVariable', (['(0)', "(x_names[t] + '_mean')"], {'learnable': '(True)'}), "(0, x_names[t] + '_mean', learnable=True)\n", (1712, 1753), False, 'from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\n'), ((1774, 1829), 'brancher.variables.RootVariable', 'RootVariable', (['l', "(x_names[t] + '_lambda')"], {'learnable': '(True)'}), "(l, x_names[t] + '_lambda', learnable=True)\n", (1786, 1829), False, 'from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\n'), ((4186, 4236), 'brancher.standard_variables.NormalVariable', 'NormalVariable', 
(['(0)', '(2.0)', 'x_names[t]'], {'learnable': '(True)'}), '(0, 2.0, x_names[t], learnable=True)\n', (4200, 4236), False, 'from brancher.standard_variables import NormalVariable, DeterministicVariable, MultivariateNormalVariable\n'), ((881, 924), 'brancher.standard_variables.NormalVariable', 'NormalVariable', (['x[t]', 'measure_noise', 'y_name'], {}), '(x[t], measure_noise, y_name)\n', (895, 924), False, 'from brancher.standard_variables import NormalVariable, DeterministicVariable, MultivariateNormalVariable\n'), ((739, 750), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (746, 750), True, 'import numpy as np\n'), ((1965, 1976), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (1972, 1976), True, 'import numpy as np\n'), ((1860, 1882), 'brancher.functions.sigmoid', 'BF.sigmoid', (['Qlambda[t]'], {}), '(Qlambda[t])\n', (1870, 1882), True, 'import brancher.functions as BF\n'), ((1900, 1922), 'brancher.functions.sigmoid', 'BF.sigmoid', (['Qlambda[t]'], {}), '(Qlambda[t])\n', (1910, 1922), True, 'import brancher.functions as BF\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
import math
plt.rcParams.update({'font.size': 22})
name = "Pl"
data = pd.read_csv(name + ".csv", names=["l", "P"])
K = 0.2
data["P1"] = 9.80665 * K * data["P"]
X = data["l"].values
sigma_X = 0.4
Y = data["P"].values
sigma_Y = 0.7
A = np.vstack([X[1:], np.ones(len(X[1:]))]).T
k, b = np.linalg.lstsq(A, Y[1:], rcond=None)[0]
fig = plt.figure(figsize=(12, 7))
ax = fig.gca()
plt.scatter(X, Y, marker=".")
plt.errorbar(X, Y, xerr=sigma_X, yerr=sigma_Y, linestyle="None")
delta_x = (X.max() - X.min()) / len(X)
delta_y = (Y.max() - Y.min()) / len(Y)
ax.set_xlim(X.min() - delta_x/2, X.max() + delta_x/2)
ax.set_ylim((Y.min() - delta_y/2), Y.max() + delta_y/2)
plt.xlabel("$l, мм$")
plt.ylabel("$P, 10^{-3} Па$")
plt.plot(X, (k*X + b), 'r', label='Fitted line')
plt.grid(True)
plt.savefig("./" + name + ".png")
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.errorbar",
"numpy.linalg.ls... | [((71, 109), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 22}"], {}), "({'font.size': 22})\n", (90, 109), True, 'import matplotlib.pyplot as plt\n'), ((122, 160), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 22}"], {}), "({'font.size': 22})\n", (141, 160), True, 'import matplotlib.pyplot as plt\n'), ((180, 224), 'pandas.read_csv', 'pd.read_csv', (["(name + '.csv')"], {'names': "['l', 'P']"}), "(name + '.csv', names=['l', 'P'])\n", (191, 224), True, 'import pandas as pd\n'), ((440, 467), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (450, 467), True, 'import matplotlib.pyplot as plt\n'), ((483, 512), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'Y'], {'marker': '"""."""'}), "(X, Y, marker='.')\n", (494, 512), True, 'import matplotlib.pyplot as plt\n'), ((513, 577), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['X', 'Y'], {'xerr': 'sigma_X', 'yerr': 'sigma_Y', 'linestyle': '"""None"""'}), "(X, Y, xerr=sigma_X, yerr=sigma_Y, linestyle='None')\n", (525, 577), True, 'import matplotlib.pyplot as plt\n'), ((766, 787), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$l, мм$"""'], {}), "('$l, мм$')\n", (776, 787), True, 'import matplotlib.pyplot as plt\n'), ((788, 817), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P, 10^{-3} Па$"""'], {}), "('$P, 10^{-3} Па$')\n", (798, 817), True, 'import matplotlib.pyplot as plt\n'), ((818, 866), 'matplotlib.pyplot.plot', 'plt.plot', (['X', '(k * X + b)', '"""r"""'], {'label': '"""Fitted line"""'}), "(X, k * X + b, 'r', label='Fitted line')\n", (826, 866), True, 'import matplotlib.pyplot as plt\n'), ((867, 881), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (875, 881), True, 'import matplotlib.pyplot as plt\n'), ((882, 915), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {}), "('./' + name + '.png')\n", (893, 915), True, 'import matplotlib.pyplot 
as plt\n'), ((393, 430), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'Y[1:]'], {'rcond': 'None'}), '(A, Y[1:], rcond=None)\n', (408, 430), True, 'import numpy as np\n')] |
import numpy as np
from pandas import read_csv
# an example of asia bayesian net:
# https://www.eecis.udel.edu/~shatkay/Course/papers/Lauritzen1988.pdf
class BayesianNet(object):
def __init__(self, names, edges, tables=None):
self.n_nodes = len(names)
if tables is None:
tables = [[0]] * self.n_nodes
self.nodes = [{'name': name, 'table': np.array(
table)} for name, table in zip(names, tables)]
self.name2idx = {k: v for v, k in enumerate(names)}
self.graph = np.zeros((self.n_nodes, self.n_nodes))
for edge in edges:
self.graph[self.name2idx[edge[1]], self.name2idx[edge[0]]] = 1
self.binary = np.array(
[1 << self.n_nodes - 1 - i for i in range(self.n_nodes)])
def fit(self, data):
data_size = len(data)
for i, node in enumerate(self.nodes):
table = []
parents = self.graph[i] == 1
marginal = data[:, parents]
index = np.zeros(data.shape[0])
if marginal.shape[1] > 0:
index = (
marginal * self.binary[-marginal.shape[1]:]).sum(axis=1)
for j in range(2**parents.sum()):
table.append(data[(index == j), i].sum() / (index == j).sum())
node['table'] = np.array(table)
def joint_p(self, values):
p = 1
for i in range(self.n_nodes):
index = 0
parents = self.graph[i] == 1
if parents.sum() > 0:
index = np.dot(values[parents], self.binary[-parents.sum():])
p *= (1 - values[i]) + (2 * values[i] - 1) * \
self.nodes[i]['table'][int(index)]
return p
def marginal_p(self, condition):
p = 0
values = -np.ones(self.n_nodes)
for v in condition:
values[self.name2idx[v[1]]] = int(v[0] != '~')
mask = np.arange(self.n_nodes)[(values == -1)]
n_unkowns = self.n_nodes - len(condition)
for i in range(2**n_unkowns):
values[mask] = np.array(
[int(x) for x in '{:0{size}b}'.format(i, size=n_unkowns)])
p += self.joint_p(values)
return p
def query(self, v, condition):
p_pos = self.marginal_p([f'+{v}'] + condition) / self.marginal_p(condition)
return [1 - p_pos, p_pos]
def get_asia_data(url):
return read_csv(url).apply(lambda x: x == 'yes').astype(int).values
def main():
names = 'ATSLBEXD'
edges = ['AT', 'SL', 'SB', 'TE', 'LE', 'BD', 'EX', 'ED']
#tables = [[0.01], [0.01, 0.05], [0.5], [0.01, 0.1], [0.3, 0.6], [0, 1, 1, 1], [0.05, 0.98], [0.1, 0.7, 0.8, 0.9]]
# also can use predefined conditional tables
bn = BayesianNet(list(names), edges)
asia_url = 'http://www.ccd.pitt.edu/wiki/images/ASIA10k.csv'
bn.fit(get_asia_data(asia_url))
print(bn.nodes)
for condition in [[], ['+A', '~S'], ['+A', '~S', '~D', '+X']]:
for c in ['T', 'L', 'B', 'E']:
print('p({}|{})={}'.format(c, ','.join(
condition), bn.query(c, condition)))
if __name__ == "__main__":
main()
| [
"numpy.ones",
"pandas.read_csv",
"numpy.array",
"numpy.zeros",
"numpy.arange"
] | [((532, 570), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.n_nodes)'], {}), '((self.n_nodes, self.n_nodes))\n', (540, 570), True, 'import numpy as np\n'), ((1001, 1024), 'numpy.zeros', 'np.zeros', (['data.shape[0]'], {}), '(data.shape[0])\n', (1009, 1024), True, 'import numpy as np\n'), ((1319, 1334), 'numpy.array', 'np.array', (['table'], {}), '(table)\n', (1327, 1334), True, 'import numpy as np\n'), ((1791, 1812), 'numpy.ones', 'np.ones', (['self.n_nodes'], {}), '(self.n_nodes)\n', (1798, 1812), True, 'import numpy as np\n'), ((1915, 1938), 'numpy.arange', 'np.arange', (['self.n_nodes'], {}), '(self.n_nodes)\n', (1924, 1938), True, 'import numpy as np\n'), ((382, 397), 'numpy.array', 'np.array', (['table'], {}), '(table)\n', (390, 397), True, 'import numpy as np\n'), ((2401, 2414), 'pandas.read_csv', 'read_csv', (['url'], {}), '(url)\n', (2409, 2414), False, 'from pandas import read_csv\n')] |
# -*- coding: utf-8 -*-
import numpy as np
class Population:
"""
A Population is a measure over the the cells at given timepoint.
Parameters
----------
time : int or float
The time at which the cells where measured.
p : 1-D array-like
Measure over the cells at the given timepoint.
name : str
Optional population name.
"""
def __init__(self, time, p, name=None):
self.time = time
self.p = np.asarray(p, dtype=np.float64)
self.name = name
def normalize(self):
"""
Make the measure sum to 1, i.e. be a probability distribution over cells.
"""
self.p = self.p / self.p.sum()
def make_binary(self):
"""
Set non-zero values to 1.
"""
p = np.zeros(len(self.p))
p[self.p > 0.0] = 1.0
self.p = p
@staticmethod
def get_missing_population(*populations):
initial_p_sum = np.array([pop.p for pop in populations]).T.sum(axis=1)
missing_cells = np.where(initial_p_sum == 0)[0]
if len(missing_cells) > 0:
missing_cells_p = np.zeros_like(populations[0].p)
missing_cells_p[missing_cells] = 1.0
return Population(populations[0].time, missing_cells_p, 'Other')
return None
@staticmethod
def copy(*populations, normalize=None, add_missing=False):
populations_copy = []
for pop in populations:
pop_copy = Population(pop.time, pop.p, pop.name)
populations_copy.append(pop_copy)
populations = populations_copy
if add_missing:
# add "other" population if any cells are missing across all populations
missing_pop = Population.get_missing_population(*populations)
if missing_pop is not None:
populations.append(missing_pop)
if normalize or normalize == False:
for pop in populations:
if normalize:
pop.normalize()
else:
pop.make_binary()
return populations
| [
"numpy.where",
"numpy.array",
"numpy.zeros_like",
"numpy.asarray"
] | [((470, 501), 'numpy.asarray', 'np.asarray', (['p'], {'dtype': 'np.float64'}), '(p, dtype=np.float64)\n', (480, 501), True, 'import numpy as np\n'), ((1035, 1063), 'numpy.where', 'np.where', (['(initial_p_sum == 0)'], {}), '(initial_p_sum == 0)\n', (1043, 1063), True, 'import numpy as np\n'), ((1132, 1163), 'numpy.zeros_like', 'np.zeros_like', (['populations[0].p'], {}), '(populations[0].p)\n', (1145, 1163), True, 'import numpy as np\n'), ((956, 996), 'numpy.array', 'np.array', (['[pop.p for pop in populations]'], {}), '([pop.p for pop in populations])\n', (964, 996), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.