code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
File Name: DetectCubeCenter.py
Author: <NAME>
Description: This program calculates the center of a cube object
and sends it back to the Roborio via NetworkTables. This was created
for the 2018 FIRST Robotics Competition.
Copyright (c) <NAME> 2018
"""
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
from networktables import NetworkTables as nt
import logging
import time
from camera import Camera
from datatransfer import DataTransfer
cap = cv2.VideoCapture(0)
cam = Camera()
scale = DataTransfer()
sc = scale.sc
s = scale.s
centerX = 320
centerY = 225
nt.initialize(server=sc.ip)
def main():
for frame in cam.capture_continuous(cam.rawcap, format="bgr", use_video_port=True):
imge = frame.array
canvas = imge
hsv = cv2.cvtColor(imge, cv2.COLOR_BGR2HSV)
lower_red = np.array([20,120,120])
upper_red = np.array([30,255,255])
# Here we are defining range of blue, red, and yellow color in HSV
# This creates a mask of blue, red, and yellow coloured
# objects found in the frame.
mask = cv2.inRange(hsv, lower_red, upper_red)
res = cv2.bitwise_and(imge,imge, mask= mask)
im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
blob = max(contours, key=lambda el: cv2.contourArea(el), default=0)
M = cv2.moments(blob)
if (len(contours) == 0):
print("Empty contours")
else:
pass
center = cam.computeCenter(M)
cv2.circle(canvas, center, 2 ,(255,0,0), -1)
x, y = center
scale.sendScaleData(centerX, centerY)
scale.sendSwitchData(centerX, centerY)
s.putNumber('View', 1)
# The bitwise and of the frame and mask is done so
# that only the blue, red, or yellow coloured objects are highlighted
# and stored in res
blurred = cv2.GaussianBlur(canvas, (5,5), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
lowerBlue = np.array([218,64.3, 81.2])
upperBlue = np.array([219, 96.9, 100])
lowerRed = np.array([359.2, 83.9, 100])
upperRed = np.array([359, 96.9, 100])
screenBlue = cv2.inRange(hsv, lowerBlue, upperBlue)
screenRed = cv2.inRange(hsv, lowerRed, upperRed)
# im_with_keypoints = detectScaleLights(blurred)
# cv2.imshow('Keypoints', im_with_keypoints)
cv2.imshow('Gray', hsv)
cv2.imshow('frame',imge)
cv2.imshow('mask',mask)
cv2.imshow('can',canvas)
cv2.setMouseCallback('Gray', cam.post)
cam.rawcap.truncate(0)
# This displays the frame, mask
# and res which we created in 3 separate windows.
k = cv2.waitKey(33)
if k == ord('a'):
break
print("Exited program loop")
# Destroys all of the HighGUI windows.
cv2.destroyAllWindows()
# release the captured frame
cap.release()
scale.sendScaleData(centerX, centerY)
scale.sendSwitchData(centerX, centerY)
if __name__ == '__main__':
main()
| [
"cv2.GaussianBlur",
"cv2.findContours",
"cv2.circle",
"cv2.contourArea",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.moments",
"camera.Camera",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.setMouseCallback",
"numpy.array",
"networktables.NetworkTables.initialize",
"cv2.destroyAllWin... | [((525, 544), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (541, 544), False, 'import cv2\n'), ((552, 560), 'camera.Camera', 'Camera', ([], {}), '()\n', (558, 560), False, 'from camera import Camera\n'), ((570, 584), 'datatransfer.DataTransfer', 'DataTransfer', ([], {}), '()\n', (582, 584), False, 'from datatransfer import DataTransfer\n'), ((644, 671), 'networktables.NetworkTables.initialize', 'nt.initialize', ([], {'server': 'sc.ip'}), '(server=sc.ip)\n', (657, 671), True, 'from networktables import NetworkTables as nt\n'), ((3106, 3129), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3127, 3129), False, 'import cv2\n'), ((853, 890), 'cv2.cvtColor', 'cv2.cvtColor', (['imge', 'cv2.COLOR_BGR2HSV'], {}), '(imge, cv2.COLOR_BGR2HSV)\n', (865, 890), False, 'import cv2\n'), ((912, 936), 'numpy.array', 'np.array', (['[20, 120, 120]'], {}), '([20, 120, 120])\n', (920, 936), True, 'import numpy as np\n'), ((956, 980), 'numpy.array', 'np.array', (['[30, 255, 255]'], {}), '([30, 255, 255])\n', (964, 980), True, 'import numpy as np\n'), ((1182, 1220), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_red', 'upper_red'], {}), '(hsv, lower_red, upper_red)\n', (1193, 1220), False, 'import cv2\n'), ((1238, 1276), 'cv2.bitwise_and', 'cv2.bitwise_and', (['imge', 'imge'], {'mask': 'mask'}), '(imge, imge, mask=mask)\n', (1253, 1276), False, 'import cv2\n'), ((1313, 1375), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1329, 1375), False, 'import cv2\n'), ((1476, 1493), 'cv2.moments', 'cv2.moments', (['blob'], {}), '(blob)\n', (1487, 1493), False, 'import cv2\n'), ((1668, 1714), 'cv2.circle', 'cv2.circle', (['canvas', 'center', '(2)', '(255, 0, 0)', '(-1)'], {}), '(canvas, center, 2, (255, 0, 0), -1)\n', (1678, 1714), False, 'import cv2\n'), ((2078, 2113), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['canvas', '(5, 5)', 
'(0)'], {}), '(canvas, (5, 5), 0)\n', (2094, 2113), False, 'import cv2\n'), ((2129, 2169), 'cv2.cvtColor', 'cv2.cvtColor', (['blurred', 'cv2.COLOR_BGR2HSV'], {}), '(blurred, cv2.COLOR_BGR2HSV)\n', (2141, 2169), False, 'import cv2\n'), ((2191, 2218), 'numpy.array', 'np.array', (['[218, 64.3, 81.2]'], {}), '([218, 64.3, 81.2])\n', (2199, 2218), True, 'import numpy as np\n'), ((2239, 2265), 'numpy.array', 'np.array', (['[219, 96.9, 100]'], {}), '([219, 96.9, 100])\n', (2247, 2265), True, 'import numpy as np\n'), ((2286, 2314), 'numpy.array', 'np.array', (['[359.2, 83.9, 100]'], {}), '([359.2, 83.9, 100])\n', (2294, 2314), True, 'import numpy as np\n'), ((2335, 2361), 'numpy.array', 'np.array', (['[359, 96.9, 100]'], {}), '([359, 96.9, 100])\n', (2343, 2361), True, 'import numpy as np\n'), ((2384, 2422), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lowerBlue', 'upperBlue'], {}), '(hsv, lowerBlue, upperBlue)\n', (2395, 2422), False, 'import cv2\n'), ((2444, 2480), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lowerRed', 'upperRed'], {}), '(hsv, lowerRed, upperRed)\n', (2455, 2480), False, 'import cv2\n'), ((2612, 2635), 'cv2.imshow', 'cv2.imshow', (['"""Gray"""', 'hsv'], {}), "('Gray', hsv)\n", (2622, 2635), False, 'import cv2\n'), ((2645, 2670), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'imge'], {}), "('frame', imge)\n", (2655, 2670), False, 'import cv2\n'), ((2679, 2703), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (2689, 2703), False, 'import cv2\n'), ((2712, 2737), 'cv2.imshow', 'cv2.imshow', (['"""can"""', 'canvas'], {}), "('can', canvas)\n", (2722, 2737), False, 'import cv2\n'), ((2746, 2784), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Gray"""', 'cam.post'], {}), "('Gray', cam.post)\n", (2766, 2784), False, 'import cv2\n'), ((2955, 2970), 'cv2.waitKey', 'cv2.waitKey', (['(33)'], {}), '(33)\n', (2966, 2970), False, 'import cv2\n'), ((1421, 1440), 'cv2.contourArea', 'cv2.contourArea', (['el'], {}), '(el)\n', (1436, 1440), False, 
'import cv2\n')] |
"""
In this example we use the pysid library to estimate a SISO arx model
"""
#Import Libraries
from numpy import sqrt
from numpy.random import rand, randn #To generate the experiment
from scipy.signal import lfilter #To generate the data
from pysid import armax #To estimate an arx model
#True System
na = 2
nb = 1
nc = 2
nk = 1
#with the following true parameters
Ao = [1, -1.2, 0.36]
Bo = [0, 0.5, 0.1]
Co = [1., 0.8, -0.1]
#True parameter vector
thetao = [-1.2, 0.36, 0.5, 0.1, 0.8, -0.1]
#Generate the experiment
#The true system is generates by the following relation:
# S: y(t) = Go(q)*u(t) + Ho(q)*e(t),
#with u(t) the input and e white noise.
#Number of Samples
N = 400
#Take u as uniform
u = -sqrt(3) + 2*sqrt(3)*rand(N, 1)
#Generate gaussian white noise with standat deviation 0.01
e = 0.01*randn(N, 1)
#Calculate the y through S (ARX: G(q) = B(q)/A(q) and H(q) = 1/A(q))
y = lfilter(Bo, Ao, u, axis=0) + lfilter(Co, Ao, e, axis=0)
#Estimate the model and get only the parameters
A, B, C = armax(na, nb, nc, nk, u, y)
| [
"numpy.random.randn",
"scipy.signal.lfilter",
"pysid.armax",
"numpy.random.rand",
"numpy.sqrt"
] | [((1024, 1051), 'pysid.armax', 'armax', (['na', 'nb', 'nc', 'nk', 'u', 'y'], {}), '(na, nb, nc, nk, u, y)\n', (1029, 1051), False, 'from pysid import armax\n'), ((825, 836), 'numpy.random.randn', 'randn', (['N', '(1)'], {}), '(N, 1)\n', (830, 836), False, 'from numpy.random import rand, randn\n'), ((910, 936), 'scipy.signal.lfilter', 'lfilter', (['Bo', 'Ao', 'u'], {'axis': '(0)'}), '(Bo, Ao, u, axis=0)\n', (917, 936), False, 'from scipy.signal import lfilter\n'), ((939, 965), 'scipy.signal.lfilter', 'lfilter', (['Co', 'Ao', 'e'], {'axis': '(0)'}), '(Co, Ao, e, axis=0)\n', (946, 965), False, 'from scipy.signal import lfilter\n'), ((726, 733), 'numpy.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (730, 733), False, 'from numpy import sqrt\n'), ((746, 756), 'numpy.random.rand', 'rand', (['N', '(1)'], {}), '(N, 1)\n', (750, 756), False, 'from numpy.random import rand, randn\n'), ((738, 745), 'numpy.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (742, 745), False, 'from numpy import sqrt\n')] |
#!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/TrickyTroll/ML-intro/blob/Tricky/OCR.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Following a tutorial to learn about classification
#
# https://www.pyimagesearch.com/2020/08/24/ocr-handwriting-recognition-with-opencv-keras-and-tensorflow/
#
# https://www.pyimagesearch.com/2020/08/17/ocr-with-keras-tensorflow-and-deep-learning/
# In[1]:
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import mnist
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
# ## Loading the minst data
# In[167]:
((train_data, train_labels), (test_data, test_labels)) = mnist.load_data()
num_data = np.vstack([train_data, test_data])
num_labels = np.hstack([train_labels, test_labels])
# In[168]:
train_data.shape
# In[169]:
len(train_labels)
# In[170]:
train_labels
# In[171]:
test_data.shape
# ## Preprocessing
# In[172]:
plt.figure()
plt.imshow(train_data[0])
plt.colorbar()
plt.grid(False)
plt.show()
# In[173]:
train_labels[0]
# In[174]:
train_images = train_data / 255.0
test_images = test_data / 255.0
# In[175]:
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(train_labels[i])
plt.show()
# In[176]:
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
# In[177]:
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# In[178]:
model.fit(train_images, train_labels, epochs=10)
# In[179]:
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
# In[180]:
model.save("/content/drive/My Drive/OCR_Models/test", save_format="h5")
# In[181]:
model.summary()
# In[182]:
probability_model = tf.keras.Sequential([model,
tf.keras.layers.Softmax()])
# In[183]:
predictions = probability_model.predict(test_images)
# In[184]:
predictions[0]
# In[185]:
np.argmax(predictions[0])
# In[186]:
test_labels[0]
# In[187]:
def plot_image(i, predictions_array, true_label, img):
true_label, img = true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(predicted_label,
100*np.max(predictions_array),
true_label),
color=color)
def plot_value_array(i, predictions_array, true_label):
true_label = true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
# In[188]:
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# In[189]:
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# In[190]:
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
# In[191]:
# Grab an image from the test dataset.
img = test_images[1]
print(img.shape)
# In[192]:
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
# In[193]:
predictions_single = probability_model.predict(img)
print(predictions_single)
# In[194]:
plot_value_array(1, predictions_single[0], [0,1,2,3,4,5,6,7,8,9])
_ = plt.xticks(range(10), [0,1,2,3,4,5,6,7,8,9], rotation=45) #The labels are wrong
# In[195]:
np.argmax(predictions_single[0])
# ## Testing it on my own images
# In[283]:
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
def take_photo(filename='photo.jpg', quality=0.8):
js = Javascript('''
async function takePhoto(quality) {
const div = document.createElement('div');
const capture = document.createElement('button');
capture.textContent = 'Capture';
div.appendChild(capture);
const video = document.createElement('video');
video.style.display = 'block';
const stream = await navigator.mediaDevices.getUserMedia({video: true});
document.body.appendChild(div);
div.appendChild(video);
video.srcObject = stream;
await video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
// Wait for Capture to be clicked.
await new Promise((resolve) => capture.onclick = resolve);
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
stream.getVideoTracks()[0].stop();
div.remove();
return canvas.toDataURL('image/jpeg', quality);
}
''')
display(js)
data = eval_js('takePhoto({})'.format(quality))
binary = b64decode(data.split(',')[1])
with open(filename, 'wb') as f:
f.write(binary)
return filename
# In[284]:
from IPython.display import Image
try:
filename = take_photo()
print('Saved to {}'.format(filename))
# Show the image which was just taken.
display(Image(filename))
except Exception as err:
# Errors will be thrown if the user does not have a webcam or if they do not
# grant the page permission to access it.
print(str(err))
# In[285]:
import numpy as np
import imutils
import cv2
from google.colab.patches import cv2_imshow
from matplotlib import pyplot as plt
# In[286]:
image = cv2.imread("photo.jpg")
resized_image = cv2.resize(image, (28, 28))
gray = cv2.cvtColor(resized_image, cv2.COLOR_RGB2GRAY)
# blurred = cv2.GaussianBlur(gray, (5, 5), 0)
(thresh, blackAndWhiteImage) = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY)
imagem = cv2.bitwise_not(blackAndWhiteImage)
cv2_imshow(imagem)
# In[287]:
imagem.shape
# In[288]:
img2 = (np.expand_dims(imagem,0))
print(img2.shape)
# In[289]:
plt.figure()
plt.imshow(imagem)
plt.colorbar()
plt.grid(False)
plt.show()
# In[290]:
predictions_array = probability_model.predict(img2)
print(predictions_array)
# In[291]:
predicted_label = np.argmax(predictions_array)
# In[292]:
predicted_label
# In[293]:
plot_value_array(1, predictions_array[0], [0,1,2,3,4,5,6,7,8,9])
_ = plt.xticks(range(10), [0,1,2,3,4,5,6,7,8,9], rotation=45) #The labels are wrong
# In[293]:
| [
"numpy.argmax",
"tensorflow.keras.layers.Dense",
"google.colab.patches.cv2_imshow",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"matplotlib.pypl... | [((831, 848), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (846, 848), False, 'from tensorflow.keras.datasets import mnist\n'), ((860, 894), 'numpy.vstack', 'np.vstack', (['[train_data, test_data]'], {}), '([train_data, test_data])\n', (869, 894), True, 'import numpy as np\n'), ((908, 946), 'numpy.hstack', 'np.hstack', (['[train_labels, test_labels]'], {}), '([train_labels, test_labels])\n', (917, 946), True, 'import numpy as np\n'), ((1106, 1118), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1116, 1118), True, 'from matplotlib import pyplot as plt\n'), ((1119, 1144), 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_data[0]'], {}), '(train_data[0])\n', (1129, 1144), True, 'from matplotlib import pyplot as plt\n'), ((1145, 1159), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1157, 1159), True, 'from matplotlib import pyplot as plt\n'), ((1160, 1175), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1168, 1175), True, 'from matplotlib import pyplot as plt\n'), ((1176, 1186), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1184, 1186), True, 'from matplotlib import pyplot as plt\n'), ((1314, 1342), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1324, 1342), True, 'from matplotlib import pyplot as plt\n'), ((1529, 1539), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1537, 1539), True, 'from matplotlib import pyplot as plt\n'), ((2430, 2455), 'numpy.argmax', 'np.argmax', (['predictions[0]'], {}), '(predictions[0])\n', (2439, 2455), True, 'import numpy as np\n'), ((3419, 3445), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)'}), '(figsize=(6, 3))\n', (3429, 3445), True, 'from matplotlib import pyplot as plt\n'), ((3445, 3465), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3456, 3465), True, 'from matplotlib import pyplot as 
plt\n'), ((3520, 3540), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3531, 3540), True, 'from matplotlib import pyplot as plt\n'), ((3589, 3599), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3597, 3599), True, 'from matplotlib import pyplot as plt\n'), ((3622, 3648), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)'}), '(figsize=(6, 3))\n', (3632, 3648), True, 'from matplotlib import pyplot as plt\n'), ((3648, 3668), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3659, 3668), True, 'from matplotlib import pyplot as plt\n'), ((3723, 3743), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3734, 3743), True, 'from matplotlib import pyplot as plt\n'), ((3792, 3802), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3800, 3802), True, 'from matplotlib import pyplot as plt\n'), ((4022, 4074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * 2 * num_cols, 2 * num_rows)'}), '(figsize=(2 * 2 * num_cols, 2 * num_rows))\n', (4032, 4074), True, 'from matplotlib import pyplot as plt\n'), ((4292, 4310), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4308, 4310), True, 'from matplotlib import pyplot as plt\n'), ((4311, 4321), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4319, 4321), True, 'from matplotlib import pyplot as plt\n'), ((4492, 4514), 'numpy.expand_dims', 'np.expand_dims', (['img', '(0)'], {}), '(img, 0)\n', (4506, 4514), True, 'import numpy as np\n'), ((4807, 4839), 'numpy.argmax', 'np.argmax', (['predictions_single[0]'], {}), '(predictions_single[0])\n', (4816, 4839), True, 'import numpy as np\n'), ((6860, 6883), 'cv2.imread', 'cv2.imread', (['"""photo.jpg"""'], {}), "('photo.jpg')\n", (6870, 6883), False, 'import cv2\n'), ((6900, 6927), 'cv2.resize', 'cv2.resize', (['image', '(28, 28)'], {}), '(image, (28, 28))\n', (6910, 6927), False, 'import cv2\n'), 
((6935, 6982), 'cv2.cvtColor', 'cv2.cvtColor', (['resized_image', 'cv2.COLOR_RGB2GRAY'], {}), '(resized_image, cv2.COLOR_RGB2GRAY)\n', (6947, 6982), False, 'import cv2\n'), ((7060, 7107), 'cv2.threshold', 'cv2.threshold', (['gray', '(80)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray, 80, 255, cv2.THRESH_BINARY)\n', (7073, 7107), False, 'import cv2\n'), ((7117, 7152), 'cv2.bitwise_not', 'cv2.bitwise_not', (['blackAndWhiteImage'], {}), '(blackAndWhiteImage)\n', (7132, 7152), False, 'import cv2\n'), ((7153, 7171), 'google.colab.patches.cv2_imshow', 'cv2_imshow', (['imagem'], {}), '(imagem)\n', (7163, 7171), False, 'from google.colab.patches import cv2_imshow\n'), ((7223, 7248), 'numpy.expand_dims', 'np.expand_dims', (['imagem', '(0)'], {}), '(imagem, 0)\n', (7237, 7248), True, 'import numpy as np\n'), ((7283, 7295), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7293, 7295), True, 'from matplotlib import pyplot as plt\n'), ((7296, 7314), 'matplotlib.pyplot.imshow', 'plt.imshow', (['imagem'], {}), '(imagem)\n', (7306, 7314), True, 'from matplotlib import pyplot as plt\n'), ((7315, 7329), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7327, 7329), True, 'from matplotlib import pyplot as plt\n'), ((7330, 7345), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (7338, 7345), True, 'from matplotlib import pyplot as plt\n'), ((7346, 7356), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7354, 7356), True, 'from matplotlib import pyplot as plt\n'), ((7483, 7511), 'numpy.argmax', 'np.argmax', (['predictions_array'], {}), '(predictions_array)\n', (7492, 7511), True, 'import numpy as np\n'), ((1366, 1390), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(5)', '(i + 1)'], {}), '(5, 5, i + 1)\n', (1377, 1390), True, 'from matplotlib import pyplot as plt\n'), ((1391, 1405), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1401, 1405), True, 'from matplotlib import pyplot as plt\n'), ((1410, 1424), 
'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1420, 1424), True, 'from matplotlib import pyplot as plt\n'), ((1429, 1444), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1437, 1444), True, 'from matplotlib import pyplot as plt\n'), ((1449, 1496), 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_images[i]'], {'cmap': 'plt.cm.binary'}), '(train_images[i], cmap=plt.cm.binary)\n', (1459, 1496), True, 'from matplotlib import pyplot as plt\n'), ((1501, 1528), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['train_labels[i]'], {}), '(train_labels[i])\n', (1511, 1528), True, 'from matplotlib import pyplot as plt\n'), ((2600, 2615), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (2608, 2615), True, 'from matplotlib import pyplot as plt\n'), ((2618, 2632), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2628, 2632), True, 'from matplotlib import pyplot as plt\n'), ((2635, 2649), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2645, 2649), True, 'from matplotlib import pyplot as plt\n'), ((2653, 2688), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': 'plt.cm.binary'}), '(img, cmap=plt.cm.binary)\n', (2663, 2688), True, 'from matplotlib import pyplot as plt\n'), ((2710, 2738), 'numpy.argmax', 'np.argmax', (['predictions_array'], {}), '(predictions_array)\n', (2719, 2738), True, 'import numpy as np\n'), ((3118, 3133), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (3126, 3133), True, 'from matplotlib import pyplot as plt\n'), ((3160, 3174), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3170, 3174), True, 'from matplotlib import pyplot as plt\n'), ((3245, 3261), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (3253, 3261), True, 'from matplotlib import pyplot as plt\n'), ((3282, 3310), 'numpy.argmax', 'np.argmax', (['predictions_array'], {}), '(predictions_array)\n', (3291, 3310), True, 'import numpy 
as np\n'), ((4099, 4145), 'matplotlib.pyplot.subplot', 'plt.subplot', (['num_rows', '(2 * num_cols)', '(2 * i + 1)'], {}), '(num_rows, 2 * num_cols, 2 * i + 1)\n', (4110, 4145), True, 'from matplotlib import pyplot as plt\n'), ((4200, 4246), 'matplotlib.pyplot.subplot', 'plt.subplot', (['num_rows', '(2 * num_cols)', '(2 * i + 2)'], {}), '(num_rows, 2 * num_cols, 2 * i + 2)\n', (4211, 4246), True, 'from matplotlib import pyplot as plt\n'), ((5065, 6171), 'IPython.display.Javascript', 'Javascript', (['"""\n async function takePhoto(quality) {\n const div = document.createElement(\'div\');\n const capture = document.createElement(\'button\');\n capture.textContent = \'Capture\';\n div.appendChild(capture);\n\n const video = document.createElement(\'video\');\n video.style.display = \'block\';\n const stream = await navigator.mediaDevices.getUserMedia({video: true});\n\n document.body.appendChild(div);\n div.appendChild(video);\n video.srcObject = stream;\n await video.play();\n\n // Resize the output to fit the video element.\n google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);\n\n // Wait for Capture to be clicked.\n await new Promise((resolve) => capture.onclick = resolve);\n\n const canvas = document.createElement(\'canvas\');\n canvas.width = video.videoWidth;\n canvas.height = video.videoHeight;\n canvas.getContext(\'2d\').drawImage(video, 0, 0);\n stream.getVideoTracks()[0].stop();\n div.remove();\n return canvas.toDataURL(\'image/jpeg\', quality);\n }\n """'], {}), '(\n """\n async function takePhoto(quality) {\n const div = document.createElement(\'div\');\n const capture = document.createElement(\'button\');\n capture.textContent = \'Capture\';\n div.appendChild(capture);\n\n const video = document.createElement(\'video\');\n video.style.display = \'block\';\n const stream = await navigator.mediaDevices.getUserMedia({video: true});\n\n document.body.appendChild(div);\n div.appendChild(video);\n video.srcObject = stream;\n await 
video.play();\n\n // Resize the output to fit the video element.\n google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);\n\n // Wait for Capture to be clicked.\n await new Promise((resolve) => capture.onclick = resolve);\n\n const canvas = document.createElement(\'canvas\');\n canvas.width = video.videoWidth;\n canvas.height = video.videoHeight;\n canvas.getContext(\'2d\').drawImage(video, 0, 0);\n stream.getVideoTracks()[0].stop();\n div.remove();\n return canvas.toDataURL(\'image/jpeg\', quality);\n }\n """\n )\n', (5075, 6171), False, 'from IPython.display import display, Javascript\n'), ((6164, 6175), 'IPython.display.display', 'display', (['js'], {}), '(js)\n', (6171, 6175), False, 'from IPython.display import display, Javascript\n'), ((1586, 1628), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (1606, 1628), False, 'from tensorflow import keras\n'), ((1634, 1676), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1652, 1676), False, 'from tensorflow import keras\n'), ((1682, 1704), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {}), '(10)\n', (1700, 1704), False, 'from tensorflow import keras\n'), ((1774, 1837), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (1819, 1837), True, 'import tensorflow as tf\n'), ((2289, 2314), 'tensorflow.keras.layers.Softmax', 'tf.keras.layers.Softmax', ([], {}), '()\n', (2312, 2314), True, 'import tensorflow as tf\n'), ((6513, 6528), 'IPython.display.Image', 'Image', (['filename'], {}), '(filename)\n', (6518, 6528), False, 'from IPython.display import Image\n'), ((2913, 2938), 'numpy.max', 'np.max', (['predictions_array'], {}), '(predictions_array)\n', (2919, 2938), True, 'import numpy as np\n')] |
from pm4py.objects.log.importer.xes import importer as xes_importer
from pm4py.objects.log.util import get_log_representation
import pandas as pd
from sklearn.ensemble import IsolationForest
import numpy as np
import pickle as pickle
def find_anonmalies_with_isolation_forest(log, original_features, original_log_df,result_path):
log_features, feature_names_log = get_log_representation.get_representation(log,str_ev_attr=["concept:name"],str_tr_attr=[],num_ev_attr=[],num_tr_attr=[],str_evsucc_attr=["concept:name"])
log_df = pd.DataFrame(log_features, columns=feature_names_log)
features = np.union1d(original_features, feature_names_log)
new_features_train = np.setxor1d(original_features,features)
new_features_df = pd.DataFrame(columns=new_features_train)
train_df = original_log_df.append(new_features_df)
train_df = train_df.fillna(0)
model = IsolationForest()
model.fit(train_df)
new_features_test = np.setxor1d(feature_names_log,features)
new_features_df = pd.DataFrame(columns=new_features_test)
test_df = log_df.append(new_features_df)
test_df = test_df.fillna(0)
log_df["scores"] = model.decision_function(test_df)
results = dict()
results["avg"] = log_df["scores"].mean()
count_traces = log_df["scores"].count() + 1
anonmalies = log_df[log_df.scores <= 0].shape[0]
results["anonmaly_relative_frequency"] = anonmalies/count_traces
print(results)
with open(result_path,'wb') as file:
pickle.dump(results,file) | [
"pandas.DataFrame",
"pickle.dump",
"sklearn.ensemble.IsolationForest",
"numpy.setxor1d",
"pm4py.objects.log.util.get_log_representation.get_representation",
"numpy.union1d"
] | [((370, 537), 'pm4py.objects.log.util.get_log_representation.get_representation', 'get_log_representation.get_representation', (['log'], {'str_ev_attr': "['concept:name']", 'str_tr_attr': '[]', 'num_ev_attr': '[]', 'num_tr_attr': '[]', 'str_evsucc_attr': "['concept:name']"}), "(log, str_ev_attr=['concept:name'],\n str_tr_attr=[], num_ev_attr=[], num_tr_attr=[], str_evsucc_attr=[\n 'concept:name'])\n", (411, 537), False, 'from pm4py.objects.log.util import get_log_representation\n'), ((537, 590), 'pandas.DataFrame', 'pd.DataFrame', (['log_features'], {'columns': 'feature_names_log'}), '(log_features, columns=feature_names_log)\n', (549, 590), True, 'import pandas as pd\n'), ((607, 655), 'numpy.union1d', 'np.union1d', (['original_features', 'feature_names_log'], {}), '(original_features, feature_names_log)\n', (617, 655), True, 'import numpy as np\n'), ((682, 722), 'numpy.setxor1d', 'np.setxor1d', (['original_features', 'features'], {}), '(original_features, features)\n', (693, 722), True, 'import numpy as np\n'), ((744, 784), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'new_features_train'}), '(columns=new_features_train)\n', (756, 784), True, 'import pandas as pd\n'), ((887, 904), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {}), '()\n', (902, 904), False, 'from sklearn.ensemble import IsolationForest\n'), ((954, 994), 'numpy.setxor1d', 'np.setxor1d', (['feature_names_log', 'features'], {}), '(feature_names_log, features)\n', (965, 994), True, 'import numpy as np\n'), ((1016, 1055), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'new_features_test'}), '(columns=new_features_test)\n', (1028, 1055), True, 'import pandas as pd\n'), ((1495, 1521), 'pickle.dump', 'pickle.dump', (['results', 'file'], {}), '(results, file)\n', (1506, 1521), True, 'import pickle as pickle\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 27 16:35:53 2019
@author: essys
"""
import json
import numpy as np
import os
import skimage
import cv2
import time
import scipy.misc
from PIL import Image
dataset_dir = "/home/essys/datasets/bdd_dataset"
input_dir = 'labels'
mode = 'val'
images_folder = os.path.join(dataset_dir, 'images','100k',mode)
annotations = json.load(open(os.path.join(os.path.join(dataset_dir, input_dir), "bdd100k_labels_images_" + mode+".json")))
annotation_root = os.path.join(os.path.join(dataset_dir, "annotation"), input_dir)
def get_mask_from_polygons(polygon, height, width):
mask = np.zeros([height, width,3],
dtype=np.uint8)
#print ('mask shape',mask.shape)
for i, p in enumerate(polygon):
# print ('i',i)
# print ('p',p)
# Get indexes of pixels inside the polygon and set them to 1
x=[]
y=[]
for d in p:
for l in d['vertices']:
x.append(l[0])
y.append(l[1])
# print ('x',x)
# print ('y',y)
# rr, cc = skimage.draw.polygon(y,x, )
print(x, y)
# print(rr,cc)
# mask[rr, cc] = [255,255,255]
# print(mask)
return mask
i = 0
categories = []
for a in annotations:
i += 1
start = time.time()
categories.extend([d['category'] for d in a['labels']])
# print(np.unique(categories))
polygons=[d['poly2d'] for d in a['labels'] if d['category']=="lane"]
image_path = os.path.join(images_folder, a['name'])
height, width = [720, 1280]#image.shape[:2]
mask = np.zeros([height, width,3],
dtype=np.uint8)
mask = cv2.imread(image_path, -1)
for i, p in enumerate(polygons):
for d in p:
# pt1 = d['vertices'][0]
# pt2 = d['vertices'][1]
# for l in d['vertices']:
# mask[int(l[1]), int(l[0]) ] = [255,255,255]
for k in np.arange(0, len(d['vertices']), 2):
cv2.polylines(mask, np.int32([d['vertices'][k:k+2]]), isClosed=False,color=[255,255,255], thickness=1)
# self.images.append(image_path)
# img = get_mask_from_polygons(polygons,height, width)
# print(np.unique(img))
img = np.array(mask)
# print(np.unique(img))
# mask_path = os.path.join('/media/ixtiyor/087E6CE67E6CCDCE/annotation/',input_dir, a['name'].split('.')[0] +".png" )
# img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# cv2.imwrite(mask_path, img, [cv2.IMWRITE_JPEG_QUALITY, 255])
# scipy.misc.toimage(img, cmin=0.0, cmax=255.0).save(mask_path)
# im = Image.fromarray(img,"RGB" )
# im.save(mask_path)
# read_image = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)
# print(np.shape(img), type(img))
# np.save(mask_path,img)
# read_image = np.load(mask_path)
# print()
# print(i, "qolgan vaqt " , (time.time()-start) * (10000-i) * 1./60, " min")
# break
cv2.imshow("img", img)
if(cv2.waitKey(0) ==27):
cv2.destroyAllWindows()
break | [
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"time.time",
"cv2.imread",
"numpy.array",
"numpy.int32",
"cv2.imshow",
"os.path.join"
] | [((329, 378), 'os.path.join', 'os.path.join', (['dataset_dir', '"""images"""', '"""100k"""', 'mode'], {}), "(dataset_dir, 'images', '100k', mode)\n", (341, 378), False, 'import os\n'), ((531, 570), 'os.path.join', 'os.path.join', (['dataset_dir', '"""annotation"""'], {}), "(dataset_dir, 'annotation')\n", (543, 570), False, 'import os\n'), ((651, 695), 'numpy.zeros', 'np.zeros', (['[height, width, 3]'], {'dtype': 'np.uint8'}), '([height, width, 3], dtype=np.uint8)\n', (659, 695), True, 'import numpy as np\n'), ((1406, 1417), 'time.time', 'time.time', ([], {}), '()\n', (1415, 1417), False, 'import time\n'), ((1609, 1647), 'os.path.join', 'os.path.join', (['images_folder', "a['name']"], {}), "(images_folder, a['name'])\n", (1621, 1647), False, 'import os\n'), ((1707, 1751), 'numpy.zeros', 'np.zeros', (['[height, width, 3]'], {'dtype': 'np.uint8'}), '([height, width, 3], dtype=np.uint8)\n', (1715, 1751), True, 'import numpy as np\n'), ((1782, 1808), 'cv2.imread', 'cv2.imread', (['image_path', '(-1)'], {}), '(image_path, -1)\n', (1792, 1808), False, 'import cv2\n'), ((2350, 2364), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (2358, 2364), True, 'import numpy as np\n'), ((3047, 3069), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (3057, 3069), False, 'import cv2\n'), ((3077, 3091), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3088, 3091), False, 'import cv2\n'), ((3107, 3130), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3128, 3130), False, 'import cv2\n'), ((419, 455), 'os.path.join', 'os.path.join', (['dataset_dir', 'input_dir'], {}), '(dataset_dir, input_dir)\n', (431, 455), False, 'import os\n'), ((2132, 2166), 'numpy.int32', 'np.int32', (["[d['vertices'][k:k + 2]]"], {}), "([d['vertices'][k:k + 2]])\n", (2140, 2166), True, 'import numpy as np\n')] |
# coding: utf-8
# In[1]:
import keras
import tensorflow as tf
from keras import backend as K
import numpy as np
import os
config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
session = tf.Session(config=config)
K.set_session(session)
from keras.models import Sequential,model_from_json
from keras.layers import Dense,Dropout,Activation
from keras.optimizers import SGD,RMSprop,Adamax,Adam,Adagrad
from keras.losses import mean_absolute_percentage_error
from keras.utils import np_utils,generic_utils
from keras import metrics
import error
import math
import multiprocessing as mp
import json
import sys
# Parametres
# ====
# In[2]:
app_name='bessel_Jnu'
numA=1
iteration=1
epochA=20
epochC=15
batch_size=128
eb=[]
net_A=[]
net_C=[]
lenN=0
#error_type="absolute_error"
error_type="rmse"
#error_type="relative_error"
def error_compare(error_type):
if (error_type=="absolute_error"):
print(1)
return error.absolute_error
if (error_type=="rmse"):
print(2)
return error.rmse
if (error_type=="relative_error"):
print(3)
return error.relative_error
error_measure=error_compare(error_type)
# In[3]:
def get_net():
global net_A
global net_C
if (app_name=='fft'):
net_A=[1,2,2,2]
net_C=[1,2,numA+1]
if (app_name=='bessel_Jnu'):
net_A=[2,4,4,1]
net_C=[2,4,numA+1]
if (app_name=='blackscholes'):
net_A=[6,8,1]
net_C=[6,8,numA+1]
if (app_name=='jmeint'):
net_A=[18,32,16,2]
net_C=[18,16,numA+1]
if (app_name=='jpeg'):
net_A=[64,16,64]
net_C=[64,18,numA+1]
if (app_name=='inversek2j'):
net_A=[2,8,2]
net_C=[2,8,numA+1]
if (app_name=='sobel'):
net_A=[9,8,1]
net_C=[9,8,numA+1]
if (app_name=='kmeans'):
net_A=[6,8,4,1]
net_C=[6,8,4,numA+1]
# Input data processing
# ======
# In[4]:
def format_data(data):
try:
if (len(data.shape)==1):
return data.reshape((data.shape[0],1))
else:
return data
except:
print("Error! data is not a numpy object,format_data failed!")
exit(0)
def load_data(app_name):
x_train=np.loadtxt('../data/'+app_name+'/train.x')
y_train=np.loadtxt('../data/'+app_name+'/train.y')
x_test=np.loadtxt('../data/'+app_name+'/test.x')
y_test=np.loadtxt('../data/'+app_name+'/test.y')
return format_data(x_train),format_data(y_train),format_data(x_test),format_data(y_test)
# Output evaludation processing
# =========
# In[5]:
def get_output_name(app_name, method, error_bound, iteration, epochA, epochC, net_A, net_C):
output_name = '{}_{}_eb{}_it{}_epA{}_epC{}_netA{}_netC{}'.format(app_name,
method,
error_bound,
iteration,
epochA,
epochC,
'_'.join([str(x) for x in net_A]),
'_'.join( [str(x) for x in net_C]))
return output_name
# Evaluation
# ======
# In[6]:
def Error_of_Approximator(A,x,y): #Input one Approximator, x, y. Return errorA[lenN]
predictA=A.predict(x)
errorA=[]
for i in range(len(y)):
errorA.append(error_measure(predictA[i],y[i]))
return errorA
def Label_for_C(errorA,eb): #errorA[numA][lenN] return labelA[lenN][numA+1] for training C
labelA=[]
for i in range(lenN):
labelA.append(numA)
for j in range(numA):
if (errorA[j][i]<=eb):
labelA[i]=j
break
labelA=keras.utils.to_categorical(labelA,numA+1)
return labelA
# In[7]:
def evaluate(A, C, x_test, y_test, error_bound):
lenN=x_test
#predictC
predictC=C.predict(x_test)
predictC=np.argmax(predictC,1)
#predictA
predictA=[]
for i in range(numA):
predictA.append(Error_of_Approximator(A[i],x_test,y_test))
#calculate labelA Error Error a with C
labelA=[]
Er=[]
Er_c=[]
for i in range(lenN):
Er.append(predictA[0][i])
labelA.append(numA)
min_error=error_bound
for j in range(numA):
if (predictA[j][i]<error_bound and min_error==error_bound):
labelA[i]=j
min_error=predictA[j][i]
if (predictA[j][i]<Er[i]):
Er[i]=predictA[j][i][0]
if (predictC[i]<numA):
Er_c.append(predictA[predictC[i]][i])
#calculate the final results using predictA, labelA, Er, ErAwithC
accuracy_of_C=sum([(labelA[i]==predictC[i]) for i in range(lenN)])/float(lenN)
recall_of_C=sum([1.0 if labelA[i]<numA and predictC[i]<numA else 0 for i in range(lenN)]) / float(1e-10+sum([1 if (v<numA) else 0 for v in labelA]))
invocation_of_C = float(sum([1 if (v<numA) else 0 for v in predictC])) / float(1e-10 + lenN)
invocation_truly = float(sum([1 if (v<numA) else 0 for v in labelA])) / float(1e-10 + lenN)
mean_relative_error_of_A = sum(Er) / float(1e-10 + len(Er))
mean_relative_error_of_A_with_C = sum(Er_c) / (1e-10 + len(Er_c))
return{'accuracy_of_C':accuracy_of_C,
'recall_of_C':recall_of_C,
'invocation_of_C':invocation_of_C,
'invocation_truly':invocation_truly,
'error_of_A_with_C':mean_relative_error_of_A_with_C,
'error_of_A':mean_relative_error_of_A}
# Accelerator & Classifier
# ===
# In[8]:
def AcceleratorModel(net_list,type=0):
if (len(net_list)<2):
print('Error! input accelerator net structure is wrong!')
exit(0)
model=Sequential()
model.add(Dense(net_list[1],input_shape=(net_list[0],)))
model.add(Activation('sigmoid'))
for i in net_list[2:]:
model.add(Dense(i))
model.add(Activation('sigmoid'))
prop=[RMSprop(0.01),RMSprop(0.008),RMSprop(0.006),RMSprop(0.004),RMSprop(0.002)]
#prop=[Adagrad(),Adagrad(),Adagrad()]
model.compile(loss='mse',optimizer=prop[type],metrics=[metrics.mse])
return model
# In[9]:
def ClassifierModel(net_list):
if (len(net_list)<2):
print('Error! input classifier net structure is wrong!')
exit(0)
model=Sequential()
model.add(Dense((net_list[1]),input_shape=(net_list[0],)))
if (len(net_list)>2):
model.add(Activation('sigmoid'))
for i in net_list[2:-1]:
model.add(Dense(i))
model.add(Activation('sigmoid'))
model.add(Dense(net_list[-1]))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',optimizer=RMSprop(0.01))
return model
# Error bound Generation
# ========
# In[10]:
def generate_eb():
global A
#Setting of parameters
get_net()
global eb
global lenN
#Reading the dataset
x_train_origin,y_train_origin,x_test,y_test=load_data(app_name) #numpy type
lenN=len(x_train_origin)
print ("Training data shape:",x_train_origin.shape,y_train_origin.shape)#Print the input shape, compare with the net_A and net_C,
print('Testing data shape:',x_test.shape,y_test.shape) #check whether they match each other
#Setting the neural network
A=[AcceleratorModel(net_A,i)for i in range(numA)]
print("The Model A:")
A[0].summary()
#Training the approximator
A[0].fit(x_train_origin,y_train_origin,epochs=epochA,batch_size=batch_size,verbose=1)
#Generate error_bound(30% 50% 80%)
errorA=Error_of_Approximator(A[0],x_train_origin,y_train_origin)
sortError=sorted(errorA)
eb=[sortError[int(lenN*0.3)],sortError[int(lenN*0.5)],sortError[int(lenN*0.8)]]
f_results = open('../data/' + app_name+ '_eb.csv', 'w')
f_results.write(' '.join([str(eb[i]) for i in range(len(eb))]))
f_results.close()
# In[14]:
def main(app,eA,eC):
global app_name
global epochA
global epochC
app_name=app
epochA=eA
epochC=eC
generate_eb()
# In[ ]:
if __name__=="__main__":
if (len(sys.argv)==4):
#net_A = [int(x) for x in sys.argv[2].split('_')[1:]]
#net_C = [int(x) for x in sys.argv[3].split('_')[1:]]
#print(net_A)
#print(net_C)
main(sys.argv[1], int(sys.argv[2]),int(sys.argv[3]))
else:
print('Usage: python train_origin.py [benchmark_name] [epochA] [epochC]')
#print('#net_A|net_C: like a_6_8_8_1, c_6_8(the last layer depends on number of A)')
exit(0)
| [
"numpy.argmax",
"keras.layers.Activation",
"keras.backend.set_session",
"tensorflow.Session",
"keras.layers.Dense",
"numpy.loadtxt",
"keras.models.Sequential",
"keras.optimizers.RMSprop",
"tensorflow.GPUOptions",
"keras.utils.to_categorical"
] | [((204, 229), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (214, 229), True, 'import tensorflow as tf\n'), ((230, 252), 'keras.backend.set_session', 'K.set_session', (['session'], {}), '(session)\n', (243, 252), True, 'from keras import backend as K\n'), ((2217, 2263), 'numpy.loadtxt', 'np.loadtxt', (["('../data/' + app_name + '/train.x')"], {}), "('../data/' + app_name + '/train.x')\n", (2227, 2263), True, 'import numpy as np\n'), ((2272, 2318), 'numpy.loadtxt', 'np.loadtxt', (["('../data/' + app_name + '/train.y')"], {}), "('../data/' + app_name + '/train.y')\n", (2282, 2318), True, 'import numpy as np\n'), ((2326, 2371), 'numpy.loadtxt', 'np.loadtxt', (["('../data/' + app_name + '/test.x')"], {}), "('../data/' + app_name + '/test.x')\n", (2336, 2371), True, 'import numpy as np\n'), ((2379, 2424), 'numpy.loadtxt', 'np.loadtxt', (["('../data/' + app_name + '/test.y')"], {}), "('../data/' + app_name + '/test.y')\n", (2389, 2424), True, 'import numpy as np\n'), ((3915, 3959), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['labelA', '(numA + 1)'], {}), '(labelA, numA + 1)\n', (3941, 3959), False, 'import keras\n'), ((4110, 4132), 'numpy.argmax', 'np.argmax', (['predictC', '(1)'], {}), '(predictC, 1)\n', (4119, 4132), True, 'import numpy as np\n'), ((5930, 5942), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5940, 5942), False, 'from keras.models import Sequential, model_from_json\n'), ((6528, 6540), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6538, 6540), False, 'from keras.models import Sequential, model_from_json\n'), ((160, 192), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (173, 192), True, 'import tensorflow as tf\n'), ((5957, 6003), 'keras.layers.Dense', 'Dense', (['net_list[1]'], {'input_shape': '(net_list[0],)'}), '(net_list[1], input_shape=(net_list[0],))\n', (5962, 6003), False, 'from keras.layers import Dense, 
Dropout, Activation\n'), ((6018, 6039), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (6028, 6039), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((6161, 6174), 'keras.optimizers.RMSprop', 'RMSprop', (['(0.01)'], {}), '(0.01)\n', (6168, 6174), False, 'from keras.optimizers import SGD, RMSprop, Adamax, Adam, Adagrad\n'), ((6175, 6189), 'keras.optimizers.RMSprop', 'RMSprop', (['(0.008)'], {}), '(0.008)\n', (6182, 6189), False, 'from keras.optimizers import SGD, RMSprop, Adamax, Adam, Adagrad\n'), ((6190, 6204), 'keras.optimizers.RMSprop', 'RMSprop', (['(0.006)'], {}), '(0.006)\n', (6197, 6204), False, 'from keras.optimizers import SGD, RMSprop, Adamax, Adam, Adagrad\n'), ((6205, 6219), 'keras.optimizers.RMSprop', 'RMSprop', (['(0.004)'], {}), '(0.004)\n', (6212, 6219), False, 'from keras.optimizers import SGD, RMSprop, Adamax, Adam, Adagrad\n'), ((6220, 6234), 'keras.optimizers.RMSprop', 'RMSprop', (['(0.002)'], {}), '(0.002)\n', (6227, 6234), False, 'from keras.optimizers import SGD, RMSprop, Adamax, Adam, Adagrad\n'), ((6555, 6601), 'keras.layers.Dense', 'Dense', (['net_list[1]'], {'input_shape': '(net_list[0],)'}), '(net_list[1], input_shape=(net_list[0],))\n', (6560, 6601), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((6848, 6869), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (6858, 6869), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((6091, 6099), 'keras.layers.Dense', 'Dense', (['i'], {}), '(i)\n', (6096, 6099), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((6119, 6140), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (6129, 6140), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((6653, 6674), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (6663, 6674), False, 'from keras.layers import Dense, Dropout, Activation\n'), 
((6804, 6823), 'keras.layers.Dense', 'Dense', (['net_list[-1]'], {}), '(net_list[-1])\n', (6809, 6823), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((6931, 6944), 'keras.optimizers.RMSprop', 'RMSprop', (['(0.01)'], {}), '(0.01)\n', (6938, 6944), False, 'from keras.optimizers import SGD, RMSprop, Adamax, Adam, Adagrad\n'), ((6731, 6739), 'keras.layers.Dense', 'Dense', (['i'], {}), '(i)\n', (6736, 6739), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((6763, 6784), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (6773, 6784), False, 'from keras.layers import Dense, Dropout, Activation\n')] |
import os
import sys
import gc
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import time
import json
import random
import math
from PIL import Image
import signal
import threading
import multiprocessing
import numpy as np
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
import torch
from torch.utils.data.dataloader import DataLoader
from torchvision.utils import save_image
from wolf.data import load_datasets, get_batch, preprocess, postprocess
from wolf import WolfModel
from experiments.options import parse_synthesize_args
from experiments.distributed import ErrorHandler
def setup(args):
def check_dataset():
if dataset == 'cifar10':
assert image_size == 32, 'CIFAR-10 expected image size 32 but got {}'.format(image_size)
elif dataset.startswith('lsun'):
assert image_size in [128, 256]
elif dataset == 'celeba':
assert image_size in [256, 512]
elif dataset == 'imagenet':
assert image_size in [64, 128, 256]
dataset = args.dataset
if args.category is not None:
dataset = dataset + '_' + args.category
image_size = args.image_size
check_dataset()
num_class = 10 if dataset == 'cifar10' else None
nc = 3
args.nx = image_size ** 2 * nc
n_bits = args.n_bits
args.n_bins = 2. ** n_bits
args.test_k = 5
model_path = args.model_path
result_path = os.path.join(model_path, 'synthesis')
args.result_path = result_path
if not os.path.exists(result_path):
os.makedirs(result_path)
data_path = args.data_path
args.cuda = torch.cuda.is_available()
random_seed = args.seed
print('random seed: {}'.format(random_seed))
if random_seed is not None:
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
if args.cuda:
torch.cuda.manual_seed(random_seed)
device = torch.device('cuda', 0) if args.cuda else torch.device('cpu')
if args.cuda:
torch.cuda.set_device(device)
torch.backends.cudnn.benchmark = True
train_data, val_data = load_datasets(dataset, image_size, data_path=data_path)
args.device = device
wolf = WolfModel.load(model_path, device=device)
wolf.eval()
return args, wolf, (train_data, val_data, num_class)
def sample(args, wolf):
print('sampling')
wolf.eval()
nsamples = args.nsamples
n = 64 if args.image_size > 128 else 256
tau = args.tau
image_size = (3, args.image_size, args.image_size)
nums = 0
nnans = 0
images = []
make_grid = args.make_grid
if make_grid:
result_path = args.result_path
else:
result_path = os.path.join(args.result_path, str(tau))
if not os.path.exists(result_path):
os.makedirs(result_path)
start_time = time.time()
while nums < nsamples:
imgs = wolf.synthesize(n, image_size, tau=tau, n_bits=args.n_bits, device=args.device)
mask = torch.isnan(imgs).view(n, -1).any(dim=1)
nnans += mask.sum().item()
imgs = imgs[torch.logical_not(mask)].cpu()
if make_grid:
images.append(imgs)
else:
for i in range(imgs.size(0)):
img = imgs[i]
img = img.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
image_file = 'sample{}.t{:.1f}.png'.format(i + nums, tau)
im = Image.fromarray(img)
im.save(os.path.join(result_path, image_file))
nums += n - mask.sum().item()
print(nums, nnans)
if make_grid:
imgs = torch.cat(images, dim=0)[:nsamples]
nrow = int(math.sqrt(nsamples))
image_file = 'sample.t{:.1f}.png'.format(tau)
save_image(imgs, os.path.join(result_path, image_file), nrow=nrow)
print('time: {:.1f}s'.format(time.time() - start_time))
def reconstruct(args, data, wolf):
print('reconstruct')
wolf.eval()
batch = 16
nsamples = 15
index = np.arange(len(data))
np.random.shuffle(index)
img, y = get_batch(data, index[:batch])
img = img.to(args.device)
y = y.to(args.device)
image_size = (3, args.image_size, args.image_size)
_, epsilon = wolf.encode(img, y=y, n_bits=args.n_bits, nsamples=nsamples, random=True)
epsilon = epsilon.view(batch * nsamples, *image_size)
z = wolf.encode_global(img, y=y, n_bits=args.n_bits, nsamples=nsamples, random=True)
z = z.view(batch * nsamples, z.size(2))
# [batch, nsamples, c, h, w]
img_recon = wolf.decode(epsilon, z=z, n_bits=args.n_bits).view(batch, nsamples, *image_size)
# [batch, 1, c, h, w]
img = postprocess(preprocess(img, args.n_bits), args.n_bits).unsqueeze(1)
# [batch, nsamples + 1, c, h, w] -> [batch*(nsamples + 1), c, h, w]
comparison = torch.cat([img, img_recon], dim=1).view(-1, *image_size).cpu()
image_file = 'reconstruct.png'
save_image(comparison, os.path.join(args.result_path, image_file), nrow=nsamples + 1)
def _interpolate(args, data, index, wolf, clabel):
print('interpolate: {}, #{}'.format(clabel, len(index)))
wolf.eval()
batch = 64
np.random.shuffle(index)
img0, y0 = get_batch(data, index[:batch])
img0 = img0.to(args.device)
y0 = y0.to(args.device)
img1, y1 = get_batch(data, index[batch:2 * batch])
img1 = img1.to(args.device)
y1 = y1.to(args.device)
image_size = (3, args.image_size, args.image_size)
z0, epsilon0 = wolf.encode(img0, y=y0, n_bits=args.n_bits, random=False)
z1, epsilon1 = wolf.encode(img1, y=y1, n_bits=args.n_bits, random=False)
alphas = [x * 0.1 for x in range(11)]
# [1, time, 1, 1, 1]
betas = torch.arange(11, device=args.device).float().view(1, 11, 1, 1, 1) * 0.1
# [batch, time, dim]
z0 = z0.expand(-1, betas.size(1), -1)
z1 = z1.expand(-1, betas.size(1), -1)
imgs = []
for alpha in alphas:
# [batch, time, dim]
z = z0 * (1.0 - alpha) + z1 * alpha
# [batch, time, c, h, w]
epsilon = epsilon0 * (1.0 - betas) + epsilon1 * betas
# [batch * time, *]
z = z.view(-1, z.size(2))
epsilon = epsilon.view(-1, *image_size)
# [batch, time, c, h, w]
img = wolf.decode(epsilon, z=z, n_bits=args.n_bits).view(batch, -1, *image_size)
imgs.append(img)
img = torch.stack(imgs, dim=1).view(-1, *image_size).cpu()
image_file = 'interpolate{}.png'.format(clabel)
save_image(img, os.path.join(args.result_path, image_file), nrow=11)
def interpolate(args, data, wolf, num_class):
index = np.arange(len(data))
_interpolate(args, data, index, wolf, '')
if num_class is not None:
index = np.arange(len(data))
_, y = get_batch(data, index)
index = torch.from_numpy(index)
for label in range(num_class):
mask = y.eq(label)
idx = index[mask].numpy()
_interpolate(args, data, idx, wolf, label)
def _switch(args, data, index, wolf, clabel):
print('switch: {}, #{}'.format(clabel, len(index)))
wolf.eval()
batch = 64
np.random.shuffle(index)
for run in range(5):
img0, y0 = get_batch(data, index[:batch])
img0 = img0.to(args.device)
y0 = y0.to(args.device)
img1, y1 = get_batch(data, index[(run + 1) * batch:(run + 2) * batch])
img1 = img1.to(args.device)
y1 = y1.to(args.device)
image_size = (3, args.image_size, args.image_size)
z0, epsilon0 = wolf.encode(img0, y=y0, n_bits=args.n_bits, random=False)
z1, epsilon1 = wolf.encode(img1, y=y1, n_bits=args.n_bits, random=False)
alphas = torch.arange(2, device=args.device).float().view(1, 2, 1)
# [1, time, 1, 1, 1]
betas = [0, 1]
# [batch, time, dim]
epsilon0 = epsilon0.expand(-1, alphas.size(1), *image_size)
epsilon1 = epsilon1.expand(-1, alphas.size(1), *image_size)
imgs = []
for beta in betas:
# [batch, time, c, h, w]
epsilon = epsilon0 * (1.0 - beta) + epsilon1 * beta
# [batch, time, dim]
z = z0 * (1.0 - alphas) + z1 * alphas if beta == 0 else z0 * alphas + z1 * (1.0 - alphas)
# [batch * time, *]
z = z.view(-1, z.size(2))
epsilon = epsilon.view(-1, *image_size)
# [batch, time, c, h, w]
img = wolf.decode(epsilon, z=z, n_bits=args.n_bits).view(batch, -1, *image_size)
imgs.append(img)
nn = int(math.sqrt(batch))
# [batch, 2, 2, c, h, w]
img = torch.stack(imgs, dim=1)
# [nn, nn, 2, 2, c, h, w] -> [nn, 2, nn, 2, c, h, w]
img = img.view(nn, nn, 2, 2, *image_size).transpose(1, 2)
img = img.contiguous().view(-1, *image_size).cpu()
image_file = 'switch{}.png'.format(clabel + '-' + str(run))
save_image(img, os.path.join(args.result_path, image_file), nrow=2 * nn)
def switch(args, data, wolf, num_class):
index = np.arange(len(data))
_switch(args, data, index, wolf, 'g')
if num_class is not None:
index = np.arange(len(data))
_, y = get_batch(data, index)
index = torch.from_numpy(index)
for label in range(num_class):
mask = y.eq(label)
idx = index[mask].numpy()
_switch(args, data, idx, wolf, label)
def classify(args, train_data, test_data, wolf):
probe = args.probe
wolf.eval()
print('encoding')
train_features, train_label = encode(args, train_data, wolf)
test_features, test_label = encode(args, test_data, wolf)
print('classifying')
mp = multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
processes = []
for key in train_features:
x_train = train_features[key]
y_train = train_label
x_test = test_features[key]
y_test = test_label
process = mp.Process(target=run_classifier,
args=(probe, key, x_train, y_train, x_test, y_test, error_queue),
daemon=False)
process.start()
error_handler.add_child(process.pid)
processes.append(process)
for process in processes:
process.join()
def run_classifier(probe, key, x_train, y_train, x_test, y_test, error_queue):
if probe == 'svm-rbf':
clf = SVC(kernel='rbf')
elif probe == 'svm-linear':
clf = SVC(kernel='linear')
elif probe == 'logistic':
clf = LogisticRegression(max_iter=1000, n_jobs=1)
else:
raise ValueError('unknown probe: {}'.format(probe))
try:
start = time.time()
clf.fit(x_train.numpy(), y_train.numpy())
acc = clf.score(x_test.numpy(), y_test.numpy()) * 100
gc.collect()
print("Dimensions on {}: {}, {}".format(key, tuple(x_train.size()), tuple(x_test.size())))
print("Accuracy on {} is {:.2f}, time: {:.2f}s".format(key, acc, time.time() - start))
print('-' * 25)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.rank, traceback.format_exc()))
def encode(args, data, wolf):
batch_size = 64 if args.image_size > 128 else 256
data_loader = DataLoader(data, batch_size=batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
device = args.device
zs = []
epsilons = []
imgs = []
labels = []
for img, y in data_loader:
imgs.append(img)
labels.append(y)
img = img.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
z, epsilon = wolf.encode(img, y=y, n_bits=args.n_bits, random=False)
if z is not None:
zs.append(z.squeeze(1).cpu())
epsilons.append(epsilon.squeeze(1).cpu())
imgs = torch.cat(imgs, dim=0)
imgs = imgs.view(imgs.size(0), -1)
epsilons = torch.cat(epsilons, dim=0)
epsilons = epsilons.view(epsilons.size(0), -1)
labels = torch.cat(labels, dim=0)
features = {'img': imgs, 'epsilon': epsilons}
if len(zs) > 0:
zs = torch.cat(zs, dim=0)
features.update({'latent code': zs})
return features, labels
def main(args):
args, wolf, (train_data, val_data, num_class) = setup(args)
if args.mode == 'sample':
sample(args, wolf)
elif args.mode == 'reconstruct':
reconstruct(args, train_data, wolf)
elif args.mode == 'interpolate':
interpolate(args, train_data, wolf, num_class)
elif args.mode == 'switch':
switch(args, train_data, wolf, num_class)
elif args.mode == 'classify':
classify(args, train_data, val_data, wolf)
else:
raise ValueError('Unknown mode: {}'.format(args.mode))
if __name__ == "__main__":
args = parse_synthesize_args()
with torch.no_grad():
main(args)
| [
"numpy.random.seed",
"torch.cat",
"multiprocessing.get_context",
"gc.collect",
"torch.arange",
"torch.device",
"sklearn.svm.SVC",
"torch.no_grad",
"os.path.join",
"torch.isnan",
"sys.path.append",
"torch.logical_not",
"os.path.exists",
"experiments.distributed.ErrorHandler",
"random.seed... | [((164, 190), 'sys.path.append', 'sys.path.append', (['root_path'], {}), '(root_path)\n', (179, 190), False, 'import sys\n'), ((63, 89), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (79, 89), False, 'import os\n'), ((1543, 1580), 'os.path.join', 'os.path.join', (['model_path', '"""synthesis"""'], {}), "(model_path, 'synthesis')\n", (1555, 1580), False, 'import os\n'), ((1736, 1761), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1759, 1761), False, 'import torch\n'), ((2253, 2308), 'wolf.data.load_datasets', 'load_datasets', (['dataset', 'image_size'], {'data_path': 'data_path'}), '(dataset, image_size, data_path=data_path)\n', (2266, 2308), False, 'from wolf.data import load_datasets, get_batch, preprocess, postprocess\n'), ((2345, 2386), 'wolf.WolfModel.load', 'WolfModel.load', (['model_path'], {'device': 'device'}), '(model_path, device=device)\n', (2359, 2386), False, 'from wolf import WolfModel\n'), ((2977, 2988), 'time.time', 'time.time', ([], {}), '()\n', (2986, 2988), False, 'import time\n'), ((4196, 4220), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (4213, 4220), True, 'import numpy as np\n'), ((4234, 4264), 'wolf.data.get_batch', 'get_batch', (['data', 'index[:batch]'], {}), '(data, index[:batch])\n', (4243, 4264), False, 'from wolf.data import load_datasets, get_batch, preprocess, postprocess\n'), ((5320, 5344), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (5337, 5344), True, 'import numpy as np\n'), ((5361, 5391), 'wolf.data.get_batch', 'get_batch', (['data', 'index[:batch]'], {}), '(data, index[:batch])\n', (5370, 5391), False, 'from wolf.data import load_datasets, get_batch, preprocess, postprocess\n'), ((5467, 5506), 'wolf.data.get_batch', 'get_batch', (['data', 'index[batch:2 * batch]'], {}), '(data, index[batch:2 * batch])\n', (5476, 5506), False, 'from wolf.data import load_datasets, get_batch, preprocess, 
postprocess\n'), ((7265, 7289), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (7282, 7289), True, 'import numpy as np\n'), ((9797, 9833), 'multiprocessing.get_context', 'multiprocessing.get_context', (['"""spawn"""'], {}), "('spawn')\n", (9824, 9833), False, 'import multiprocessing\n'), ((9956, 9981), 'experiments.distributed.ErrorHandler', 'ErrorHandler', (['error_queue'], {}), '(error_queue)\n', (9968, 9981), False, 'from experiments.distributed import ErrorHandler\n'), ((11629, 11731), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['data'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(data, batch_size=batch_size, shuffle=False, num_workers=args.\n workers, pin_memory=True)\n', (11639, 11731), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((12224, 12246), 'torch.cat', 'torch.cat', (['imgs'], {'dim': '(0)'}), '(imgs, dim=0)\n', (12233, 12246), False, 'import torch\n'), ((12301, 12327), 'torch.cat', 'torch.cat', (['epsilons'], {'dim': '(0)'}), '(epsilons, dim=0)\n', (12310, 12327), False, 'import torch\n'), ((12392, 12416), 'torch.cat', 'torch.cat', (['labels'], {'dim': '(0)'}), '(labels, dim=0)\n', (12401, 12416), False, 'import torch\n'), ((13186, 13209), 'experiments.options.parse_synthesize_args', 'parse_synthesize_args', ([], {}), '()\n', (13207, 13209), False, 'from experiments.options import parse_synthesize_args\n'), ((135, 161), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os\n'), ((1627, 1654), 'os.path.exists', 'os.path.exists', (['result_path'], {}), '(result_path)\n', (1641, 1654), False, 'import os\n'), ((1664, 1688), 'os.makedirs', 'os.makedirs', (['result_path'], {}), '(result_path)\n', (1675, 1688), False, 'import os\n'), ((1880, 1904), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (1891, 1904), False, 'import random\n'), ((1913, 1940), 
'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1927, 1940), True, 'import numpy as np\n'), ((1949, 1979), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (1966, 1979), False, 'import torch\n'), ((2064, 2087), 'torch.device', 'torch.device', (['"""cuda"""', '(0)'], {}), "('cuda', 0)\n", (2076, 2087), False, 'import torch\n'), ((2106, 2125), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2118, 2125), False, 'import torch\n'), ((2152, 2181), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (2173, 2181), False, 'import torch\n'), ((5108, 5150), 'os.path.join', 'os.path.join', (['args.result_path', 'image_file'], {}), '(args.result_path, image_file)\n', (5120, 5150), False, 'import os\n'), ((6638, 6680), 'os.path.join', 'os.path.join', (['args.result_path', 'image_file'], {}), '(args.result_path, image_file)\n', (6650, 6680), False, 'import os\n'), ((6900, 6922), 'wolf.data.get_batch', 'get_batch', (['data', 'index'], {}), '(data, index)\n', (6909, 6922), False, 'from wolf.data import load_datasets, get_batch, preprocess, postprocess\n'), ((6939, 6962), 'torch.from_numpy', 'torch.from_numpy', (['index'], {}), '(index)\n', (6955, 6962), False, 'import torch\n'), ((7335, 7365), 'wolf.data.get_batch', 'get_batch', (['data', 'index[:batch]'], {}), '(data, index[:batch])\n', (7344, 7365), False, 'from wolf.data import load_datasets, get_batch, preprocess, postprocess\n'), ((7453, 7512), 'wolf.data.get_batch', 'get_batch', (['data', 'index[(run + 1) * batch:(run + 2) * batch]'], {}), '(data, index[(run + 1) * batch:(run + 2) * batch])\n', (7462, 7512), False, 'from wolf.data import load_datasets, get_batch, preprocess, postprocess\n'), ((8742, 8766), 'torch.stack', 'torch.stack', (['imgs'], {'dim': '(1)'}), '(imgs, dim=1)\n', (8753, 8766), False, 'import torch\n'), ((9302, 9324), 'wolf.data.get_batch', 'get_batch', (['data', 'index'], {}), '(data, 
index)\n', (9311, 9324), False, 'from wolf.data import load_datasets, get_batch, preprocess, postprocess\n'), ((9341, 9364), 'torch.from_numpy', 'torch.from_numpy', (['index'], {}), '(index)\n', (9357, 9364), False, 'import torch\n'), ((10634, 10651), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (10637, 10651), False, 'from sklearn.svm import SVC\n'), ((10903, 10914), 'time.time', 'time.time', ([], {}), '()\n', (10912, 10914), False, 'import time\n'), ((11035, 11047), 'gc.collect', 'gc.collect', ([], {}), '()\n', (11045, 11047), False, 'import gc\n'), ((12500, 12520), 'torch.cat', 'torch.cat', (['zs'], {'dim': '(0)'}), '(zs, dim=0)\n', (12509, 12520), False, 'import torch\n'), ((13219, 13234), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13232, 13234), False, 'import torch\n'), ((2014, 2049), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['random_seed'], {}), '(random_seed)\n', (2036, 2049), False, 'import torch\n'), ((2893, 2920), 'os.path.exists', 'os.path.exists', (['result_path'], {}), '(result_path)\n', (2907, 2920), False, 'import os\n'), ((2934, 2958), 'os.makedirs', 'os.makedirs', (['result_path'], {}), '(result_path)\n', (2945, 2958), False, 'import os\n'), ((3781, 3805), 'torch.cat', 'torch.cat', (['images'], {'dim': '(0)'}), '(images, dim=0)\n', (3790, 3805), False, 'import torch\n'), ((3836, 3855), 'math.sqrt', 'math.sqrt', (['nsamples'], {}), '(nsamples)\n', (3845, 3855), False, 'import math\n'), ((3936, 3973), 'os.path.join', 'os.path.join', (['result_path', 'image_file'], {}), '(result_path, image_file)\n', (3948, 3973), False, 'import os\n'), ((8677, 8693), 'math.sqrt', 'math.sqrt', (['batch'], {}), '(batch)\n', (8686, 8693), False, 'import math\n'), ((9045, 9087), 'os.path.join', 'os.path.join', (['args.result_path', 'image_file'], {}), '(args.result_path, image_file)\n', (9057, 9087), False, 'import os\n'), ((10698, 10718), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), 
"(kernel='linear')\n", (10701, 10718), False, 'from sklearn.svm import SVC\n'), ((3597, 3617), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3612, 3617), False, 'from PIL import Image\n'), ((4020, 4031), 'time.time', 'time.time', ([], {}), '()\n', (4029, 4031), False, 'import time\n'), ((4837, 4865), 'wolf.data.preprocess', 'preprocess', (['img', 'args.n_bits'], {}), '(img, args.n_bits)\n', (4847, 4865), False, 'from wolf.data import load_datasets, get_batch, preprocess, postprocess\n'), ((10763, 10806), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(1000)', 'n_jobs': '(1)'}), '(max_iter=1000, n_jobs=1)\n', (10781, 10806), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3222, 3245), 'torch.logical_not', 'torch.logical_not', (['mask'], {}), '(mask)\n', (3239, 3245), False, 'import torch\n'), ((3642, 3679), 'os.path.join', 'os.path.join', (['result_path', 'image_file'], {}), '(result_path, image_file)\n', (3654, 3679), False, 'import os\n'), ((4983, 5017), 'torch.cat', 'torch.cat', (['[img, img_recon]'], {'dim': '(1)'}), '([img, img_recon], dim=1)\n', (4992, 5017), False, 'import torch\n'), ((6513, 6537), 'torch.stack', 'torch.stack', (['imgs'], {'dim': '(1)'}), '(imgs, dim=1)\n', (6524, 6537), False, 'import torch\n'), ((11220, 11231), 'time.time', 'time.time', ([], {}), '()\n', (11229, 11231), False, 'import time\n'), ((11500, 11522), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11520, 11522), False, 'import traceback\n'), ((3126, 3143), 'torch.isnan', 'torch.isnan', (['imgs'], {}), '(imgs)\n', (3137, 3143), False, 'import torch\n'), ((5857, 5893), 'torch.arange', 'torch.arange', (['(11)'], {'device': 'args.device'}), '(11, device=args.device)\n', (5869, 5893), False, 'import torch\n'), ((7821, 7856), 'torch.arange', 'torch.arange', (['(2)'], {'device': 'args.device'}), '(2, device=args.device)\n', (7833, 7856), False, 'import torch\n')] |
import json
import boto3
import logging
import io
import numpy as np
import pandas as pd
from utils import create_response_obj
# scaler = load(open('scaler.pkl', 'rb'))
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def post(event, context):
if event['body'] is not None:
body = json.loads(event['body'])
params = body['input']
# The last range parameter
range = body['range']
start = range['start']
end = range['end']
step = range['step']
endpoint_name = body['modelName']
else:
return create_response_obj(400, {
'error': 'invalid message body'
})
logger.info('params: {}'.format(params))
params = [float(i) for i in params]
all_data = []
xaxis = []
# Build CSV with these values
# Input#1, Input#2, .....#Input 19, Input#20
# 1, 2, 3..., 19, 1.1
# 1, 2, 3..., 19, 1.2
# 1, 2, 3..., 19, 1.3
# ....
# 1, 2, 3..., 19, 2.8
# 1, 2, 3..., 19, 2.9
# 1, 2, 3..., 19, 3.0
for i in np.arange(start, end, step):
all_data.append(params + [i])
xaxis.append(str(i))
logger.info('xaxis: {}'.format(xaxis))
logger.info('all_data[0]: {}'.format(all_data[0]))
logger.info(f'all_data: {all_data}')
df = pd.DataFrame(np.array(all_data))
test_file = io.StringIO()
logger.info(df.head())
df.to_csv(test_file, header=None, index=None)
try:
client = boto3.client('sagemaker-runtime')
response = client.invoke_endpoint(
EndpointName=endpoint_name,
Body=test_file.getvalue(),
ContentType='text/csv',
Accept='Accept'
)
preds_string = response['Body'].read().decode('ascii').split()
preds = list(map(lambda x: float(x), preds_string))
return create_response_obj(200, {
'x_axis': xaxis,
'predictions': preds,
})
except client.exceptions.ModelError as e:
logger.error(repr(e))
return create_response_obj(502, {
'error': repr(e),
})
| [
"io.StringIO",
"json.loads",
"boto3.client",
"numpy.arange",
"numpy.array",
"utils.create_response_obj",
"logging.getLogger"
] | [((181, 200), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (198, 200), False, 'import logging\n'), ((1051, 1078), 'numpy.arange', 'np.arange', (['start', 'end', 'step'], {}), '(start, end, step)\n', (1060, 1078), True, 'import numpy as np\n'), ((1346, 1359), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1357, 1359), False, 'import io\n'), ((308, 333), 'json.loads', 'json.loads', (["event['body']"], {}), "(event['body'])\n", (318, 333), False, 'import json\n'), ((585, 644), 'utils.create_response_obj', 'create_response_obj', (['(400)', "{'error': 'invalid message body'}"], {}), "(400, {'error': 'invalid message body'})\n", (604, 644), False, 'from utils import create_response_obj\n'), ((1310, 1328), 'numpy.array', 'np.array', (['all_data'], {}), '(all_data)\n', (1318, 1328), True, 'import numpy as np\n'), ((1464, 1497), 'boto3.client', 'boto3.client', (['"""sagemaker-runtime"""'], {}), "('sagemaker-runtime')\n", (1476, 1497), False, 'import boto3\n'), ((1842, 1907), 'utils.create_response_obj', 'create_response_obj', (['(200)', "{'x_axis': xaxis, 'predictions': preds}"], {}), "(200, {'x_axis': xaxis, 'predictions': preds})\n", (1861, 1907), False, 'from utils import create_response_obj\n')] |
import csv
from ast import literal_eval
from collections import defaultdict
import click
import numpy as np
import torch
from tqdm import tqdm
from transformers import BertForTokenClassification, AlbertForTokenClassification, ElectraForTokenClassification, \
RobertaForTokenClassification, XLNetForTokenClassification, MobileBertForTokenClassification, \
SqueezeBertForTokenClassification, SqueezeBertTokenizerFast, XLNetTokenizerFast, MobileBertTokenizerFast, \
RobertaTokenizerFast, ElectraTokenizerFast, AlbertTokenizerFast, BertTokenizerFast
from dataset import DatasetModule
from utils import get_api_and_experiment, f1_semeval, fill_holes_in_row, remove_ones_in_row, fill_holes_in_row_three
from model import LitModule
@click.command()
@click.option('--experiment', required=True, type=str, help='For example ce132011516346c99185d139fb23c70c')
@click.option('--weights-path', required=True, type=str, help='For example epoch=25-val_mae=8.2030.ckpt')
def validate(experiment, weights_path):
_, experiment = get_api_and_experiment(experiment)
model_param = experiment.get_parameters_summary("model")['valueCurrent']
# length = int(experiment.get_parameters_summary("length")['valueCurrent'])
length=512
model_data = {
'bert': [BertForTokenClassification, BertTokenizerFast, 'bert-base-uncased'],
'albert': [AlbertForTokenClassification, AlbertTokenizerFast, 'albert-base-v2'],
'electra': [ElectraForTokenClassification, ElectraTokenizerFast, 'google/electra-small-discriminator'],
'roberta': [RobertaForTokenClassification, RobertaTokenizerFast, 'roberta-base'],
'xlnet': [XLNetForTokenClassification, XLNetTokenizerFast, 'xlnet-base-cased'],
'mobilebert': [MobileBertForTokenClassification, MobileBertTokenizerFast, 'google/mobilebert-uncased'],
'squeezebert': [SqueezeBertForTokenClassification, SqueezeBertTokenizerFast,
'squeezebert/squeezebert-mnli-headless']
}
model_class, tokenizer_class, model_name = model_data[model_param]
tokenizer = tokenizer_class.from_pretrained(model_name, do_lower_case=True)
model_backbone = model_class.from_pretrained(model_name, num_labels=2, output_attentions=False,
output_hidden_states=False)
data_module = DatasetModule(data_dir='data/spans', tokenizer=tokenizer, batch_size=4, length=length)
data_module.prepare_data()
# experiment.download_model(name=weights_path, output_path='comet-ml/', expand=True)
model = LitModule.load_from_checkpoint(weights_path, model=model_backbone, tokenizer=tokenizer, lr=4.7e-5,
freeze=False)
model.eval()
model.cuda()
result_spans = defaultdict(lambda: defaultdict(list))
for batch in tqdm(data_module.val_dataloader()):
with torch.no_grad():
outputs = model(batch['input_ids'].cuda(), token_type_ids=None,
attention_mask=batch['attention_mask'].cuda(),
labels=batch['labels'].cuda())
logits = outputs.logits.detach().cpu().numpy()
y_pred = np.argmax(logits, axis=-1).astype(int)
y_true = batch['labels'].cpu().numpy().astype(int)
pad_span = batch['pad_span'].cpu().numpy().astype(int)
offset_mapping = batch['offset_mapping'].cpu().numpy().astype(int)
sentence_id = batch['sentence_id'].cpu().numpy().astype(int)
sentence_offset = batch['offset'].cpu().numpy().astype(int)
for i in range(len(y_true)):
true_spans = list(set(pad_span[i]) - {-1}) # remove padding
predicted_offsets = offset_mapping[i][y_pred[i].astype(bool)]
predicted_spans = [i for offset in predicted_offsets for i in range(offset[0], offset[1])]
result_spans[sentence_id[i]]['true'].extend(list(np.array(true_spans) + sentence_offset[i]))
result_spans[sentence_id[i]]['pred'].extend(list(np.array(predicted_spans) + sentence_offset[i]))
f1_semeval_avg = np.array([f1_semeval(result_spans[sentence_id]['true'], result_spans[sentence_id]['pred'])
for sentence_id in result_spans])
print(np.mean(f1_semeval_avg))
f1_semeval_avg = np.array([f1_semeval(result_spans[sentence_id]['true'],
literal_eval(fill_holes_in_row(str(result_spans[sentence_id]['pred']))))
for sentence_id in result_spans])
print(np.mean(f1_semeval_avg))
f1_semeval_avg = np.array([f1_semeval(result_spans[sentence_id]['true'],
literal_eval(fill_holes_in_row_three(str(result_spans[sentence_id]['pred']))))
for sentence_id in result_spans])
print(np.mean(f1_semeval_avg))
f1_semeval_avg = np.array([f1_semeval(result_spans[sentence_id]['true'],
literal_eval(remove_ones_in_row(str(result_spans[sentence_id]['pred']))))
for sentence_id in result_spans])
print(np.mean(f1_semeval_avg))
f1_semeval_avg = np.array([f1_semeval(result_spans[sentence_id]['true'], literal_eval(
remove_ones_in_row(fill_holes_in_row(str(result_spans[sentence_id]['pred'])))))
for sentence_id in result_spans])
print(np.mean(f1_semeval_avg))
#
# predicted_df = model.predict_dataframe(data_module.test_df, length)
# predicted_df.to_csv('spans-pred.txt', header=False, sep='\t', quoting=csv.QUOTE_NONE, escapechar='\n')
if __name__ == '__main__':
validate()
| [
"utils.get_api_and_experiment",
"utils.f1_semeval",
"numpy.argmax",
"dataset.DatasetModule",
"model.LitModule.load_from_checkpoint",
"click.option",
"click.command",
"collections.defaultdict",
"numpy.mean",
"numpy.array",
"torch.no_grad"
] | [((742, 757), 'click.command', 'click.command', ([], {}), '()\n', (755, 757), False, 'import click\n'), ((759, 870), 'click.option', 'click.option', (['"""--experiment"""'], {'required': '(True)', 'type': 'str', 'help': '"""For example ce132011516346c99185d139fb23c70c"""'}), "('--experiment', required=True, type=str, help=\n 'For example ce132011516346c99185d139fb23c70c')\n", (771, 870), False, 'import click\n'), ((867, 976), 'click.option', 'click.option', (['"""--weights-path"""'], {'required': '(True)', 'type': 'str', 'help': '"""For example epoch=25-val_mae=8.2030.ckpt"""'}), "('--weights-path', required=True, type=str, help=\n 'For example epoch=25-val_mae=8.2030.ckpt')\n", (879, 976), False, 'import click\n'), ((1032, 1066), 'utils.get_api_and_experiment', 'get_api_and_experiment', (['experiment'], {}), '(experiment)\n', (1054, 1066), False, 'from utils import get_api_and_experiment, f1_semeval, fill_holes_in_row, remove_ones_in_row, fill_holes_in_row_three\n'), ((2338, 2428), 'dataset.DatasetModule', 'DatasetModule', ([], {'data_dir': '"""data/spans"""', 'tokenizer': 'tokenizer', 'batch_size': '(4)', 'length': 'length'}), "(data_dir='data/spans', tokenizer=tokenizer, batch_size=4,\n length=length)\n", (2351, 2428), False, 'from dataset import DatasetModule\n'), ((2557, 2674), 'model.LitModule.load_from_checkpoint', 'LitModule.load_from_checkpoint', (['weights_path'], {'model': 'model_backbone', 'tokenizer': 'tokenizer', 'lr': '(4.7e-05)', 'freeze': '(False)'}), '(weights_path, model=model_backbone,\n tokenizer=tokenizer, lr=4.7e-05, freeze=False)\n', (2587, 2674), False, 'from model import LitModule\n'), ((4288, 4311), 'numpy.mean', 'np.mean', (['f1_semeval_avg'], {}), '(f1_semeval_avg)\n', (4295, 4311), True, 'import numpy as np\n'), ((4580, 4603), 'numpy.mean', 'np.mean', (['f1_semeval_avg'], {}), '(f1_semeval_avg)\n', (4587, 4603), True, 'import numpy as np\n'), ((4878, 4901), 'numpy.mean', 'np.mean', (['f1_semeval_avg'], {}), '(f1_semeval_avg)\n', 
(4885, 4901), True, 'import numpy as np\n'), ((5171, 5194), 'numpy.mean', 'np.mean', (['f1_semeval_avg'], {}), '(f1_semeval_avg)\n', (5178, 5194), True, 'import numpy as np\n'), ((5450, 5473), 'numpy.mean', 'np.mean', (['f1_semeval_avg'], {}), '(f1_semeval_avg)\n', (5457, 5473), True, 'import numpy as np\n'), ((2787, 2804), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2798, 2804), False, 'from collections import defaultdict\n'), ((2872, 2887), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2885, 2887), False, 'import torch\n'), ((4132, 4217), 'utils.f1_semeval', 'f1_semeval', (["result_spans[sentence_id]['true']", "result_spans[sentence_id]['pred']"], {}), "(result_spans[sentence_id]['true'], result_spans[sentence_id]['pred']\n )\n", (4142, 4217), False, 'from utils import get_api_and_experiment, f1_semeval, fill_holes_in_row, remove_ones_in_row, fill_holes_in_row_three\n'), ((3179, 3205), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (3188, 3205), True, 'import numpy as np\n'), ((3943, 3963), 'numpy.array', 'np.array', (['true_spans'], {}), '(true_spans)\n', (3951, 3963), True, 'import numpy as np\n'), ((4052, 4077), 'numpy.array', 'np.array', (['predicted_spans'], {}), '(predicted_spans)\n', (4060, 4077), True, 'import numpy as np\n')] |
"""Multi classifier testing"""
__author__ = 'thor'
import csv
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from datetime import datetime
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
from sklearn import ensemble
from sklearn.feature_extraction import text
from sklearn import feature_extraction
from sklearn import feature_selection
from sklearn import preprocessing
from sklearn import decomposition
from sklearn import linear_model
from sklearn import metrics
from sklearn import naive_bayes
from sklearn import svm
from sklearn import tree
from ut.util.log import printProgress
from ut.daf.manip import reorder_columns_as
default_scorers = {
'accuracy': metrics.accuracy_score
}
default_score_aggreg = [
np.mean,
np.min
]
default_classifiers = [
svm.LinearSVC(random_state=0),
svm.SVC(random_state=0),
linear_model.LogisticRegression(),
tree.DecisionTreeClassifier(),
naive_bayes.BernoulliNB(),
naive_bayes.MultinomialNB(),
naive_bayes.GaussianNB(),
linear_model.SGDClassifier(),
linear_model.RidgeClassifier(),
ensemble.RandomForestClassifier(n_estimators=10)
]
plt.style.use('fivethirtyeight')
_PLT_LEGEND_OPTIONS = dict(loc="upper center",
bbox_to_anchor=(0.5, -0.15),
fancybox=True,
shadow=True,
ncol=3)
colors = [ii.strip() for ii in '#30a2da, #fc4f30, #e5ae38, #6d904f, #8b8b8b'.split(',')]
colors += ['#' + ii.strip() for ii in
'348ABD, A60628, 7A68A6, 467821,D55E00, CC79A7, 56B4E9, 009E73, F0E442, 0072B2'.split(',')]
markers = itertools.cycle(["o", "D"])
colors = itertools.cycle(colors)
def score_classifier(X, y, clf, nfeats=None,
scoring=default_scorers, score_aggreg=default_score_aggreg,
scale=None, decompose=None, select=None, decompose_params={},
nfolds=10, shuffle=True, random_fold_state=None,
include_train_stats=False):
"""
Tests the CLF classifier with NFOLDS of train/test splits, and scores the results using one or several score
functions (specified by SCORING), returning a pandas Series listing the aggregate(s) (specified by SCORE_AGGREG)
of the scores.
"""
# give scoring and score_aggreg elements some names
scoring = scoring or default_scorers
scoring = mk_scoring_dict(scoring)
score_aggreg = score_aggreg or default_score_aggreg
score_aggreg = mk_score_aggreg_dict(score_aggreg)
if nfeats is None:
nfeats = np.shape(X)[1]
# X = X[:, :nfeats]
stratified_k_fold = StratifiedKFold(y, n_folds=nfolds,
shuffle=shuffle,
random_state=random_fold_state)
score_info = list()
for train, test in stratified_k_fold:
d = dict()
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
if include_train_stats:
d['train_pts'] = np.shape(X_train)[0]
d['train_nfeats'] = np.shape(X_train)[1]
pipeline_steps = list()
if scale: # preprocessing.StandardScaler(), preprocessing.MinMaxScaler()
pipeline_steps.append(('scale', scale))
if decompose:
pipeline_steps.append(('decompose', decompose))
if select:
pipeline_steps.append(('select', feature_selection.SelectKBest(k=nfeats)))
else:
X = X[:, :nfeats]
pipeline_steps.append(('clf', clf))
pipeline = Pipeline(steps=pipeline_steps)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_test)
for score_name, score_fun in scoring.items():
d[score_name] = score_fun(y_test, y_pred)
score_info.append(d)
# return score_info
score_info = pd.DataFrame(score_info)
score_result = pd.Series()
for score_aggreg_name, score_aggreg_fun in score_aggreg.items():
t = score_info.apply(score_aggreg_fun)
t.set_axis(axis=0,
labels=[mk_aggreg_score_name(score_aggreg_name, score_name) for score_name in t.index.values])
score_result = score_result.append(t)
return score_result
def test_classifiers(X, y,
scoring=default_scorers,
score_aggreg=default_score_aggreg,
n_features=7,
# an int will be transformed to a list (with different num of features) of given size
clfs=None,
nfolds=10,
scale=None,
decompose=None,
select=None,
decompose_params={},
print_progress=False,
score_to_plot=None
):
"""
tests and scores (given by SCORING and SCORE_AGGREG) several classifiers (given by clfs) with several number of
features, returning a pandas DataFrame of the results.
"""
scoring = scoring or default_scorers
score_aggreg = score_aggreg or default_score_aggreg
if isinstance(n_features, int): # if n_features is an int, it's the number of different feature set lens to try out
# ... so make this feature set len list
total_n_features = np.shape(X)[1]
n_features = list(range(1, total_n_features + 1, int(np.floor(total_n_features / n_features))))[:n_features]
y = np.asarray(y, dtype="|S6")
n_features = np.array(n_features)
if clfs is None:
clfs = default_classifiers
clfs = clfs_to_dict_clfs(clfs)
general_info_dict = dict()
if scale is not None and scale is not False: # preprocessing.StandardScaler(), preprocessing.MinMaxScaler()
if scale is True:
scale = preprocessing.StandardScaler()
general_info_dict['scale'] = get_name(scale)
if decompose is not None and decompose is not False:
if decompose is True:
decompose = decomposition.PCA(
**decompose_params) # PCA, KernelPCA, ProbabilisticPCA, RandomizedPCA, TruncatedSVD
general_info_dict['decompose'] = get_name(decompose)
clf_results = list()
for i_nfeats, nfeats in enumerate(n_features):
for i_clf, clf in enumerate(clfs):
clf_name = list(clf.keys())[0]
clf = clf[clf_name]
d = dict(general_info_dict, **{'model': clf_name, 'nfeats': nfeats})
if print_progress:
printProgress("{}: nfeats={}, nfolds={}".format(
clf_name,
n_features[i_nfeats],
nfolds))
# try:
start_time = datetime.now()
score_result = \
score_classifier(X,
y,
clf=clf,
nfeats=nfeats,
scoring=scoring,
score_aggreg=score_aggreg,
nfolds=nfolds,
scale=scale,
decompose=decompose,
select=select,
decompose_params=decompose_params)
d.update({'seconds': (datetime.now() - start_time).total_seconds()})
d.update(score_result.to_dict())
# except ValueError as e:
# raise e
# print("Error with: {} ({} features)".format(get_name(clf),
# n_features[i_nfeats]))
clf_results.append(d) # accumulate results
clf_results = pd.DataFrame(clf_results)
if score_to_plot:
if score_to_plot is True:
score_to_plot = mk_aggreg_score_name(score_aggreg_name=list(mk_score_aggreg_dict(score_aggreg).keys())[0],
score_name=list(mk_scoring_dict(scoring).keys())[0])
plot_score(clf_results, score_to_plot)
return reorder_columns_as(clf_results, ['model', 'nfeats', 'seconds'])
def clfs_to_dict_clfs(clfs):
for i, clf in enumerate(clfs):
if not isinstance(clf, dict):
clfs[i] = {get_name(clf): clf}
return clfs
def decompose_data(X, decompose, n_components=None, y=None, decompose_params={}):
if n_components is None:
n_components = np.shape(X)[1]
try:
decomposer = decompose(n_components=n_components, whiten=True, **decompose_params)
except TypeError:
print(("No whiten option in {}".format(decompose)))
decomposer = decompose(n_components=n_components, **decompose_params)
try:
if y is None:
decomposer.fit(X)
else:
decomposer.fit(X, y)
except ValueError:
decomposer = decompose(n_components=n_components - 1, **decompose_params)
if y is None:
decomposer.fit(X)
else:
decomposer.fit(X, y)
return decomposer.transform(X)
def plot_score(clf_results, score_to_plot, parameter='nfeats', **kwargs):
# defaults
kwargs = dict(dict(figsize=(7, 5)), **kwargs)
t = clf_results[['model', parameter, score_to_plot]] \
.set_index(['model', parameter]).unstack('model')[score_to_plot]
ax = t.plot(**kwargs)
plt.xlabel(parameter)
plt.ylabel(score_to_plot)
plt.title("{} vs {}".format(score_to_plot, parameter))
return ax
def get_name(obj):
if hasattr(obj, '__name__'):
return obj.__name__
else:
return type(obj).__name__
def mk_scoring_dict(scoring):
if not isinstance(scoring, dict):
if not hasattr(scoring, '__iter__'):
scoring = [scoring]
scoring = {x.__name__: x for x in scoring}
return scoring
def mk_score_aggreg_dict(score_aggreg):
if not isinstance(score_aggreg, dict):
if not hasattr(score_aggreg, '__iter__'):
score_aggreg = {'': score_aggreg}
else:
score_aggreg = {x.__name__: x for x in score_aggreg}
return score_aggreg
def mk_aggreg_score_name(score_aggreg_name, score_name):
if score_aggreg_name:
return score_aggreg_name + '_' + score_name
else:
return score_name
def __main__():
from sklearn.svm import LinearSVC
from sklearn.datasets import load_iris
iris = load_iris()
X, y = iris.data, iris.target
clf_results = test_classifiers(X, y,
scoring=metrics.accuracy_score,
n_features=list(range(1, np.shape(X)[1])),
clfs=None,
print_progress=False,
score_to_plot=None)
print(clf_results)
| [
"sklearn.datasets.load_iris",
"sklearn.preprocessing.StandardScaler",
"numpy.floor",
"sklearn.tree.DecisionTreeClassifier",
"numpy.shape",
"matplotlib.pyplot.style.use",
"sklearn.svm.SVC",
"itertools.cycle",
"pandas.DataFrame",
"sklearn.linear_model.SGDClassifier",
"ut.daf.manip.reorder_columns_... | [((1222, 1254), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (1235, 1254), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1745), 'itertools.cycle', 'itertools.cycle', (["['o', 'D']"], {}), "(['o', 'D'])\n", (1733, 1745), False, 'import itertools\n'), ((1755, 1778), 'itertools.cycle', 'itertools.cycle', (['colors'], {}), '(colors)\n', (1770, 1778), False, 'import itertools\n'), ((868, 897), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (881, 897), False, 'from sklearn import svm\n'), ((903, 926), 'sklearn.svm.SVC', 'svm.SVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (910, 926), False, 'from sklearn import svm\n'), ((932, 965), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {}), '()\n', (963, 965), False, 'from sklearn import linear_model\n'), ((971, 1000), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (998, 1000), False, 'from sklearn import tree\n'), ((1006, 1031), 'sklearn.naive_bayes.BernoulliNB', 'naive_bayes.BernoulliNB', ([], {}), '()\n', (1029, 1031), False, 'from sklearn import naive_bayes\n'), ((1037, 1064), 'sklearn.naive_bayes.MultinomialNB', 'naive_bayes.MultinomialNB', ([], {}), '()\n', (1062, 1064), False, 'from sklearn import naive_bayes\n'), ((1070, 1094), 'sklearn.naive_bayes.GaussianNB', 'naive_bayes.GaussianNB', ([], {}), '()\n', (1092, 1094), False, 'from sklearn import naive_bayes\n'), ((1100, 1128), 'sklearn.linear_model.SGDClassifier', 'linear_model.SGDClassifier', ([], {}), '()\n', (1126, 1128), False, 'from sklearn import linear_model\n'), ((1134, 1164), 'sklearn.linear_model.RidgeClassifier', 'linear_model.RidgeClassifier', ([], {}), '()\n', (1162, 1164), False, 'from sklearn import linear_model\n'), ((1170, 1218), 'sklearn.ensemble.RandomForestClassifier', 'ensemble.RandomForestClassifier', ([], 
{'n_estimators': '(10)'}), '(n_estimators=10)\n', (1201, 1218), False, 'from sklearn import ensemble\n'), ((2726, 2814), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['y'], {'n_folds': 'nfolds', 'shuffle': 'shuffle', 'random_state': 'random_fold_state'}), '(y, n_folds=nfolds, shuffle=shuffle, random_state=\n random_fold_state)\n', (2741, 2814), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((3949, 3973), 'pandas.DataFrame', 'pd.DataFrame', (['score_info'], {}), '(score_info)\n', (3961, 3973), True, 'import pandas as pd\n'), ((3993, 4004), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (4002, 4004), True, 'import pandas as pd\n'), ((5548, 5574), 'numpy.asarray', 'np.asarray', (['y'], {'dtype': '"""|S6"""'}), "(y, dtype='|S6')\n", (5558, 5574), True, 'import numpy as np\n'), ((5592, 5612), 'numpy.array', 'np.array', (['n_features'], {}), '(n_features)\n', (5600, 5612), True, 'import numpy as np\n'), ((7791, 7816), 'pandas.DataFrame', 'pd.DataFrame', (['clf_results'], {}), '(clf_results)\n', (7803, 7816), True, 'import pandas as pd\n'), ((8153, 8216), 'ut.daf.manip.reorder_columns_as', 'reorder_columns_as', (['clf_results', "['model', 'nfeats', 'seconds']"], {}), "(clf_results, ['model', 'nfeats', 'seconds'])\n", (8171, 8216), False, 'from ut.daf.manip import reorder_columns_as\n'), ((9443, 9464), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['parameter'], {}), '(parameter)\n', (9453, 9464), True, 'import matplotlib.pyplot as plt\n'), ((9469, 9494), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['score_to_plot'], {}), '(score_to_plot)\n', (9479, 9494), True, 'import matplotlib.pyplot as plt\n'), ((10478, 10489), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (10487, 10489), False, 'from sklearn.datasets import load_iris\n'), ((3656, 3686), 'sklearn.pipeline.Pipeline', 'Pipeline', ([], {'steps': 'pipeline_steps'}), '(steps=pipeline_steps)\n', (3664, 3686), False, 'from sklearn.pipeline import Pipeline\n'), ((2661, 2672), 
'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (2669, 2672), True, 'import numpy as np\n'), ((5408, 5419), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (5416, 5419), True, 'import numpy as np\n'), ((5897, 5927), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (5925, 5927), False, 'from sklearn import preprocessing\n'), ((6092, 6129), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {}), '(**decompose_params)\n', (6109, 6129), False, 'from sklearn import decomposition\n'), ((6791, 6805), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6803, 6805), False, 'from datetime import datetime\n'), ((8516, 8527), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (8524, 8527), True, 'import numpy as np\n'), ((3118, 3135), 'numpy.shape', 'np.shape', (['X_train'], {}), '(X_train)\n', (3126, 3135), True, 'import numpy as np\n'), ((3171, 3188), 'numpy.shape', 'np.shape', (['X_train'], {}), '(X_train)\n', (3179, 3188), True, 'import numpy as np\n'), ((3505, 3544), 'sklearn.feature_selection.SelectKBest', 'feature_selection.SelectKBest', ([], {'k': 'nfeats'}), '(k=nfeats)\n', (3534, 3544), False, 'from sklearn import feature_selection\n'), ((5484, 5523), 'numpy.floor', 'np.floor', (['(total_n_features / n_features)'], {}), '(total_n_features / n_features)\n', (5492, 5523), True, 'import numpy as np\n'), ((10693, 10704), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (10701, 10704), True, 'import numpy as np\n'), ((7405, 7419), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7417, 7419), False, 'from datetime import datetime\n')] |
import numpy
from trefoil.analysis.summary import summarize_areas_by_category, calculate_weighted_statistics
from trefoil.utilities.window import Window
# Days per month from Tim, starting with January. Useful for weighting statistics when rolling months up to year.
# Assumes 365 day calendar with no leap years
DAYS_PER_MONTH = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
MONTH_LABELS = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
def extract_categorical_timeseries_by_area(values, areas, timestep_indicies, window=None):
"""
Extract a timeseries array for each category found within the values of variable, using the areas occupied by
each category within each pixel.
:param values: the values for a given variable from the netcdf file. Must be organized on only 3 dimensions: time, row, col
:param areas: the areas of each pixel to count toward total area of given category in each timestep
:param timestep_indicies: the indices over which to extract the time series from the values of variable
#:param row_offset: the offset from the starting row coordinate of values to the starting row coordinate of areas
#:param col_offset: the offset from the starting column coordinate of values to the starting column coordinate of areas
:param window: the subdomain coordinates of areas within values. Must
:return: a dictionary of the categorical values found, each with a full timeseries
"""
assert len(values.shape) == 3
if window is not None:
assert isinstance(window, Window)
results = dict()
num_timesteps = len(timestep_indicies)
for index in timestep_indicies:
if window is not None:
data = window.clip(values, index).ravel()
else:
data = values[index].ravel()
data = numpy.ma.masked_array(data, mask=areas.mask)
category_summary = summarize_areas_by_category(data.astype("i"), areas)
for category in category_summary:
if not category in results:
results[category] = numpy.zeros(num_timesteps)
results[category][index] = category_summary[category]
return results
def extract_statistics_timeseries_by_weight(values, weights, statistics, window=None):
"""
Extract weighted time series statistics,
:param values: the values for a given variable from the netcdf file. Must be organized on only 3 dimensions: time, row, col
:param weights: the weight of each pixel for the statistic
:param statistics: a tuple indicating the statistics to be calculated, e.g., ("MEAN", "STD"). Note: statistics
do not account for different weights between time periods (e.g., months of different durations).
:param window: the subdomain coordinates of areas within values.
:return: a dictionary of statistic name to time series array
"""
assert len(values.shape) == 3
if window is not None:
assert isinstance(window, Window)
results = dict()
for statistic in statistics:
results[statistic] = numpy.zeros(values.shape[0])
for index in xrange(values.shape[0]):
if window is not None:
data = window.clip(values, index).ravel()
else:
data = values[index].ravel()
data = numpy.ma.masked_array(data, mask=weights.mask)
statistics_results = calculate_weighted_statistics(data, weights, statistics)
for stat_index, statistic in enumerate(statistics):
results[statistic][index] = statistics_results[stat_index]
return results
def linear_regression(timesteps, values, full=False):
"""Perform linear regression using linear algebra operators
Note: does not account for missing data within time series.
:param timesteps: 1D array of timesteps to use for x value of linear regression
:param values: 3D array of data to use for y value of linear regression, assumes timestep is first axis
:param full: return full statistics or just slopes & intercepts. Default is False. If True, requires scipy.
:returns: (slopes, intercepts) or (slopes, intercepts, r-squared, p-value) if full is True
"""
# ideas from:
# http://stackoverflow.com/questions/20343500/efficient-1d-linear-regression-for-each-element-of-3d-numpy-array
# http://stackoverflow.com/questions/3054191/converting-numpy-lstsq-residual-value-to-r2
# p-value calculation derived from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/stats.py
assert len(values.shape) == 3
assert values.shape[0] == timesteps.shape[0]
shape = values.shape
y = values.reshape((shape[0], shape[1] * shape[2]))
fit, residuals = numpy.linalg.lstsq(numpy.c_[timesteps, numpy.ones_like(timesteps)], y)[:2]
slopes = fit[0].reshape((shape[1], shape[2]))
intercepts = fit[1].reshape((shape[1], shape[2]))
mask = None
if hasattr(values, 'mask'):
mask = values.mask[0]
slopes = numpy.ma.masked_array(slopes, mask=mask)
intercepts = numpy.ma.masked_array(intercepts, mask=mask)
if not full:
return slopes, intercepts
# T-distribution used for p-value requires scipy
from scipy.stats.distributions import t as t_dist
# Calculate R2 value
r2 = (1 - residuals / (y.shape[0] * y.var(axis=0)))
r = numpy.sqrt(r2)
r2 = r2.reshape((shape[1], shape[2]))
# Calculate p-value
tiny = 1.0e-20
df = timesteps.shape[0] - 2
t = r * numpy.sqrt(df / ((1.0 - r + tiny)*(1.0 + r + tiny)))
p = (2 * t_dist.sf(numpy.abs(t), df)).reshape(shape[1], shape[2])
if mask is not None:
r2 = numpy.ma.masked_array(r2, mask=mask)
p = numpy.ma.masked_array(p, mask=mask)
return slopes, intercepts, r2, p
| [
"numpy.ones_like",
"numpy.abs",
"numpy.zeros",
"trefoil.analysis.summary.calculate_weighted_statistics",
"numpy.ma.masked_array",
"numpy.sqrt"
] | [((5469, 5483), 'numpy.sqrt', 'numpy.sqrt', (['r2'], {}), '(r2)\n', (5479, 5483), False, 'import numpy\n'), ((1886, 1930), 'numpy.ma.masked_array', 'numpy.ma.masked_array', (['data'], {'mask': 'areas.mask'}), '(data, mask=areas.mask)\n', (1907, 1930), False, 'import numpy\n'), ((3150, 3178), 'numpy.zeros', 'numpy.zeros', (['values.shape[0]'], {}), '(values.shape[0])\n', (3161, 3178), False, 'import numpy\n'), ((3384, 3430), 'numpy.ma.masked_array', 'numpy.ma.masked_array', (['data'], {'mask': 'weights.mask'}), '(data, mask=weights.mask)\n', (3405, 3430), False, 'import numpy\n'), ((3461, 3517), 'trefoil.analysis.summary.calculate_weighted_statistics', 'calculate_weighted_statistics', (['data', 'weights', 'statistics'], {}), '(data, weights, statistics)\n', (3490, 3517), False, 'from trefoil.analysis.summary import summarize_areas_by_category, calculate_weighted_statistics\n'), ((5099, 5139), 'numpy.ma.masked_array', 'numpy.ma.masked_array', (['slopes'], {'mask': 'mask'}), '(slopes, mask=mask)\n', (5120, 5139), False, 'import numpy\n'), ((5162, 5206), 'numpy.ma.masked_array', 'numpy.ma.masked_array', (['intercepts'], {'mask': 'mask'}), '(intercepts, mask=mask)\n', (5183, 5206), False, 'import numpy\n'), ((5620, 5674), 'numpy.sqrt', 'numpy.sqrt', (['(df / ((1.0 - r + tiny) * (1.0 + r + tiny)))'], {}), '(df / ((1.0 - r + tiny) * (1.0 + r + tiny)))\n', (5630, 5674), False, 'import numpy\n'), ((5786, 5822), 'numpy.ma.masked_array', 'numpy.ma.masked_array', (['r2'], {'mask': 'mask'}), '(r2, mask=mask)\n', (5807, 5822), False, 'import numpy\n'), ((5836, 5871), 'numpy.ma.masked_array', 'numpy.ma.masked_array', (['p'], {'mask': 'mask'}), '(p, mask=mask)\n', (5857, 5871), False, 'import numpy\n'), ((2133, 2159), 'numpy.zeros', 'numpy.zeros', (['num_timesteps'], {}), '(num_timesteps)\n', (2144, 2159), False, 'import numpy\n'), ((4856, 4882), 'numpy.ones_like', 'numpy.ones_like', (['timesteps'], {}), '(timesteps)\n', (4871, 4882), False, 'import numpy\n'), ((5697, 5709), 
'numpy.abs', 'numpy.abs', (['t'], {}), '(t)\n', (5706, 5709), False, 'import numpy\n')] |
""" DataManager organizing the data for the benchmarks.
DataManager organizing the download of the data. Each data set should have an
own DataManger. The load function of a DataManger downloads the data from a
given online source and splits the data train, test and optional validation
splits.
For OpenML data sets (defined by task id or similar) please use the
hpolib.util.openml_data_manager.
"""
import abc
import gzip
import logging
import pickle
import tarfile
from io import BytesIO
from pathlib import Path
from typing import Tuple, Dict
from urllib.request import urlretrieve, urlopen
from zipfile import ZipFile
from time import time
import numpy as np
try:
from oslo_concurrency import lockutils
except ImportError:
print("oslo_concurrency not installed, can't download datasets for nasbench201 (not needed for containers)")
import hpolib
class DataManager(object, metaclass=abc.ABCMeta):
""" Base Class for loading and managing the data.
Attributes
----------
logger : logging.Logger
"""
def __init__(self):
self.logger = logging.getLogger("DataManager")
@abc.abstractmethod
def load(self):
""" Loads data from data directory as defined in
config_file.data_directory
"""
raise NotImplementedError()
def create_save_directory(self, save_dir: Path):
""" Helper function. Check if data directory exists. If not, create it.
Parameters
----------
save_dir : Path
Path to the directory. where the data should be stored
"""
if not save_dir.is_dir():
self.logger.debug(f'Create directory {save_dir}')
save_dir.mkdir(parents=True, exist_ok=True)
class HoldoutDataManager(DataManager):
""" Base Class for loading and managing the Holdout data sets.
Attributes
----------
X_train : np.ndarray
y_train : np.ndarray
X_val : np.ndarray
y_val : np.ndarray
X_test : np.ndarray
y_test : np.ndarray
"""
def __init__(self):
super().__init__()
self.X_train = None
self.y_train = None
self.X_val = None
self.y_val = None
self.X_test = None
self.y_test = None
class CrossvalidationDataManager(DataManager):
"""
Base Class for loading and managing the cross-validation data sets.
Attributes
----------
X_train : np.ndarray
y_train : np.ndarray
X_test : np.ndarray
y_test : np.ndarray
"""
def __init__(self):
super().__init__()
self.X_train = None
self.y_train = None
self.X_test = None
self.y_test = None
class MNISTData(HoldoutDataManager):
"""Class implementing the HoldoutDataManger, managing the MNIST data set"""
def __init__(self):
super(MNISTData, self).__init__()
self._url_source = 'http://yann.lecun.com/exdb/mnist/'
self._save_to = hpolib.config_file.data_dir / "MNIST"
self.create_save_directory(self._save_to)
def load(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray,
np.ndarray, np.ndarray, np.ndarray]:
"""
Loads MNIST from data directory as defined in
config_file.data_directory. Downloads data if necessary. Code is copied
and modified from the Lasagne tutorial.
Returns
-------
X_train : np.ndarray
y_train : np.ndarray
X_val : np.ndarray
y_val : np.ndarray
X_test : np.ndarray
y_test : np.ndarray
"""
X_train = self.__load_data(filename='train-images-idx3-ubyte.gz',
images=True)
y_train = self.__load_data(filename='train-labels-idx1-ubyte.gz')
X_test = self.__load_data(filename='t10k-images-idx3-ubyte.gz',
images=True)
y_test = self.__load_data(filename='t10k-labels-idx1-ubyte.gz')
# Split data in training and validation
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
assert X_train.shape[0] == 50000, X_train.shape
assert X_val.shape[0] == 10000, X_val.shape
assert X_test.shape[0] == 10000, X_test.shape
# Reshape data to NxD
X_train = X_train.reshape(X_train.shape[0], 28 * 28)
X_val = X_val.reshape(X_val.shape[0], 28 * 28)
X_test = X_test.reshape(X_test.shape[0], 28 * 28)
return X_train, y_train, X_val, y_val, X_test, y_test
def __load_data(self, filename: str, images: bool = False) -> np.ndarray:
"""
Loads data in Yann LeCun's binary format as available under
'http://yann.lecun.com/exdb/mnist/'.
If necessary downloads data, otherwise loads data from data_directory
Parameters
----------
filename : str
file to download
images : bool
if True converts data to X
Returns
-------
np.ndarray
"""
# 1) If necessary download data
save_fl = self._save_to / filename
if not save_fl.exists():
self.logger.debug(f"Downloading {self._url_source + filename} "
f"to {save_fl}")
urlretrieve(self._url_source + filename, str(save_fl))
else:
self.logger.debug(f"Load data {save_fl}")
# 2) Read in data
if images:
with gzip.open(save_fl, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# Follow the shape convention: (examples, channels, rows, columns)
data = data.reshape(-1, 1, 28, 28)
# Convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at: http://deeplearning.net/data/mnist/mnist.pkl.gz.
data = data / np.float32(256)
else:
with gzip.open(save_fl, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
return data
class MNISTDataCrossvalidation(MNISTData, CrossvalidationDataManager):
""" Class loading the MNIST data set. """
def load(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Loads MNIST from data directory as defined in
config_file.data_directory. Downloads data if necessary.
Returns
-------
X_train : np.ndarray
y_train : np.ndarray
X_test : np.ndarray
y_test : np.ndarray
"""
X_train, y_train, X_val, y_val, X_test, y_test = \
super(MNISTDataCrossvalidation, self).load()
X_train = np.concatenate([X_train, X_val], axis=0)
y_train = np.concatenate([y_train, y_val], axis=0)
return X_train, y_train, X_test, y_test
class CIFAR10Data(DataManager):
""" Class loading the Cifar10 data set. """
def __init__(self):
super(CIFAR10Data, self).__init__()
self._url_source = 'https://www.cs.toronto.edu/~kriz/' \
'cifar-10-python.tar.gz'
self._save_to = hpolib.config_file.data_dir / "cifar10"
self.create_save_directory(self._save_to)
def load(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray,
np.ndarray, np.ndarray, np.ndarray]:
"""
Loads CIFAR10 from data directory as defined in
config_file.data_directory. Downloads data if necessary.
Returns
-------
X_train : np.ndarray
y_train : np.ndarray
X_val : np.ndarray
y_val : np.ndarray
X_test : np.ndarray
y_test : np.ndarray
"""
xs = []
ys = []
for j in range(5):
fh = open(self.__load_data(filename=f'data_batch_{j + 1}'), "rb")
d = pickle.load(fh, encoding='latin1')
fh.close()
x = d['data']
y = d['labels']
xs.append(x)
ys.append(y)
fh = open(self.__load_data(filename='test_batch'), "rb")
d = pickle.load(fh, encoding='latin1')
fh.close()
xs.append(d['data'])
ys.append(d['labels'])
x = np.concatenate(xs) / np.float32(255)
y = np.concatenate(ys)
x = np.dstack((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))
x = x.reshape((x.shape[0], 32, 32, 3)).transpose(0, 3, 1, 2)
# Subtract per-pixel mean
pixel_mean = np.mean(x[0:50000], axis=0)
x -= pixel_mean
# Split in training, validation and test
X_train = x[:40000, :, :, :]
y_train = y[:40000]
X_valid = x[40000:50000, :, :, :]
y_valid = y[40000:50000]
X_test = x[50000:, :, :, :]
y_test = y[50000:]
return X_train, y_train, X_valid, y_valid, X_test, y_test
def __load_data(self, filename: str) -> Path:
"""
Loads data in binary format as available under
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'.
Parameters
----------
filename : str
file to download
Returns
-------
Path
"""
save_fl = self._save_to / 'cifar-10-batches-py' / filename
if not save_fl.exists():
self.logger.debug(f'Downloading {self._url_source} to {save_fl}')
urlretrieve(self._url_source,
self._save_to / "cifar-10-python.tar.gz")
tar = tarfile.open(self._save_to / "cifar-10-python.tar.gz")
tar.extractall(self._save_to)
else:
self.logger.debug("Load data %s", save_fl)
return save_fl
class SVHNData(DataManager):
""" Class loading the house numbers data set.
Attributes
----------
n_train_all : int
n_valid : int
n_train : int
n_test : int
"""
def __init__(self):
super(SVHNData, self).__init__()
self._url_source = 'http://ufldl.stanford.edu/housenumbers/'
self._save_to = hpolib.config_file.data_dir / "svhn"
self.n_train_all = 73257
self.n_valid = 6000
self.n_train = self.n_train_all - self.n_valid
self.n_test = 26032
self.create_save_directory(self._save_to)
def load(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray,
np.ndarray, np.ndarray, np.ndarray]:
"""
Loads SVHN from data directory as defined in
config_file.data_directory. Downloads data if necessary.
Returns
-------
X_train : np.ndarray
y_train : np.ndarray
X_val : np.ndarray
y_val : np.ndarray
X_test : np.ndarray
y_test : np.ndarray
"""
X, y, X_test, y_test = self.__load_data("train_32x32.mat",
"test_32x32.mat")
# Change the label encoding from [1, ... 10] to [0, ..., 9]
y = y - 1
y_test = y_test - 1
X_train = X[:self.n_train, :, :, :]
y_train = y[:self.n_train]
X_valid = X[self.n_train:self.n_train_all, :, :, :]
y_valid = y[self.n_train:self.n_train_all]
X_train = np.array(X_train, dtype=np.float32)
X_valid = np.array(X_valid, dtype=np.float32)
X_test = np.array(X_test, dtype=np.float32)
all_X = [X_train, X_valid, X_test]
# Subtract per pixel mean
for X in all_X:
data_shape = X.shape
X = X.reshape(X.shape[0], np.product(X.shape[1:]))
X -= X.mean(axis=1)[:, np.newaxis]
X = X.reshape(data_shape)
return X_train, y_train[:, 0], X_valid, y_valid[:, 0], X_test, y_test[:, 0]
def __load_data(self, filename_train: str,
filename_test: str) -> Tuple[np.ndarray, np.ndarray,
np.ndarray, np.ndarray]:
"""
Loads data in binary format as available under
'http://ufldl.stanford.edu/housenumbers/'.
Parameters
----------
filename_train : str
file to download
filename_test : str
file to download
Returns
-------
Tuple[np.ndarray, np.ndarray, np.ndarray,
np.ndarray, np.ndarray, np.ndarray]
"""
def __load_x_y(file_name):
save_fl = self._save_to / file_name
if not save_fl.exists():
self.logger.debug(f"Downloading {self._url_source + file_name}"
f" to {save_fl}")
urlretrieve(self._url_source + file_name, save_fl)
else:
self.logger.debug(f"Load data {save_fl}")
from scipy.io import loadmat
data = loadmat(save_fl)
x = data['X'].T
y = data['y']
return x, y
X_train, y_train = __load_x_y(filename_train)
X_test, y_test = __load_x_y(filename_test)
return X_train, y_train, X_test, y_test
class NASBench_201Data(DataManager):
""" Download the necessary files for the nasbench201 benchmark. The benchmark has a data file for every pair of
data set (cifar10, cifar10-valid, cifar100, ImageNet16-120)
seed (777,888,999)
metric (train_acc1es, train_times, train_losses, eval_acc1es, eval_times, eval_losses)
Download for each data set the all corresponding data files.
The files should be hosted on automl.org.
For more information about the metric, have a look in the benchmark docstrings.
"""
def __init__(self, dataset: str):
"""
Init the NasbenchData Manager.
Parameters
----------
dataset : str
One of cifar10, cifar10-valid, cifar100, ImageNet16-120
"""
assert dataset in ['cifar10', 'cifar10-valid', 'cifar100', 'ImageNet16-120']
super(NASBench_201Data, self).__init__()
self.files = self.get_files_per_dataset(dataset)
self._save_dir = hpolib.config_file.data_dir / "nasbench_201"
self._url_source = 'https://www.automl.org/wp-content/uploads/2020/08/nasbench_201_data_v1.1.zip'
self.data = {}
self.create_save_directory(self._save_dir)
@staticmethod
def get_seeds_metrics():
from itertools import product
seeds = [777, 888, 999]
metrics = NASBench_201Data.get_metrics()
return product(seeds, metrics)
@staticmethod
def get_metrics():
return ['train_acc1es', 'train_losses', 'train_times',
'eval_acc1es', 'eval_times', 'eval_losses']
@staticmethod
def get_files_per_dataset(dataset):
seeds_metrics = NASBench_201Data.get_seeds_metrics()
files = [f'nb201_{dataset}_{seed}_{metric}.pkl' for seed, metric in seeds_metrics]
return files
@lockutils.synchronized('not_thread_process_safe', external=True,
lock_path=f'{hpolib.config_file.cache_dir}/lock_nasbench_201_data', delay=0.5)
def _download(self):
# Check if data is already downloaded. If a single file is missing, we have to download the complete zip again.
# Use a file lock to ensure that no two processes try to download the same files at the same time.
file_is_missing = not all([(self._save_dir / 'data' / file).exists() for file in self.files])
if not file_is_missing:
self.logger.debug('NasBench201DataManager: Data already downloaded')
else:
self.logger.info(f'NasBench201DataManager: Start downloading data from {self._url_source} '
f'to {self._save_dir}')
with urlopen(self._url_source) as zip_archive:
with ZipFile(BytesIO(zip_archive.read())) as zip_file:
zip_file.extractall(self._save_dir)
def _load(self) -> Dict:
""" Load the data from the file system """
import pickle
data = {}
for (seed, metric_name), file in zip(NASBench_201Data.get_seeds_metrics(), self.files):
with (self._save_dir / 'data' / file).open('rb') as fh:
metric = pickle.load(fh)
data[(seed, metric_name)] = metric
return data
def load(self) -> Dict:
""" Loads data from data directory as defined in config_file.data_directory"""
self.logger.debug('NasBench201DataManager: Starting to load data')
t = time()
self._download()
self.data = self._load()
self.logger.info(f'NasBench201DataManager: Data successfully loaded after {time() - t:.2f}')
return self.data
| [
"numpy.dstack",
"gzip.open",
"scipy.io.loadmat",
"numpy.float32",
"oslo_concurrency.lockutils.synchronized",
"urllib.request.urlopen",
"logging.getLogger",
"time.time",
"urllib.request.urlretrieve",
"numpy.product",
"pickle.load",
"numpy.mean",
"numpy.array",
"itertools.product",
"tarfil... | [((14909, 15057), 'oslo_concurrency.lockutils.synchronized', 'lockutils.synchronized', (['"""not_thread_process_safe"""'], {'external': '(True)', 'lock_path': 'f"""{hpolib.config_file.cache_dir}/lock_nasbench_201_data"""', 'delay': '(0.5)'}), "('not_thread_process_safe', external=True, lock_path=\n f'{hpolib.config_file.cache_dir}/lock_nasbench_201_data', delay=0.5)\n", (14931, 15057), False, 'from oslo_concurrency import lockutils\n'), ((1084, 1116), 'logging.getLogger', 'logging.getLogger', (['"""DataManager"""'], {}), "('DataManager')\n", (1101, 1116), False, 'import logging\n'), ((6741, 6781), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_val]'], {'axis': '(0)'}), '([X_train, X_val], axis=0)\n', (6755, 6781), True, 'import numpy as np\n'), ((6800, 6840), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_val]'], {'axis': '(0)'}), '([y_train, y_val], axis=0)\n', (6814, 6840), True, 'import numpy as np\n'), ((8144, 8178), 'pickle.load', 'pickle.load', (['fh'], {'encoding': '"""latin1"""'}), "(fh, encoding='latin1')\n", (8155, 8178), False, 'import pickle\n'), ((8321, 8339), 'numpy.concatenate', 'np.concatenate', (['ys'], {}), '(ys)\n', (8335, 8339), True, 'import numpy as np\n'), ((8352, 8406), 'numpy.dstack', 'np.dstack', (['(x[:, :1024], x[:, 1024:2048], x[:, 2048:])'], {}), '((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))\n', (8361, 8406), True, 'import numpy as np\n'), ((8532, 8559), 'numpy.mean', 'np.mean', (['x[0:50000]'], {'axis': '(0)'}), '(x[0:50000], axis=0)\n', (8539, 8559), True, 'import numpy as np\n'), ((11256, 11291), 'numpy.array', 'np.array', (['X_train'], {'dtype': 'np.float32'}), '(X_train, dtype=np.float32)\n', (11264, 11291), True, 'import numpy as np\n'), ((11310, 11345), 'numpy.array', 'np.array', (['X_valid'], {'dtype': 'np.float32'}), '(X_valid, dtype=np.float32)\n', (11318, 11345), True, 'import numpy as np\n'), ((11363, 11397), 'numpy.array', 'np.array', (['X_test'], {'dtype': 'np.float32'}), '(X_test, 
dtype=np.float32)\n', (11371, 11397), True, 'import numpy as np\n'), ((14482, 14505), 'itertools.product', 'product', (['seeds', 'metrics'], {}), '(seeds, metrics)\n', (14489, 14505), False, 'from itertools import product\n'), ((16508, 16514), 'time.time', 'time', ([], {}), '()\n', (16512, 16514), False, 'from time import time\n'), ((7904, 7938), 'pickle.load', 'pickle.load', (['fh'], {'encoding': '"""latin1"""'}), "(fh, encoding='latin1')\n", (7915, 7938), False, 'import pickle\n'), ((8272, 8290), 'numpy.concatenate', 'np.concatenate', (['xs'], {}), '(xs)\n', (8286, 8290), True, 'import numpy as np\n'), ((8293, 8308), 'numpy.float32', 'np.float32', (['(255)'], {}), '(255)\n', (8303, 8308), True, 'import numpy as np\n'), ((9432, 9503), 'urllib.request.urlretrieve', 'urlretrieve', (['self._url_source', "(self._save_to / 'cifar-10-python.tar.gz')"], {}), "(self._url_source, self._save_to / 'cifar-10-python.tar.gz')\n", (9443, 9503), False, 'from urllib.request import urlretrieve, urlopen\n'), ((9546, 9600), 'tarfile.open', 'tarfile.open', (["(self._save_to / 'cifar-10-python.tar.gz')"], {}), "(self._save_to / 'cifar-10-python.tar.gz')\n", (9558, 9600), False, 'import tarfile\n'), ((12836, 12852), 'scipy.io.loadmat', 'loadmat', (['save_fl'], {}), '(save_fl)\n', (12843, 12852), False, 'from scipy.io import loadmat\n'), ((5490, 5514), 'gzip.open', 'gzip.open', (['save_fl', '"""rb"""'], {}), "(save_fl, 'rb')\n", (5499, 5514), False, 'import gzip\n'), ((5952, 5967), 'numpy.float32', 'np.float32', (['(256)'], {}), '(256)\n', (5962, 5967), True, 'import numpy as np\n'), ((5999, 6023), 'gzip.open', 'gzip.open', (['save_fl', '"""rb"""'], {}), "(save_fl, 'rb')\n", (6008, 6023), False, 'import gzip\n'), ((11572, 11595), 'numpy.product', 'np.product', (['X.shape[1:]'], {}), '(X.shape[1:])\n', (11582, 11595), True, 'import numpy as np\n'), ((12648, 12698), 'urllib.request.urlretrieve', 'urlretrieve', (['(self._url_source + file_name)', 'save_fl'], {}), '(self._url_source + 
file_name, save_fl)\n', (12659, 12698), False, 'from urllib.request import urlretrieve, urlopen\n'), ((15738, 15763), 'urllib.request.urlopen', 'urlopen', (['self._url_source'], {}), '(self._url_source)\n', (15745, 15763), False, 'from urllib.request import urlretrieve, urlopen\n'), ((16217, 16232), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (16228, 16232), False, 'import pickle\n'), ((16657, 16663), 'time.time', 'time', ([], {}), '()\n', (16661, 16663), False, 'from time import time\n')] |
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Read back in the normalised data
df = pd.read_csv('norm_auto_mpg.csv', sep=',')
# Sort the data into output and input
input_data = [[row['cylinders'], row['displacement'], row['horsepower'], row['weight'], row['acceleration'],
row['model year'], row['origin']] for _, row in df.iterrows()]
output_data = [[row['mpg']] for _, row in df.iterrows()]
TRAINING_SIZE = 0.8
INPUT_SIZE = len(input_data[0])
OUTPUT_SIZE = len(output_data[0])
HIDDEN_NEURONS = 14
np.random.seed(1)
# Sort the data into training and testing inputs and outputs
training_input = input_data[int(len(input_data) * TRAINING_SIZE):]
testing_input = input_data[:int(len(input_data) * TRAINING_SIZE)]
training_output = output_data[int(len(input_data) * TRAINING_SIZE):]
testing_output = output_data[:int(len(input_data) * TRAINING_SIZE)]
# Set up our placeholder's i.e. the inputs and the output
input_x = tf.placeholder(np.float32, [None, INPUT_SIZE], name='input_x')
output_x = tf.placeholder(np.float32, [None, OUTPUT_SIZE], name='output_x')
# Hidden layer stuff
hidden_W = tf.Variable(tf.random_normal([INPUT_SIZE, HIDDEN_NEURONS]))
hidden_B = tf.Variable(tf.random_normal([HIDDEN_NEURONS]))
hidden_output = tf.sigmoid(tf.matmul(input_x, hidden_W) + hidden_B)
# Hidden layer 2
hidden_W2 = tf.Variable(tf.random_normal([HIDDEN_NEURONS, HIDDEN_NEURONS]))
hidden_B2 = tf.Variable(tf.random_normal([HIDDEN_NEURONS]))
hidden_output2 = tf.sigmoid(tf.matmul(hidden_output, hidden_W2) + hidden_B2)
# Output layer stuff
output_W = tf.Variable(tf.random_normal([HIDDEN_NEURONS, OUTPUT_SIZE]))
output = tf.sigmoid(tf.matmul(hidden_output2, output_W))
# Loss function:
cost = tf.reduce_mean(tf.square(output_x - output))
optimiser = tf.train.AdamOptimizer(0.01)
train = optimiser.minimize(cost)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
loss_ = []
res = []
for i in range(10000):
c_values = sess.run([train, cost, hidden_W, output_x], feed_dict={input_x: input_data, output_x: output_data})
# Append the loss to an array so we can see how the loss goes down
loss_.append(c_values[1])
for j, val in enumerate(testing_input):
conf = sess.run(output, feed_dict={input_x: [val]}).tolist()
# Find the difference to see how far off we are
res.append(1/testing_output[j][0] - 1/conf[0][0])
print(np.mean(res))
print(np.max(res) - np.min(res))
# plt.plot([i for i in range(len(loss_))], loss_)
plt.plot([i for i in range(len(res))], res)
plt.show()
| [
"numpy.random.seed",
"matplotlib.pyplot.show",
"pandas.read_csv",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.matmul",
"numpy.mean",
"tensorflow.random_normal",
"numpy.max",
"numpy.min",
"tensorflow.square",
"tensorflow.train.AdamOpt... | [((136, 177), 'pandas.read_csv', 'pd.read_csv', (['"""norm_auto_mpg.csv"""'], {'sep': '""","""'}), "('norm_auto_mpg.csv', sep=',')\n", (147, 177), True, 'import pandas as pd\n'), ((570, 587), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (584, 587), True, 'import numpy as np\n'), ((990, 1052), 'tensorflow.placeholder', 'tf.placeholder', (['np.float32', '[None, INPUT_SIZE]'], {'name': '"""input_x"""'}), "(np.float32, [None, INPUT_SIZE], name='input_x')\n", (1004, 1052), True, 'import tensorflow as tf\n'), ((1064, 1128), 'tensorflow.placeholder', 'tf.placeholder', (['np.float32', '[None, OUTPUT_SIZE]'], {'name': '"""output_x"""'}), "(np.float32, [None, OUTPUT_SIZE], name='output_x')\n", (1078, 1128), True, 'import tensorflow as tf\n'), ((1815, 1843), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.01)'], {}), '(0.01)\n', (1837, 1843), True, 'import tensorflow as tf\n'), ((1885, 1918), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1916, 1918), True, 'import tensorflow as tf\n'), ((1926, 1938), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1936, 1938), True, 'import tensorflow as tf\n'), ((2577, 2587), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2585, 2587), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1220), 'tensorflow.random_normal', 'tf.random_normal', (['[INPUT_SIZE, HIDDEN_NEURONS]'], {}), '([INPUT_SIZE, HIDDEN_NEURONS])\n', (1190, 1220), True, 'import tensorflow as tf\n'), ((1245, 1279), 'tensorflow.random_normal', 'tf.random_normal', (['[HIDDEN_NEURONS]'], {}), '([HIDDEN_NEURONS])\n', (1261, 1279), True, 'import tensorflow as tf\n'), ((1392, 1442), 'tensorflow.random_normal', 'tf.random_normal', (['[HIDDEN_NEURONS, HIDDEN_NEURONS]'], {}), '([HIDDEN_NEURONS, HIDDEN_NEURONS])\n', (1408, 1442), True, 'import tensorflow as tf\n'), ((1468, 1502), 'tensorflow.random_normal', 'tf.random_normal', (['[HIDDEN_NEURONS]'], {}), 
'([HIDDEN_NEURONS])\n', (1484, 1502), True, 'import tensorflow as tf\n'), ((1627, 1674), 'tensorflow.random_normal', 'tf.random_normal', (['[HIDDEN_NEURONS, OUTPUT_SIZE]'], {}), '([HIDDEN_NEURONS, OUTPUT_SIZE])\n', (1643, 1674), True, 'import tensorflow as tf\n'), ((1696, 1731), 'tensorflow.matmul', 'tf.matmul', (['hidden_output2', 'output_W'], {}), '(hidden_output2, output_W)\n', (1705, 1731), True, 'import tensorflow as tf\n'), ((1773, 1801), 'tensorflow.square', 'tf.square', (['(output_x - output)'], {}), '(output_x - output)\n', (1782, 1801), True, 'import tensorflow as tf\n'), ((2436, 2448), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (2443, 2448), True, 'import numpy as np\n'), ((1309, 1337), 'tensorflow.matmul', 'tf.matmul', (['input_x', 'hidden_W'], {}), '(input_x, hidden_W)\n', (1318, 1337), True, 'import tensorflow as tf\n'), ((1533, 1568), 'tensorflow.matmul', 'tf.matmul', (['hidden_output', 'hidden_W2'], {}), '(hidden_output, hidden_W2)\n', (1542, 1568), True, 'import tensorflow as tf\n'), ((2456, 2467), 'numpy.max', 'np.max', (['res'], {}), '(res)\n', (2462, 2467), True, 'import numpy as np\n'), ((2470, 2481), 'numpy.min', 'np.min', (['res'], {}), '(res)\n', (2476, 2481), True, 'import numpy as np\n')] |
from __future__ import print_function
from __future__ import division
import numpy as np
class OUNoise:
    """Ornstein-Uhlenbeck process noise generator.

    Produces temporally correlated noise that decays toward the mean ``mu``,
    commonly used for exploration in continuous-action RL (e.g. DDPG).

    Parameters:
        action_dimension: length of the noise vector.
        mu: long-run mean the process reverts to (default 0).
        theta: mean-reversion rate.
        sigma: scale of the Gaussian innovation.
    """

    def __init__(self, action_dimension, mu=0, theta=0.15, sigma=0.2):
        self.action_dimension = action_dimension
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        # reset() is the single place that initializes self.state; the
        # original duplicated the assignment here and then called reset().
        self.reset()

    def reset(self):
        """Reset the internal state to the mean ``mu``."""
        self.state = np.ones(self.action_dimension) * self.mu

    def noise(self):
        """Advance the process one step and return the new state vector."""
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
        self.state = x + dx
        return self.state
# if __name__ == '__main__':
# ou = OUNoise(3)
# noises = []
# for i in range(10000):
# noises.append(ou.noise())
#
# import matplotlib.pyplot as plt
# plt.plot(noises)
# plt.show()
| [
"numpy.ones"
] | [((356, 386), 'numpy.ones', 'np.ones', (['self.action_dimension'], {}), '(self.action_dimension)\n', (363, 386), True, 'import numpy as np\n'), ((461, 491), 'numpy.ones', 'np.ones', (['self.action_dimension'], {}), '(self.action_dimension)\n', (468, 491), True, 'import numpy as np\n')] |
#externel
import torch
import csv, os
import numpy as np
import pickle as pk
import networkx as nx
import scipy.sparse as sp
from torch_geometric.utils import to_undirected
from torch_geometric.datasets import WebKB, WikipediaNetwork
#internel
from utils.hermitian import *
def load_cora(q, path = '../../dataset/cora/', save_pk = False, K = 1):
    """Build the magnetic-Laplacian representation of the cora citation graph.

    Args:
        q: charge parameter passed to the Hermitian decomposition.
        path: folder containing ``cora.edges`` and ``cora.node_labels``.
        save_pk: when True, cache (A, L, eigenvectors, labels) as a pickle.
        K: Chebyshev polynomial order for the multi-order Laplacian.

    Returns:
        (A, multi_order_laplacian, eigenvectors, label dict keyed by node id).
    """
    #only graph structure without features
    # create the graph, networkx graph
    G = nx.read_edgelist(path + '/cora.edges', nodetype=int, delimiter = ',', data=(('weight',float),), create_using=nx.DiGraph())
    # create the label set
    label = {}
    with open(path + '/cora.node_labels') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            label[int(row[0])] = int(row[1])
    # get the adj matrix; sort node ids so row order is deterministic
    A = nx.adjacency_matrix(G, nodelist = sorted(list(label.keys())), weight = 'weight')
    # hermitian_decomp / cheb_poly come from utils.hermitian (star import)
    L, w, v = hermitian_decomp(A.todense(), q, norm=True)
    multi_order_laplacian = cheb_poly(L, K)
    if save_pk:
        cora = {}
        cora['A'] = A
        cora['L'] = multi_order_laplacian
        cora['eigen_col'] = v
        cora['label'] = label
        pk.dump(cora, open(path + '/cora'+str(q)+'_'+str(K)+'.pk', 'wb'))
    return A, multi_order_laplacian, v, label
def load_edge_index(file = 'cora.edges', path = '../dataset/cora/'):
    """Read a weighted directed edge list and return it as a torch tensor.

    Args:
        file: edge-list file name (comma-delimited ``src,dst,weight`` rows).
        path: folder containing the file.

    Returns:
        A ``torch.Tensor`` of shape (2, num_edges) with source/target ids.
    """
    G = nx.read_edgelist(path + file, nodetype=int, delimiter = ',', data=(('weight',float),), create_using=nx.DiGraph())
    edge_index = []
    for line in nx.generate_edgelist(G, data=False):
        tokens = line.split(' ')
        edge_index.append([int(tokens[0]), int(tokens[1])])
    # BUGFIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # use an explicit, platform-independent integer type instead.
    edge_index = np.array(edge_index, dtype=np.int64).T
    return torch.from_numpy(edge_index)
def load_raw_cora(q=0, path="../pygcn/data/cora/", dataset="cora", save_pk = False, K = 1, feature_only = False):
    """Load the raw cora ``.content``/``.cites`` files and build the magnetic Laplacian.

    With ``feature_only=True`` only the sparse feature matrix is returned.
    Otherwise returns (dense adjacency, multi-order Laplacian, eigenvectors,
    one-hot label matrix).
    """
    def encode_onehot(labels):
        # Map each distinct class string to a one-hot row.
        classes = set(labels)
        classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
                        enumerate(classes)}
        labels_onehot = np.array(list(map(classes_dict.get, labels)),
                                 dtype=np.int32)
        return labels_onehot
    print('Loading {} dataset...'.format(dataset))
    # Each .content row: node id, binary word features, class label.
    idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
                                        dtype=np.dtype(str))
    if feature_only:
        features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
        return features
    labels = encode_onehot(idx_features_labels[:, -1])
    # build graph: remap raw paper ids to contiguous row indices
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
                                    dtype=np.int32)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)
    adj = adj.toarray()
    # hermitian_decomp / cheb_poly come from utils.hermitian (star import)
    L, w, v = hermitian_decomp(adj, q, norm=True)
    multi_order_laplacian = cheb_poly(L, K)
    if save_pk:
        cora = {}
        #cora['A'] = adj.astype('float32')
        cora['L'] = multi_order_laplacian
        cora['eigen_col'] = v
        cora['label'] = labels.astype('uint8')
        pk.dump(cora, open(path + '/cora_raw'+str(q)+'_'+str(K)+'.pk', 'wb'))
    return adj, multi_order_laplacian, v, labels
def load_syn(root, name = None):
    """Load a pickled synthetic dataset stored at ``root + '.pk'``.

    Also ensures the directory *root* exists (created on demand) so later
    artifacts can be written next to the data.

    Args:
        root: path prefix; the pickle lives at ``root + '.pk'``.
        name: unused, kept for interface compatibility.

    Returns:
        A one-element list wrapping the unpickled object.
    """
    # Close the file handle deterministically (the original leaked it).
    with open(root + '.pk', 'rb') as fh:
        data = pk.load(fh)
    # exist_ok avoids the check-then-create race of the original
    # isdir()/makedirs() + try/except FileExistsError dance.
    os.makedirs(root, exist_ok=True)
    return [data]
def geometric_dataset(q, K, root='../dataset/data/tmp/', subset='Cornell', dataset=WebKB,
                load_only = False, save_pk = True, laplacian = True, gcn_appr = False):
    """Load a torch-geometric node-classification dataset and its dense
    multi-order magnetic Laplacian (Chebyshev polynomials up to order K).

    ``q`` may be a scalar or a list of charge parameters; with a list the
    Laplacian tensor gains a per-q axis. ``load_only=True`` skips the
    Laplacian computation and returns only features, labels and masks.
    """
    if subset == '':
        dataset = dataset(root=root)
    else:
        dataset = dataset(root=root, name=subset)
    size = dataset[0].y.size(-1)
    # Dense 0/1 adjacency reconstructed from the edge index.
    adj = torch.zeros(size, size).data.numpy().astype('uint8')
    adj[dataset[0].edge_index[0], dataset[0].edge_index[1]] = 1
    label = dataset[0].y.data.numpy().astype('int')
    X = dataset[0].x.data.numpy().astype('float32')
    train_mask = dataset[0].train_mask.data.numpy().astype('bool_')
    val_mask = dataset[0].val_mask.data.numpy().astype('bool_')
    test_mask = dataset[0].test_mask.data.numpy().astype('bool_')
    if load_only:
        return X, label, train_mask, val_mask, test_mask
    if isinstance(q, list) == False:
        L, _, _ = hermitian_decomp(adj, q, norm=True, laplacian=laplacian, max_eigen = 2.0, gcn_appr = gcn_appr)
        multi_order_laplacian = cheb_poly(L, K)
    else:
        multi_order_laplacian = []
        for value in q:
            L, _, _ = hermitian_decomp(adj, value, norm=True, laplacian=laplacian, max_eigen = 2.0, gcn_appr = gcn_appr)
            multi_l = cheb_poly(L, K)
            multi_order_laplacian.append(multi_l)
        # (len(q), K+1, N, N) -> (K+1, len(q), N, N)
        multi_order_laplacian = np.array(multi_order_laplacian).transpose((1,0,2,3))
    save_name = root+'/data'+str(q)+'_'+str(K)
    if laplacian == False:
        save_name += '_P'
    if save_pk:
        data = {}
        data['L'] = multi_order_laplacian
        pk.dump(data, open(save_name+'.pk', 'wb'), protocol=pk.HIGHEST_PROTOCOL)
    return X, label, train_mask, val_mask, test_mask, multi_order_laplacian
# sparse version of function geometric_dataset()
def geometric_dataset_sparse(q, K, root='../dataset/data/tmp/', subset='Cornell', dataset=WebKB,
                load_only = False, save_pk = True, laplacian = True, gcn_appr = False):
    """Sparse counterpart of geometric_dataset(): builds the multi-order
    magnetic Laplacian directly from the edge list, never densifying the
    adjacency matrix."""
    if subset == '':
        dataset = dataset(root=root)
    else:
        dataset = dataset(root=root, name=subset)
    size = dataset[0].y.size(-1)
    #adj = torch.zeros(size, size).data.numpy().astype('uint8')
    #adj[dataset[0].edge_index[0], dataset[0].edge_index[1]] = 1
    f_node, e_node = dataset[0].edge_index[0], dataset[0].edge_index[1]
    label = dataset[0].y.data.numpy().astype('int')
    X = dataset[0].x.data.numpy().astype('float32')
    train_mask = dataset[0].train_mask.data.numpy().astype('bool_')
    val_mask = dataset[0].val_mask.data.numpy().astype('bool_')
    test_mask = dataset[0].test_mask.data.numpy().astype('bool_')
    if load_only:
        return X, label, train_mask, val_mask, test_mask
    # Some torch-geometric datasets expose edge_weight, others raise
    # AttributeError — fall back to unweighted edges in that case.
    try:
        L = hermitian_decomp_sparse(f_node, e_node, size, q, norm=True, laplacian=laplacian,
            max_eigen = 2.0, gcn_appr = gcn_appr, edge_weight = dataset[0].edge_weight)
    except AttributeError:
        L = hermitian_decomp_sparse(f_node, e_node, size, q, norm=True, laplacian=laplacian,
            max_eigen = 2.0, gcn_appr = gcn_appr, edge_weight = None)
    multi_order_laplacian = cheb_poly_sparse(L, K)
    save_name = root+'/data'+str(q)+'_'+str(K)
    if laplacian == False:
        save_name += '_P'
    if save_pk:
        data = {}
        data['L'] = multi_order_laplacian
        pk.dump(data, open(save_name+'_sparse.pk', 'wb'), protocol=pk.HIGHEST_PROTOCOL)
    return X, label, train_mask, val_mask, test_mask, multi_order_laplacian
def to_edge_dataset(q, edge_index, K, data_split, size, root='../dataset/data/tmp/', laplacian=True, norm=True, max_eigen = 2.0, gcn_appr = False):
    """Dense multi-order magnetic Laplacian for a link-prediction split.

    ``q`` may be a scalar or a list of charge parameters; with a list the
    result is transposed to shape (K+1, len(q), N, N). A pickle cache at
    ``root`` keyed by (q, K, data_split) is reused when present.

    Returns the stacked Chebyshev polynomials of the Laplacian.
    """
    save_name = root+'/edge_'+str(q)+'_'+str(K)+'_'+str(data_split)+'.pk'
    # Reuse a cached computation when available.
    if os.path.isfile(save_name):
        multi_order_laplacian = pk.load(open(save_name, 'rb'))
        return multi_order_laplacian
    adj = torch.zeros(size, size).data.numpy().astype('uint8')
    adj[edge_index[0], edge_index[1]] = 1
    if not isinstance(q, list):
        L, _, _ = hermitian_decomp(adj, q, norm=True, laplacian=laplacian, max_eigen = 2.0, gcn_appr = gcn_appr)
        multi_order_laplacian = cheb_poly(L, K)
    else:
        multi_order_laplacian = []
        for value in q:
            # BUGFIX: decompose with each individual charge ``value``; the
            # original passed the whole list ``q``, so every entry was built
            # from the wrong (identical) Laplacian — compare geometric_dataset.
            L, _, _ = hermitian_decomp(adj, value, norm=True, laplacian=laplacian, max_eigen = 2.0, gcn_appr = gcn_appr)
            multi_l = cheb_poly(L, K)
            multi_order_laplacian.append(multi_l)
        # (len(q), K+1, N, N) -> (K+1, len(q), N, N)
        multi_order_laplacian = np.array(multi_order_laplacian).transpose((1,0,2,3))
    return multi_order_laplacian
def to_edge_dataset_sparse(q, edge_index, K, data_split, size, root='../dataset/data/tmp/', laplacian=True, norm=True, max_eigen = 2.0, gcn_appr = False):
    """Sparse analogue of to_edge_dataset(): multi-order magnetic Laplacian
    for a link-prediction split, computed straight from the edge list.

    A pickle cache keyed by (q, K, data_split) under ``root`` is returned
    verbatim when it exists.
    """
    cache_file = root+'/edge_'+str(q)+'_'+str(K)+'_'+str(data_split)+'.pk'
    if os.path.isfile(cache_file):
        with open(cache_file, 'rb') as fh:
            return pk.load(fh)
    src, dst = edge_index[0], edge_index[1]
    lap = hermitian_decomp_sparse(src, dst, size, q, norm=True, laplacian=laplacian, max_eigen = 2.0, gcn_appr = gcn_appr)
    return cheb_poly_sparse(lap, K)
def F_in_out(edge_index, size, edge_weight=None):
    """Build degree-normalized second-order proximity matrices.

    For each node k, rows/columns of the (weighted) adjacency are outer-
    multiplied with themselves and accumulated, normalized by the node's
    degree. Returns (undirected edge_index, edge_in, in_weight, edge_out,
    out_weight) where the last four describe A_in and A_out in COO form.
    """
    if edge_weight is not None:
        a = sp.coo_matrix((edge_weight, edge_index), shape=(size, size)).tocsc()
    else:
        # Unweighted graph: every edge counts 1.
        a = sp.coo_matrix((np.ones(len(edge_index[0])), edge_index), shape=(size, size)).tocsc()
    out_degree = np.array(a.sum(axis=0))[0]
    out_degree[out_degree == 0] = 1  # avoid division by zero below
    in_degree = np.array(a.sum(axis=1))[:, 0]
    in_degree[in_degree == 0] = 1  # avoid division by zero below
    '''
    # can be more efficient
    a = np.zeros((size, size), dtype=np.uint8)
    a[edge_index[0], edge_index[1]] = 1
    out_degree = np.sum(a, axis = 1)
    out_degree[out_degree == 0] = 1
    in_degree = np.sum(a, axis = 0)
    in_degree[in_degree == 0] = 1
    '''
    # sparse implementation
    a = sp.csr_matrix(a)
    A_in = sp.csr_matrix(np.zeros((size, size)))
    A_out = sp.csr_matrix(np.zeros((size, size)))
    for k in range(size):
        # Accumulate row-wise / column-wise outer products per node k.
        A_in += np.dot(a[k, :].T, a[k, :])/out_degree[k]
        A_out += np.dot(a[:,k], a[:,k].T)/in_degree[k]
    A_in = A_in.tocoo()
    A_out = A_out.tocoo()
    edge_in = torch.from_numpy(np.vstack((A_in.row, A_in.col))).long()
    edge_out = torch.from_numpy(np.vstack((A_out.row, A_out.col))).long()
    in_weight = torch.from_numpy(A_in.data).float()
    out_weight = torch.from_numpy(A_out.data).float()
    return to_undirected(edge_index), edge_in, in_weight, edge_out, out_weight
"csv.reader",
"os.makedirs",
"os.path.isdir",
"numpy.dtype",
"numpy.zeros",
"torch_geometric.utils.to_undirected",
"numpy.ones",
"networkx.generate_edgelist",
"os.path.isfile",
"scipy.sparse.csr_matrix",
"numpy.array",
"scipy.sparse.coo_matrix",
"torch.zeros",
"numpy.dot",
"networkx.DiGr... | [((1508, 1543), 'networkx.generate_edgelist', 'nx.generate_edgelist', (['G'], {'data': '(False)'}), '(G, data=False)\n', (1528, 1543), True, 'import networkx as nx\n'), ((1739, 1767), 'torch.from_numpy', 'torch.from_numpy', (['edge_index'], {}), '(edge_index)\n', (1755, 1767), False, 'import torch\n'), ((2631, 2682), 'numpy.array', 'np.array', (['idx_features_labels[:, 0]'], {'dtype': 'np.int32'}), '(idx_features_labels[:, 0], dtype=np.int32)\n', (2639, 2682), True, 'import numpy as np\n'), ((7614, 7639), 'os.path.isfile', 'os.path.isfile', (['save_name'], {}), '(save_name)\n', (7628, 7639), False, 'import csv, os\n'), ((8985, 9010), 'os.path.isfile', 'os.path.isfile', (['save_name'], {}), '(save_name)\n', (8999, 9010), False, 'import csv, os\n'), ((10128, 10144), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['a'], {}), '(a)\n', (10141, 10144), True, 'import scipy.sparse as sp\n'), ((675, 709), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (685, 709), False, 'import csv, os\n'), ((1685, 1719), 'numpy.array', 'np.array', (['edge_index'], {'dtype': 'np.int'}), '(edge_index, dtype=np.int)\n', (1693, 1719), True, 'import numpy as np\n'), ((2460, 2521), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['idx_features_labels[:, 1:-1]'], {'dtype': 'np.float32'}), '(idx_features_labels[:, 1:-1], dtype=np.float32)\n', (2473, 2521), True, 'import scipy.sparse as sp\n'), ((3713, 3732), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (3726, 3732), False, 'import csv, os\n'), ((10170, 10192), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (10178, 10192), True, 'import numpy as np\n'), ((10220, 10242), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (10228, 10242), True, 'import numpy as np\n'), ((10704, 10729), 'torch_geometric.utils.to_undirected', 'to_undirected', (['edge_index'], {}), '(edge_index)\n', (10717, 10729), False, 'from 
torch_geometric.utils import to_undirected\n'), ((547, 559), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (557, 559), True, 'import networkx as nx\n'), ((1458, 1470), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1468, 1470), True, 'import networkx as nx\n'), ((2405, 2418), 'numpy.dtype', 'np.dtype', (['str'], {}), '(str)\n', (2413, 2418), True, 'import numpy as np\n'), ((3020, 3043), 'numpy.ones', 'np.ones', (['edges.shape[0]'], {}), '(edges.shape[0])\n', (3027, 3043), True, 'import numpy as np\n'), ((3768, 3785), 'os.makedirs', 'os.makedirs', (['root'], {}), '(root)\n', (3779, 3785), False, 'import csv, os\n'), ((10286, 10312), 'numpy.dot', 'np.dot', (['a[k, :].T', 'a[k, :]'], {}), '(a[k, :].T, a[k, :])\n', (10292, 10312), True, 'import numpy as np\n'), ((10344, 10370), 'numpy.dot', 'np.dot', (['a[:, k]', 'a[:, k].T'], {}), '(a[:, k], a[:, k].T)\n', (10350, 10370), True, 'import numpy as np\n'), ((10603, 10630), 'torch.from_numpy', 'torch.from_numpy', (['A_in.data'], {}), '(A_in.data)\n', (10619, 10630), False, 'import torch\n'), ((10656, 10684), 'torch.from_numpy', 'torch.from_numpy', (['A_out.data'], {}), '(A_out.data)\n', (10672, 10684), False, 'import torch\n'), ((5231, 5262), 'numpy.array', 'np.array', (['multi_order_laplacian'], {}), '(multi_order_laplacian)\n', (5239, 5262), True, 'import numpy as np\n'), ((8572, 8603), 'numpy.array', 'np.array', (['multi_order_laplacian'], {}), '(multi_order_laplacian)\n', (8580, 8603), True, 'import numpy as np\n'), ((9470, 9530), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['(edge_weight, edge_index)'], {'shape': '(size, size)'}), '((edge_weight, edge_index), shape=(size, size))\n', (9483, 9530), True, 'import scipy.sparse as sp\n'), ((10466, 10497), 'numpy.vstack', 'np.vstack', (['(A_in.row, A_in.col)'], {}), '((A_in.row, A_in.col))\n', (10475, 10497), True, 'import numpy as np\n'), ((10539, 10572), 'numpy.vstack', 'np.vstack', (['(A_out.row, A_out.col)'], {}), '((A_out.row, A_out.col))\n', (10548, 
10572), True, 'import numpy as np\n'), ((4222, 4245), 'torch.zeros', 'torch.zeros', (['size', 'size'], {}), '(size, size)\n', (4233, 4245), False, 'import torch\n'), ((7752, 7775), 'torch.zeros', 'torch.zeros', (['size', 'size'], {}), '(size, size)\n', (7763, 7775), False, 'import torch\n')] |
from os import path
import numpy as np
import torch as ch
import pytorch_lightning as pl
from torch.utils.data import random_split, DataLoader, TensorDataset, Dataset
import scipy.stats as stats
DEFAULT_PATH = '../datasets/sequentially_encoded_spatial_wang_science_2018.npz'
DEFAULT_PATH = path.join(path.dirname(path.realpath(__file__)), DEFAULT_PATH)
TEST_CELLS = 3000
VAL_CELLS = 3000
class SpatialWang2018DataModule(pl.LightningDataModule):
    """Lightning data module serving (expression features, 3D coordinates)
    pairs from the encoded Wang 2018 spatial dataset, with a fixed test split
    and a seed-controlled validation split."""

    def __init__(self, data_file: str = DEFAULT_PATH, random_seed=0,
                 batch_size=1024, normalize_coords=True):
        super().__init__()
        self.data_file = data_file
        self.random_seed = random_seed
        self.normalize_coords = normalize_coords
        self.batch_size = batch_size
        # Feature dimensionality exposed to downstream models.
        self.dims = 28

    def prepare_data(self):
        # Nothing to download; the .npz file is expected on disk.
        pass

    def setup(self, stage=None):
        """Load the .npz, normalize, and create train/val/test splits."""
        content = np.load(self.data_file)['arr_0']
        # Extract the labels as the last 3 columns
        data = ch.from_numpy(content[:, :-3]).float()
        coordinates = ch.from_numpy(content[:, -3:]).float()
        # L2-normalize each feature row (epsilon guards zero rows).
        data /= ((data ** 2).sum(1) ** 0.5)[:, None] + 1e-5
        if self.normalize_coords:
            mean = coordinates.mean(0)
            maxs = coordinates.max()
            coordinates -= mean[None, :]
            coordinates /= coordinates.abs().max() * 2
        else:
            coordinates = coordinates / coordinates.abs().max()
        full_dataset = TensorDataset(data, coordinates)
        num_samples = len(full_dataset)
        # We want to always left out the same data to make sure it never
        # leaks into our models/experiments
        test_generator = ch.Generator().manual_seed(42)
        # For validation we want to be able to sample different sets to do
        # cross validation for example
        val_generator = ch.Generator().manual_seed(self.random_seed)
        rest, test_dataset = random_split(full_dataset,
                                          [num_samples - TEST_CELLS, TEST_CELLS],
                                          generator=test_generator)
        train_dataset, val_dataset = random_split(rest,
                                          [len(rest) - VAL_CELLS, VAL_CELLS],
                                          generator=val_generator)
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.test_dataset = test_dataset

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size,
                   pin_memory=True, shuffle=True, num_workers=0)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=self.batch_size,
                   pin_memory=True, shuffle=False, num_workers=0)

    def test_dataloader(self):
        return DataLoader(self.test_dataset, batch_size=self.batch_size,
                   pin_memory=True, shuffle=False, num_workers=0)

    def compute_baseline(self, mode='chance', samples=100, loss=ch.nn.functional.mse_loss):
        """Monte-Carlo baseline error on the test split.

        mode='chance' predicts random training coordinates; mode='center'
        predicts the mean training coordinate. Returns (per-dimension MSE
        list, mean MSE).
        """
        l = DataLoader(self.test_dataset, batch_size=len(self.test_dataset))
        test_coords = next(iter(l))[1]
        l = DataLoader(self.train_dataset, batch_size=len(self.train_dataset))
        train_coords = next(iter(l))[1]
        results = []
        for _ in range(samples):
            if mode == 'chance':
                random_indices = ch.randint(0, train_coords.shape[0],
                                            (test_coords.shape[0],))
                prediction = ch.index_select(train_coords, 0, random_indices)
            elif mode == 'center':
                prediction = test_coords*0 + train_coords.mean(0)[None, :]
            error = ((prediction - test_coords)**2).mean(0).numpy()
            results.append(error)
        per_dim_errors = np.mean(results, 0)
        return list(per_dim_errors), np.mean(per_dim_errors)
class PairwiseDataset(Dataset):
    """Dataset of random cell pairs drawn from an underlying (features,
    coordinates) dataset.

    Each item is (stacked feature pair of shape (2, F), squared coordinate
    distance). When ``scale`` is given, the distance is rank-normalized via
    an exponential CDF followed by a Gaussian inverse CDF lookup table.
    """

    def __init__(self, original_dataset, norm=2, length=1e5, scale=None):
        super().__init__()
        self.original_dataset = original_dataset
        self.length = int(length)
        self.norm = norm
        self.scale = scale
        if scale is not None:
            # Lookup table: squared distance -> expon CDF -> normal quantile.
            self.all_distances = np.linspace(0, stats.expon.ppf(0.999, loc=0, scale=scale), 100000)
            self.mapped = stats.norm.ppf(stats.expon.cdf(self.all_distances, 0, self.scale) + 1e-8)

    def __len__(self):
        return self.length

    def __getitem__(self, ix):
        # NOTE: ``ix`` is intentionally ignored — each access draws a fresh
        # random pair of rows.
        count = len(self.original_dataset)
        # Re-seed inside DataLoader workers so they do not share numpy state.
        # The original used ``try: ... except: pass`` around this, silently
        # swallowing every error; check worker_info explicitly instead
        # (get_worker_info() returns None in the main process).
        worker_info = ch.utils.data.get_worker_info()
        if worker_info is not None:
            np.random.seed(worker_info.seed % int(2 ** 32 - 1))
        ix1 = np.random.randint(0, count)
        ix2 = np.random.randint(0, count)
        features_a, coords_a = self.original_dataset[ix1]
        features_b, coords_b = self.original_dataset[ix2]
        distance = (((coords_a - coords_b) ** 2).sum())
        if self.scale is not None:
            # (A stray no-op ``pass`` preceded this line in the original.)
            distance = np.interp(distance, self.all_distances, self.mapped).astype('float32')
        final_features = ch.stack([features_a, features_b])
        return final_features, distance
def compute_params(*args, **kwargs):
    """Fit an exponential distribution (loc fixed at 0) to a 30k-sample of
    pairwise distances produced by PairwiseDataset(*args, **kwargs).

    Returns the (loc, scale) tuple from scipy's fit.
    """
    pair_ds = PairwiseDataset(*args, **kwargs)
    loader = DataLoader(pair_ds, batch_size=30000, num_workers=0, shuffle=True)
    distances = next(iter(loader))[1].numpy()
    return stats.expon.fit(distances, floc=0)
class PairWiseSpatialWang2018DataModule(SpatialWang2018DataModule):
    """Variant of SpatialWang2018DataModule whose loaders emit random cell
    pairs with a distribution-normalized distance target (scale fitted on the
    training split in every loader to avoid leakage)."""

    def train_dataloader(self):
        loc, scale = compute_params(self.train_dataset)
        return DataLoader(PairwiseDataset(self.train_dataset, length=1e7, scale=scale),
                          batch_size=self.batch_size,
                          pin_memory=True, shuffle=True, num_workers=40)

    def val_dataloader(self):
        loc, scale = compute_params(self.train_dataset)
        return DataLoader(PairwiseDataset(self.val_dataset, length=1e5, scale=scale),
                          batch_size=self.batch_size,
                          pin_memory=True, shuffle=False, num_workers=40)

    def test_dataloader(self):
        loc, scale = compute_params(self.train_dataset)
        return DataLoader(PairwiseDataset(self.test_dataset, length=1e6, scale=scale),
                          batch_size=self.batch_size,
                          pin_memory=True, shuffle=False, num_workers=40)
if __name__ == '__main__':
    # Smoke test: report the naive baselines for the plain module and make
    # sure the pairwise variant at least sets up without error.
    y = SpatialWang2018DataModule()
    y.setup()
    print("Chance baseline:", y.compute_baseline(mode='chance'))
    print("Center baseline:", y.compute_baseline(mode='center'))
    y2 = PairWiseSpatialWang2018DataModule()
    y2.setup()
| [
"scipy.stats.expon.ppf",
"numpy.load",
"torch.randint",
"torch.utils.data.get_worker_info",
"torch.stack",
"torch.utils.data.DataLoader",
"scipy.stats.expon.fit",
"os.path.realpath",
"torch.index_select",
"numpy.mean",
"numpy.random.randint",
"torch.utils.data.TensorDataset",
"scipy.stats.ex... | [((5438, 5469), 'scipy.stats.expon.fit', 'stats.expon.fit', (['sample'], {'floc': '(0)'}), '(sample, floc=0)\n', (5453, 5469), True, 'import scipy.stats as stats\n'), ((316, 339), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (329, 339), False, 'from os import path\n'), ((1453, 1485), 'torch.utils.data.TensorDataset', 'TensorDataset', (['data', 'coordinates'], {}), '(data, coordinates)\n', (1466, 1485), False, 'from torch.utils.data import random_split, DataLoader, TensorDataset, Dataset\n'), ((1914, 2010), 'torch.utils.data.random_split', 'random_split', (['full_dataset', '[num_samples - TEST_CELLS, TEST_CELLS]'], {'generator': 'test_generator'}), '(full_dataset, [num_samples - TEST_CELLS, TEST_CELLS],\n generator=test_generator)\n', (1926, 2010), False, 'from torch.utils.data import random_split, DataLoader, TensorDataset, Dataset\n'), ((2483, 2591), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_dataset'], {'batch_size': 'self.batch_size', 'pin_memory': '(True)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(self.train_dataset, batch_size=self.batch_size, pin_memory=True,\n shuffle=True, num_workers=0)\n', (2493, 2591), False, 'from torch.utils.data import random_split, DataLoader, TensorDataset, Dataset\n'), ((2661, 2768), 'torch.utils.data.DataLoader', 'DataLoader', (['self.val_dataset'], {'batch_size': 'self.batch_size', 'pin_memory': '(True)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(self.val_dataset, batch_size=self.batch_size, pin_memory=True,\n shuffle=False, num_workers=0)\n', (2671, 2768), False, 'from torch.utils.data import random_split, DataLoader, TensorDataset, Dataset\n'), ((2839, 2947), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_dataset'], {'batch_size': 'self.batch_size', 'pin_memory': '(True)', 'shuffle': '(False)', 'num_workers': '(0)'}), '(self.test_dataset, batch_size=self.batch_size, pin_memory=True,\n shuffle=False, num_workers=0)\n', (2849, 2947), False, 
'from torch.utils.data import random_split, DataLoader, TensorDataset, Dataset\n'), ((3843, 3862), 'numpy.mean', 'np.mean', (['results', '(0)'], {}), '(results, 0)\n', (3850, 3862), True, 'import numpy as np\n'), ((4720, 4747), 'numpy.random.randint', 'np.random.randint', (['(0)', 'count'], {}), '(0, count)\n', (4737, 4747), True, 'import numpy as np\n'), ((4762, 4789), 'numpy.random.randint', 'np.random.randint', (['(0)', 'count'], {}), '(0, count)\n', (4779, 4789), True, 'import numpy as np\n'), ((5138, 5172), 'torch.stack', 'ch.stack', (['[features_a, features_b]'], {}), '([features_a, features_b])\n', (5146, 5172), True, 'import torch as ch\n'), ((882, 905), 'numpy.load', 'np.load', (['self.data_file'], {}), '(self.data_file)\n', (889, 905), True, 'import numpy as np\n'), ((3900, 3923), 'numpy.mean', 'np.mean', (['per_dim_errors'], {}), '(per_dim_errors)\n', (3907, 3923), True, 'import numpy as np\n'), ((982, 1012), 'torch.from_numpy', 'ch.from_numpy', (['content[:, :-3]'], {}), '(content[:, :-3])\n', (995, 1012), True, 'import torch as ch\n'), ((1043, 1073), 'torch.from_numpy', 'ch.from_numpy', (['content[:, -3:]'], {}), '(content[:, -3:])\n', (1056, 1073), True, 'import torch as ch\n'), ((1670, 1684), 'torch.Generator', 'ch.Generator', ([], {}), '()\n', (1682, 1684), True, 'import torch as ch\n'), ((1839, 1853), 'torch.Generator', 'ch.Generator', ([], {}), '()\n', (1851, 1853), True, 'import torch as ch\n'), ((3419, 3480), 'torch.randint', 'ch.randint', (['(0)', 'train_coords.shape[0]', '(test_coords.shape[0],)'], {}), '(0, train_coords.shape[0], (test_coords.shape[0],))\n', (3429, 3480), True, 'import torch as ch\n'), ((3554, 3602), 'torch.index_select', 'ch.index_select', (['train_coords', '(0)', 'random_indices'], {}), '(train_coords, 0, random_indices)\n', (3569, 3602), True, 'import torch as ch\n'), ((4274, 4316), 'scipy.stats.expon.ppf', 'stats.expon.ppf', (['(0.999)'], {'loc': '(0)', 'scale': 'scale'}), '(0.999, loc=0, scale=scale)\n', (4289, 4316), 
True, 'import scipy.stats as stats\n'), ((4585, 4616), 'torch.utils.data.get_worker_info', 'ch.utils.data.get_worker_info', ([], {}), '()\n', (4614, 4616), True, 'import torch as ch\n'), ((4367, 4417), 'scipy.stats.expon.cdf', 'stats.expon.cdf', (['self.all_distances', '(0)', 'self.scale'], {}), '(self.all_distances, 0, self.scale)\n', (4382, 4417), True, 'import scipy.stats as stats\n'), ((5041, 5093), 'numpy.interp', 'np.interp', (['distance', 'self.all_distances', 'self.mapped'], {}), '(distance, self.all_distances, self.mapped)\n', (5050, 5093), True, 'import numpy as np\n'), ((5317, 5378), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'batch_size': '(30000)', 'num_workers': '(0)', 'shuffle': '(True)'}), '(ds, batch_size=30000, num_workers=0, shuffle=True)\n', (5327, 5378), False, 'from torch.utils.data import random_split, DataLoader, TensorDataset, Dataset\n')] |
import itertools
import os
import pickle
import pandas as pd
import numpy as np
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from clusterevaluator import SmellEvaluator
from clusterconfigurator import ClusterConfigurator
from data import Data
import cliffsDelta
#Rule-based Detector
from rulebased.detector import toomanyattributes
from rulebased.detector import duplicateblocks
from rulebased.detector import insufficientmodularization
from imblearn.over_sampling import RandomOverSampler
from sklearn.metrics import precision_score
from sklearn.metrics import matthews_corrcoef
from scipy.stats import mannwhitneyu
# Resolve the repository root relative to this script's location.
root_folder = os.path.dirname(os.path.dirname( __file__ ))
results_folder = os.path.join(root_folder, 'results', 'case_study')
data_folder = os.path.join(root_folder, 'dataminer', 'tmp')
temp_folder = os.path.join(root_folder, 'temp_data', 'case_study')
# Create the output folders on first run.
if not os.path.exists(results_folder):
    os.makedirs(results_folder)
if not os.path.exists(data_folder):
    os.makedirs(data_folder)
if not os.path.exists(temp_folder):
    os.makedirs(temp_folder)
#--------------Table
# One evaluator per smell: db = Duplicate Block, tma = Too many Attributes,
# im = Insufficient Modularization (names per the `names` dict below).
db = SmellEvaluator('db')
tma = SmellEvaluator('tma')
im = SmellEvaluator('im')
# #--------------Internal and External measurement figure
def resetIndex(df):
    """Return *df* re-indexed with a 1-based running integer index.

    Works on a copy: the original version added a 'newIndex' column to the
    caller's DataFrame as a side effect before setting it as the index.

    Args:
        df: any pandas DataFrame.

    Returns:
        A new DataFrame with the same columns, indexed 1..len(df).
    """
    df = df.copy()
    df['newIndex'] = list(range(1, df.shape[0] + 1))
    return df.set_index(keys='newIndex', drop=True)
# Top-5 rows of each smell's evaluation DataFrame, re-indexed 1..5 for plotting.
dfsInternal = {
    'db' : resetIndex(db.evalDf).head(5),
    'tma' : resetIndex(tma.evalDf).head(5),
    'im' : resetIndex(im.evalDf).head(5)
}
# Line color per smell.
colors = {
    'db' : '#0057e7',
    'tma' : '#d62d20',
    'im' : '#ffa700'
}
# Human-readable legend label per smell.
names = {
    'db' : 'Duplicate Block',
    'tma' : 'Too many Attributes',
    'im' : 'Insufficient Modularization'
}
def createTrace(smell, pm, legend):
    """Build a plotly scatter-line trace for performance measure *pm* of the
    given *smell*, styled from the module-level colors/names tables.

    Args:
        smell: one of 'db', 'tma', 'im'.
        pm: column name of the performance measure to plot.
        legend: whether this trace contributes a legend entry.
    """
    frame = dfsInternal[smell]
    return dict(
        type='scatter',
        x=frame.index,
        y=frame[pm],
        mode='lines',
        line=dict(color=colors[smell], width=6),
        name=names[smell],
        showlegend=legend,
    )
# Six stacked subplots: three internal cluster-quality metrics (sc, ch, db)
# followed by three external agreement metrics, one line per smell.
fig = make_subplots(rows=6, cols=1, shared_xaxes=True, subplot_titles=['Silhouette Score (Higher is better)', 'Calinski-Harabasz Index (Higher is better)', 'Davies-Bouldin Index (Lower is better)', 'Precision (Higher is better)', 'Matthews Correlation Coefficient (Higher is better)', 'Adjusted Rand Index (Higher is better)'], vertical_spacing = 0.05)
legend = True
for ix, pm in enumerate(['sc', 'ch', 'db', 'precision', 'mcc', 'ari']):
    ix += 1  # plotly subplot rows are 1-based
    fig.append_trace(createTrace('db', pm, legend), ix, 1)
    fig.append_trace(createTrace('tma', pm, legend), ix, 1)
    fig.append_trace(createTrace('im', pm, legend), ix, 1)
    legend = False  # show the legend only for the first row's traces
fig.update_layout(
    height=2900,
    width=2800,
    title_text="Top 5 Configurations",
    paper_bgcolor='rgba(255, 255, 255, 1)',
    plot_bgcolor='rgba(255, 255, 255, 1)',
    legend=dict(x=-.1, y=1.2, orientation='h'),
    font = dict(size=47)
)
#fix subplot title size
for i in fig['layout']['annotations']:
    i['font'] = dict(size=47)
fig.show()
#fig.write_image(os.path.join(results_folder, 'configurationperformance.png'))
#-----------Stability
# Best five clustering configurations per smell, each encoded as the tuple
# ClusterConfigurator expects: four boolean preprocessing flags, a distance
# metric (or None), and an (algorithm, algorithm-parameter) pair.
# NOTE(review): the exact meaning of the boolean flags is defined by
# ClusterConfigurator — confirm there before reusing these tuples.
dbTopConfigs = [
    (False, False, True, False, None, ('gm', 'spherical')),
    (True, False, True, False, None, ('gm', 'spherical')),
    (True, False, True, False, 'braycurtis', ('kmedoids', None)),
    (False, False, True, False, None, ('gm', 'tied')),
    (True, True, True, False, None, ('gm', 'full')),
]
tmaTopConfigs = [
    (False, False, True, False, None, ('gm', 'full')),
    (False, False, True, False, None, ('gm', 'tied')),
    (False, False, True, False, None, ('gm', 'spherical')),
    (True, False, True, False, None, ('gm', 'spherical')),
    (False, False, True, False, 'l1', ('agglo', 'complete'))
]
imTopConfigs = [
    (False, False, True, False, None, ('gm', 'full')),
    (False, False, True, False, None, ('gm', 'tied')),
    (True, False, True, False, None, ('gm', 'full')),
    (True, False, True, False, None, ('gm', 'spherical')),
    (False, False, True, False, None, ('gm', 'spherical'))
]
# Print the stability score of every top configuration, per smell.
for ix, dbTopConfig in enumerate(dbTopConfigs):
    tempDf = db.df.copy(deep=True)
    tempDf = tempDf.drop(['index'], axis=1)
    dbTopConfigModel = ClusterConfigurator(tempDf, dbTopConfig)
    stability = dbTopConfigModel.getStability()
    print(f'DB config {ix}: {stability[0]}')
for ix, tmaTopConfig in enumerate(tmaTopConfigs):
    tempDf = tma.df.copy(deep=True)
    tempDf = tempDf.drop(['index'], axis=1)
    tmaTopConfigModel = ClusterConfigurator(tempDf, tmaTopConfig)
    stability = tmaTopConfigModel.getStability()
    print(f'TmA config {ix}: {stability[0]}')
for ix, imTopConfig in enumerate(imTopConfigs):
    tempDf = im.df.copy(deep=True)
    tempDf = tempDf.drop(['index'], axis=1)
    imTopConfigModel = ClusterConfigurator(tempDf, imTopConfig)
    stability = imTopConfigModel.getStability()
    print(f'IM config {ix}: {stability[0]}')
#----------------------Comparision
#Here, we calculate the MCC and Precision for 100 subsamples
#We create boxplots and a statistical test to compare the two detectors
# Percentage of ground-truth blueprints drawn in each bootstrap iteration.
subSample = 70
# Number of bootstrap iterations.
iters = 100
def comparison():
    """Bootstrap comparison of the rule-based vs. cluster-based detectors.

    Draws `iters` random subsamples of `subSample`% of the labeled
    blueprints, computes MCC and precision for both detectors on each,
    caches the raw scores as a pickle in `temp_folder`, then runs the
    statistical test and boxplot creation on the score dictionary.
    """
    blueprintLabels = getGroundTruth()
    scoreDict = {
        'rule-db-mcc' : [],
        'rule-db-precision' : [],
        'rule-tma-mcc' : [],
        'rule-tma-precision' : [],
        'rule-im-mcc' : [],
        'rule-im-precision' : [],
        'cluster-db-mcc' : [],
        'cluster-db-precision' : [],
        'cluster-tma-mcc' : [],
        'cluster-tma-precision' : [],
        'cluster-im-mcc' : [],
        'cluster-im-precision' : [],
    }
    # Reuse cached scores if a previous run already computed them.
    try:
        scoreDict = pickle.load(open(os.path.join(temp_folder, f'MCCandPrecisionFor{iters}iters{subSample}percent'), 'rb'))
    except (OSError, IOError):
        for i in range(iters):
            print('Iteration: ', i)
            sampleLabels = blueprintLabels.sample(frac=subSample/100)
            for smell in ['db', 'tma', 'im']:
                ruleLabels = getRuleLabels(sampleLabels.index, smell)
                clusterLabels = getClusterLabels(sampleLabels.index, smell)
                #Ensure same cluster shape when outliers are dropped
                sampleLabels = sampleLabels[sampleLabels.index.isin(clusterLabels.index)]
                sampleLabels = sampleLabels.sort_index()
                ruleLabels = ruleLabels[ruleLabels.index.isin(clusterLabels.index)]
                scoreDict[f'rule-{smell}-mcc'].append(calculateScore('mcc', sampleLabels[smell], ruleLabels))
                scoreDict[f'rule-{smell}-precision'].append(calculateScore('precision', sampleLabels[smell], ruleLabels))
                scoreDict[f'cluster-{smell}-mcc'].append(calculateScore('mcc', sampleLabels[smell], clusterLabels))
                scoreDict[f'cluster-{smell}-precision'].append(calculateScore('precision', sampleLabels[smell], clusterLabels))
        pickle.dump(scoreDict, open(os.path.join(temp_folder, f'MCCandPrecisionFor{iters}iters{subSample}percent'), 'wb'))
    statisticalTest(scoreDict)
    boxplotCreation(scoreDict)
def getGroundTruth():
    """Load the manually labeled smell ground truth as a boolean DataFrame.

    Reads the labeling spreadsheet (blueprint path index plus one column per
    smell) and drops the one known-bad blueprint row.
    """
    labels = pd.read_excel('results/labeling/to_label.xlsx', sheet_name='Sheet1',
                           usecols='B,E,D,G', nrows=685, index_col=0)
    cleaned = labels.drop(r'SeaCloudsEU\tosca-parser\Industry\Noart.tomcat-DC-compute-mysql-compute.yaml')
    return cleaned.astype(bool)
def calculteRule(path, smell):
    """Run the rule-based detector matching *smell* on the blueprint at *path*.

    Args:
        path: filesystem path to the blueprint to analyze.
        smell: one of 'db', 'tma', 'im'.

    Returns:
        The boolean verdict of the corresponding rule-based detector.

    Raises:
        ValueError: if *smell* is not a known smell identifier.
    """
    # Use '==', not 'is': identity comparison against string literals is
    # implementation-defined and a SyntaxWarning since Python 3.8.
    if smell == 'db':
        return duplicateblocks.evaluate_script_with_rule(path)
    if smell == 'tma':
        return toomanyattributes.evaluate_script_with_rule(path)
    if smell == 'im':
        return insufficientmodularization.evaluate_script_with_rule(path)
    # The original fell through to a NameError here; fail explicitly instead.
    raise ValueError(f'unknown smell: {smell!r}')
def getRuleLabels(ix, smell):
try:
ruleDf = pickle.load(open(os.path.join(temp_folder, 'rulebasedlabels'), 'rb'))
except (OSError, IOError):
results = {}
for blueprint in getGroundTruth().index:
path = os.path.join(data_folder, blueprint)
results[blueprint] = {
'tma' : toomanyattributes.evaluate_script_with_rule(path),
'db' : duplicateblocks.evaluate_script_with_rule(path),
'im' : insufficientmodularization.evaluate_script_with_rule(path),
}
ruleDf = pd.DataFrame(results).T
ruleDf = ruleDf.astype(bool)
pickle.dump(ruleDf, open(os.path.join(temp_folder, 'rulebasedlabels'), 'wb'))
ruleDf = ruleDf[ruleDf.index.isin(ix)].sort_index()
return ruleDf[smell]
def constructDf(ix, smell):
'''Copied from the clusterEvaluator class to enable data balancing.
Afterwards, we filterout the identified indexes of the subset again'''
if smell == 'db':
clusterDf = db.df
elif smell == 'tma':
clusterDf = tma.df
elif smell == 'im':
clusterDf = im.df
clusterDf = clusterDf.set_index('index')
clusterDf = clusterDf[clusterDf.index.isin(ix)]
clusterDf = clusterDf.loc[~clusterDf.index.duplicated(keep='first')]
return clusterDf.sort_index()
def getClusterLabels(ix, smell):
df = constructDf(ix, smell)
bestConfigurations = {
'db' : (False, False, True, False, None, ('gm', 'spherical')),
'tma' : (True, False, True, False, None, ('gm', 'spherical')),
'im' : (True, False, True, False, None, ('gm', 'spherical'))
}
configInstance = ClusterConfigurator(df, bestConfigurations[smell])
return configInstance.labels['cluster']
def calculateScore(pm, trueLabels, predLabels):
trueLabels = trueLabels.map({True: 1, False: 0})
predLabels = predLabels.map({True: 1, False: 0})
if pm is 'mcc':
score = matthews_corrcoef(trueLabels, predLabels)
elif pm is 'precision':
score = precision_score(trueLabels, predLabels)
return score
def statisticalTest(scoreDict):
pairs = {
'db-mcc' : ('rule-db-mcc', 'cluster-db-mcc'),
'db-precision' : ('rule-db-precision', 'cluster-db-precision'),
'tma-mcc' : ('rule-tma-mcc', 'cluster-tma-mcc'),
'tma-precision' : ('rule-tma-precision', 'cluster-tma-precision'),
'im-mcc' : ('rule-im-mcc', 'cluster-im-mcc'),
'im-precision' : ('rule-im-precision', 'cluster-im-precision'),
}
uDict = {}
effectDict = {}
for name, pair in pairs.items():
stat, p = mannwhitneyu(np.array(scoreDict[pair[0]]), np.array(scoreDict[pair[1]]))
uDict[name] = (stat, p)
effectsize, res = cliffsDelta.cliffsDelta(scoreDict[pair[0]], scoreDict[pair[1]])
effectDict[name] = (effectsize, res)
uDf = pd.DataFrame(data=uDict).T.to_excel(os.path.join(results_folder, f'utestresults{iters}iters{subSample}percent.xlsx'))
effectDf = pd.DataFrame(data=effectDict).T.to_excel(os.path.join(results_folder, f'effectresults{iters}iters{subSample}percent.xlsx'))
def boxplotCreation(scoreDict):
def createTrace(combi, scoreDict):
scoreList = scoreDict[combi]
#Hier gaat nog iets niet goed
detector, smell, pm = combi.split('-')
if detector == 'rule':
detector = 'Rule-Based'
else:
detector = 'Cluster'
colors = {
'db' : '#0057e7',
'tma' : '#d62d20',
'im' : '#ffa700'
}
box = go.Box(
y=scoreList,
name=detector,
boxpoints='all',
marker_color=colors[smell],
line_color=colors[smell]
)
return box
#MCC
fig = make_subplots(rows=1, cols=6, shared_yaxes=True, subplot_titles=['Duplicate Block', '', 'Too many Attributes', '', 'Insufficient Modularization', ''])
for ix, combi in enumerate(['rule-db-mcc', 'cluster-db-mcc', 'rule-tma-mcc', 'cluster-tma-mcc', 'rule-im-mcc', 'cluster-im-mcc']):
ix += 1
fig.append_trace(createTrace(combi, scoreDict), 1, ix)
fig.update_layout(
height=1300,
width=2800,
paper_bgcolor='rgba(255, 255, 255, 1)',
plot_bgcolor='rgba(255, 255, 255, 1)',
showlegend=False,
font = dict(size=47)
)
#fix subplot title size
for i in fig['layout']['annotations']:
i['font'] = dict(size=47)
fig.show()
#fig.write_image(os.path.join(results_folder, f'comparison50sampleMCC{iters}iters{subSample}percent.png'))
#Precision
fig = make_subplots(rows=1, cols=6, shared_yaxes=True, subplot_titles=['Duplicate Block', '', 'Too many Attributes', '', 'Insufficient Modularization', ''])
for ix, combi in enumerate(['rule-db-precision', 'cluster-db-precision', 'rule-tma-precision', 'cluster-tma-precision', 'rule-im-precision', 'cluster-im-precision']):
ix += 1
fig.append_trace(createTrace(combi, scoreDict), 1, ix)
fig.update_layout(
height=1300,
width=2800,
paper_bgcolor='rgba(255, 255, 255, 1)',
plot_bgcolor='rgba(255, 255, 255, 1)',
showlegend=False,
font = dict(size=47),
yaxis=dict(range=[0,0.7])
)
#fix subplot title size
for i in fig['layout']['annotations']:
i['font'] = dict(size=47)
fig.show()
#fig.write_image(os.path.join(results_folder, f'comparison50sampleprecision{iters}iters{subSample}percent.png'))
comparison() | [
"pandas.DataFrame",
"rulebased.detector.duplicateblocks.evaluate_script_with_rule",
"cliffsDelta.cliffsDelta",
"os.makedirs",
"rulebased.detector.toomanyattributes.evaluate_script_with_rule",
"os.path.dirname",
"clusterconfigurator.ClusterConfigurator",
"os.path.exists",
"plotly.graph_objects.Box",
... | [((731, 781), 'os.path.join', 'os.path.join', (['root_folder', '"""results"""', '"""case_study"""'], {}), "(root_folder, 'results', 'case_study')\n", (743, 781), False, 'import os\n'), ((796, 841), 'os.path.join', 'os.path.join', (['root_folder', '"""dataminer"""', '"""tmp"""'], {}), "(root_folder, 'dataminer', 'tmp')\n", (808, 841), False, 'import os\n'), ((856, 908), 'os.path.join', 'os.path.join', (['root_folder', '"""temp_data"""', '"""case_study"""'], {}), "(root_folder, 'temp_data', 'case_study')\n", (868, 908), False, 'import os\n'), ((1141, 1161), 'clusterevaluator.SmellEvaluator', 'SmellEvaluator', (['"""db"""'], {}), "('db')\n", (1155, 1161), False, 'from clusterevaluator import SmellEvaluator\n'), ((1168, 1189), 'clusterevaluator.SmellEvaluator', 'SmellEvaluator', (['"""tma"""'], {}), "('tma')\n", (1182, 1189), False, 'from clusterevaluator import SmellEvaluator\n'), ((1195, 1215), 'clusterevaluator.SmellEvaluator', 'SmellEvaluator', (['"""im"""'], {}), "('im')\n", (1209, 1215), False, 'from clusterevaluator import SmellEvaluator\n'), ((2083, 2453), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(6)', 'cols': '(1)', 'shared_xaxes': '(True)', 'subplot_titles': "['Silhouette Score (Higher is better)',\n 'Calinski-Harabasz Index (Higher is better)',\n 'Davies-Bouldin Index (Lower is better)',\n 'Precision (Higher is better)',\n 'Matthews Correlation Coefficient (Higher is better)',\n 'Adjusted Rand Index (Higher is better)']", 'vertical_spacing': '(0.05)'}), "(rows=6, cols=1, shared_xaxes=True, subplot_titles=[\n 'Silhouette Score (Higher is better)',\n 'Calinski-Harabasz Index (Higher is better)',\n 'Davies-Bouldin Index (Lower is better)',\n 'Precision (Higher is better)',\n 'Matthews Correlation Coefficient (Higher is better)',\n 'Adjusted Rand Index (Higher is better)'], vertical_spacing=0.05)\n", (2096, 2453), False, 'from plotly.subplots import make_subplots\n'), ((685, 710), 'os.path.dirname', 'os.path.dirname', (['__file__'], 
{}), '(__file__)\n', (700, 710), False, 'import os\n'), ((917, 947), 'os.path.exists', 'os.path.exists', (['results_folder'], {}), '(results_folder)\n', (931, 947), False, 'import os\n'), ((953, 980), 'os.makedirs', 'os.makedirs', (['results_folder'], {}), '(results_folder)\n', (964, 980), False, 'import os\n'), ((989, 1016), 'os.path.exists', 'os.path.exists', (['data_folder'], {}), '(data_folder)\n', (1003, 1016), False, 'import os\n'), ((1022, 1046), 'os.makedirs', 'os.makedirs', (['data_folder'], {}), '(data_folder)\n', (1033, 1046), False, 'import os\n'), ((1055, 1082), 'os.path.exists', 'os.path.exists', (['temp_folder'], {}), '(temp_folder)\n', (1069, 1082), False, 'import os\n'), ((1088, 1112), 'os.makedirs', 'os.makedirs', (['temp_folder'], {}), '(temp_folder)\n', (1099, 1112), False, 'import os\n'), ((4274, 4314), 'clusterconfigurator.ClusterConfigurator', 'ClusterConfigurator', (['tempDf', 'dbTopConfig'], {}), '(tempDf, dbTopConfig)\n', (4293, 4314), False, 'from clusterconfigurator import ClusterConfigurator\n'), ((4563, 4604), 'clusterconfigurator.ClusterConfigurator', 'ClusterConfigurator', (['tempDf', 'tmaTopConfig'], {}), '(tempDf, tmaTopConfig)\n', (4582, 4604), False, 'from clusterconfigurator import ClusterConfigurator\n'), ((4851, 4891), 'clusterconfigurator.ClusterConfigurator', 'ClusterConfigurator', (['tempDf', 'imTopConfig'], {}), '(tempDf, imTopConfig)\n', (4870, 4891), False, 'from clusterconfigurator import ClusterConfigurator\n'), ((7183, 7298), 'pandas.read_excel', 'pd.read_excel', (['"""results/labeling/to_label.xlsx"""'], {'sheet_name': '"""Sheet1"""', 'usecols': '"""B,E,D,G"""', 'nrows': '(685)', 'index_col': '(0)'}), "('results/labeling/to_label.xlsx', sheet_name='Sheet1',\n usecols='B,E,D,G', nrows=685, index_col=0)\n", (7196, 7298), True, 'import pandas as pd\n'), ((9437, 9487), 'clusterconfigurator.ClusterConfigurator', 'ClusterConfigurator', (['df', 'bestConfigurations[smell]'], {}), '(df, bestConfigurations[smell])\n', (9456, 
9487), False, 'from clusterconfigurator import ClusterConfigurator\n'), ((11608, 11767), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(6)', 'shared_yaxes': '(True)', 'subplot_titles': "['Duplicate Block', '', 'Too many Attributes', '',\n 'Insufficient Modularization', '']"}), "(rows=1, cols=6, shared_yaxes=True, subplot_titles=[\n 'Duplicate Block', '', 'Too many Attributes', '',\n 'Insufficient Modularization', ''])\n", (11621, 11767), False, 'from plotly.subplots import make_subplots\n'), ((12461, 12620), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(6)', 'shared_yaxes': '(True)', 'subplot_titles': "['Duplicate Block', '', 'Too many Attributes', '',\n 'Insufficient Modularization', '']"}), "(rows=1, cols=6, shared_yaxes=True, subplot_titles=[\n 'Duplicate Block', '', 'Too many Attributes', '',\n 'Insufficient Modularization', ''])\n", (12474, 12620), False, 'from plotly.subplots import make_subplots\n'), ((7502, 7549), 'rulebased.detector.duplicateblocks.evaluate_script_with_rule', 'duplicateblocks.evaluate_script_with_rule', (['path'], {}), '(path)\n', (7543, 7549), False, 'from rulebased.detector import duplicateblocks\n'), ((9724, 9765), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['trueLabels', 'predLabels'], {}), '(trueLabels, predLabels)\n', (9741, 9765), False, 'from sklearn.metrics import matthews_corrcoef\n'), ((10549, 10612), 'cliffsDelta.cliffsDelta', 'cliffsDelta.cliffsDelta', (['scoreDict[pair[0]]', 'scoreDict[pair[1]]'], {}), '(scoreDict[pair[0]], scoreDict[pair[1]])\n', (10572, 10612), False, 'import cliffsDelta\n'), ((10709, 10794), 'os.path.join', 'os.path.join', (['results_folder', 'f"""utestresults{iters}iters{subSample}percent.xlsx"""'], {}), "(results_folder, f'utestresults{iters}iters{subSample}percent.xlsx'\n )\n", (10721, 10794), False, 'import os\n'), ((10847, 10932), 'os.path.join', 'os.path.join', (['results_folder', 
'f"""effectresults{iters}iters{subSample}percent.xlsx"""'], {}), "(results_folder,\n f'effectresults{iters}iters{subSample}percent.xlsx')\n", (10859, 10932), False, 'import os\n'), ((11389, 11499), 'plotly.graph_objects.Box', 'go.Box', ([], {'y': 'scoreList', 'name': 'detector', 'boxpoints': '"""all"""', 'marker_color': 'colors[smell]', 'line_color': 'colors[smell]'}), "(y=scoreList, name=detector, boxpoints='all', marker_color=colors[\n smell], line_color=colors[smell])\n", (11395, 11499), True, 'import plotly.graph_objects as go\n'), ((7591, 7640), 'rulebased.detector.toomanyattributes.evaluate_script_with_rule', 'toomanyattributes.evaluate_script_with_rule', (['path'], {}), '(path)\n', (7634, 7640), False, 'from rulebased.detector import toomanyattributes\n'), ((9810, 9849), 'sklearn.metrics.precision_score', 'precision_score', (['trueLabels', 'predLabels'], {}), '(trueLabels, predLabels)\n', (9825, 9849), False, 'from sklearn.metrics import precision_score\n'), ((10431, 10459), 'numpy.array', 'np.array', (['scoreDict[pair[0]]'], {}), '(scoreDict[pair[0]])\n', (10439, 10459), True, 'import numpy as np\n'), ((10461, 10489), 'numpy.array', 'np.array', (['scoreDict[pair[1]]'], {}), '(scoreDict[pair[1]])\n', (10469, 10489), True, 'import numpy as np\n'), ((5706, 5784), 'os.path.join', 'os.path.join', (['temp_folder', 'f"""MCCandPrecisionFor{iters}iters{subSample}percent"""'], {}), "(temp_folder, f'MCCandPrecisionFor{iters}iters{subSample}percent')\n", (5718, 5784), False, 'import os\n'), ((7681, 7739), 'rulebased.detector.insufficientmodularization.evaluate_script_with_rule', 'insufficientmodularization.evaluate_script_with_rule', (['path'], {}), '(path)\n', (7733, 7739), False, 'from rulebased.detector import insufficientmodularization\n'), ((7832, 7876), 'os.path.join', 'os.path.join', (['temp_folder', '"""rulebasedlabels"""'], {}), "(temp_folder, 'rulebasedlabels')\n", (7844, 7876), False, 'import os\n'), ((8008, 8044), 'os.path.join', 'os.path.join', 
(['data_folder', 'blueprint'], {}), '(data_folder, blueprint)\n', (8020, 8044), False, 'import os\n'), ((8342, 8363), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (8354, 8363), True, 'import pandas as pd\n'), ((10673, 10697), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'uDict'}), '(data=uDict)\n', (10685, 10697), True, 'import pandas as pd\n'), ((10806, 10835), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'effectDict'}), '(data=effectDict)\n', (10818, 10835), True, 'import pandas as pd\n'), ((6990, 7068), 'os.path.join', 'os.path.join', (['temp_folder', 'f"""MCCandPrecisionFor{iters}iters{subSample}percent"""'], {}), "(temp_folder, f'MCCandPrecisionFor{iters}iters{subSample}percent')\n", (7002, 7068), False, 'import os\n'), ((8104, 8153), 'rulebased.detector.toomanyattributes.evaluate_script_with_rule', 'toomanyattributes.evaluate_script_with_rule', (['path'], {}), '(path)\n', (8147, 8153), False, 'from rulebased.detector import toomanyattributes\n'), ((8178, 8225), 'rulebased.detector.duplicateblocks.evaluate_script_with_rule', 'duplicateblocks.evaluate_script_with_rule', (['path'], {}), '(path)\n', (8219, 8225), False, 'from rulebased.detector import duplicateblocks\n'), ((8250, 8308), 'rulebased.detector.insufficientmodularization.evaluate_script_with_rule', 'insufficientmodularization.evaluate_script_with_rule', (['path'], {}), '(path)\n', (8302, 8308), False, 'from rulebased.detector import insufficientmodularization\n'), ((8438, 8482), 'os.path.join', 'os.path.join', (['temp_folder', '"""rulebasedlabels"""'], {}), "(temp_folder, 'rulebasedlabels')\n", (8450, 8482), False, 'import os\n')] |
"""script runs small example of the animation manager usage"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from mpl_animationmanager import AnimationManager
def fAnim(j, ax, lineColl):
'''define the modification animation function'''
ax.collections = [] # clean axes
ax.add_collection3d(lineColl[j]) # add new artist
# create figure
fig = plt.figure('3D wireframe example')
ax = fig.gca(projection='3d')
ax.set_axis_off()
# generate modification frames (passed as fargs)
numFrames = 300
X, Y, Z = axes3d.get_test_data(0.05)
for j in range(numFrames):
ax.plot_wireframe(X, Y, Z*np.cos(2*np.pi/numFrames*j), rstride=5, cstride=5)
fargs = ax.collections
ax.collections = []
# pass figure to animation manager
mng = AnimationManager(ax, fAnim, fargs, numFrames)
mng.run()
| [
"mpl_toolkits.mplot3d.axes3d.get_test_data",
"matplotlib.pyplot.figure",
"mpl_animationmanager.AnimationManager",
"numpy.cos"
] | [((409, 443), 'matplotlib.pyplot.figure', 'plt.figure', (['"""3D wireframe example"""'], {}), "('3D wireframe example')\n", (419, 443), True, 'import matplotlib.pyplot as plt\n'), ((573, 599), 'mpl_toolkits.mplot3d.axes3d.get_test_data', 'axes3d.get_test_data', (['(0.05)'], {}), '(0.05)\n', (593, 599), False, 'from mpl_toolkits.mplot3d import axes3d\n'), ((813, 858), 'mpl_animationmanager.AnimationManager', 'AnimationManager', (['ax', 'fAnim', 'fargs', 'numFrames'], {}), '(ax, fAnim, fargs, numFrames)\n', (829, 858), False, 'from mpl_animationmanager import AnimationManager\n'), ((657, 690), 'numpy.cos', 'np.cos', (['(2 * np.pi / numFrames * j)'], {}), '(2 * np.pi / numFrames * j)\n', (663, 690), True, 'import numpy as np\n')] |
__author__ = 'jeremy'
import os
import cv2
import numpy as np
import logging
logging.basicConfig(level=logging.INFO)
import json
from trendi import constants
from trendi.utils import imutils
from trendi import Utils
def convert_labels_dir(indir,outdir,jpgdir=None,converter=constants.fashionista_aug_zerobased_to_pixlevel_categories_v2,
suffix_in='.png',suffix_out='_pixlevelv2.bmp',for_webtool=False,
inlabels=constants.fashionista_categories_augmented_zero_based,
outlabels=constants.pixlevel_categories_v2, save_legends=True):
'''
convert e..g from paperdoll to ultimate21 or pixlevel_categories_v2 .
Optionally only convert R channel for use with webtool. Don't forget to convert back to all chans after done w webtool
:param dir:
:param converter:
:param input_suffix:
:param for_webtool:
:return:
'''
Utils.ensure_dir(outdir)
files = [os.path.join(indir,f) for f in os.listdir(indir) if suffix_in in f]
print('STARTING CONVERT - converting '+str(len(files))+' files in '+indir)
for f in files:
print('')
newname = os.path.join(outdir,os.path.basename(f))
newname = newname.replace(suffix_in,suffix_out)
print('converting {} to {} '.format(f,newname))
converted_arr = convert_labels(f,converter=converter,for_webtool=for_webtool,inlabels=inlabels,outlabels=outlabels)
cv2.imwrite(newname,converted_arr)
#raw_input('ret to cont')
if save_legends:
if jpgdir is None:
jpgdir=indir
orig_imagename=os.path.basename(f).replace(suffix_in,'.jpg')
orig_imagename=os.path.join(jpgdir,orig_imagename)
print('saving legend using {} '.format(orig_imagename))
imutils.show_mask_with_labels(converted_arr,outlabels,original_image=orig_imagename,save_images=True)
def convert_labels(filename_or_img_array,converter=constants.fashionista_aug_zerobased_to_pixlevel_categories_v2,
for_webtool=True,inlabels=constants.fashionista_categories_augmented_zero_based,
outlabels=constants.pixlevel_categories_v2):
'''
convert e..g from paperdoll to ultimate21 or pixlevel_categories_v2 .
Optionally only convert R channel for use with webtool. Don't forget to convert back to all chans after done w webtool
:param converter:
:param input_suffix:
:param for_webtool:
:return:
'''
if isinstance(filename_or_img_array,basestring):
img_arr = cv2.imread(filename_or_img_array)
filename = filename_or_img_array
else:
img_arr = filename_or_img_array
filename = None
if img_arr is None:
logging.debug('got null image in conversion_utils.convert_pd_output')
h,w = img_arr.shape[0:2]
out_arr = np.zeros((h,w,3),dtype=np.uint8)
for u in np.unique(img_arr):
logging.debug('in converter, u='+str(u)+'len='+str(len(converter)))
if u+1>len(converter):
print('index {} is past length {} of converter, forcing to 0'.format(u,len(converter)))
newindex=0
else:
newindex= converter[u]
if newindex==None:
newindex=0
try:
print('converting {} {} to {} {}'.format(u,inlabels[u],newindex,outlabels[newindex]))
except:
logging.warning('looks like index {} is greater than inlabel array length {}!!!'.format(u,len(inlabels)))
out_arr[img_arr==u] = newindex #B it would seem this can be replaced by out_arr[:,:,:]=img_arr, maybe :: is used here
if for_webtool:
out_arr[:,:,0:2] = 0
return out_arr
def count_values(mask,labels=None):
image_size = mask.shape[0]*mask.shape[1]
uniques = np.unique(mask)
pixelcounts = {}
if len(mask.shape) == 3:
mask = mask[:,:,0] #this should be chan 3 if its a webtool image
for unique in uniques:
pixelcount = len(mask[mask==unique])
ratio = float(pixelcount)/image_size
if labels is not None:
print('class {} {} count {} ratio {}'.format(unique,labels[unique],pixelcount,ratio))
else:
print('class {} count {} ratio {}'.format(unique,pixelcount,ratio))
pixelcounts[unique]=pixelcount
return pixelcounts
def test_many_conversions():
multilabel_to_ultimate21_conversion=constants.binary_classifier_categories_to_ultimate_21
multilabel_labels=constants.binary_classifier_categories
print('testing binary classifier to u21 cats')
print('ml2u21 conversion:'+str(multilabel_to_ultimate21_conversion))
print('ml labels:'+str(multilabel_labels))
for i in range(len(multilabel_labels)):
neurodoll_index = multilabel_to_ultimate21_conversion[i]
#print('nd index:'+str(neurodoll_index))
if neurodoll_index is None:
print('no mapping from index {} (label {}) to neurodoll'.format(i,multilabel_labels[i]))
continue
print('index {} webtoollabel {} newindex {} neurodoll_label {}'.format(i,
multilabel_labels[i],neurodoll_index,constants.ultimate_21[neurodoll_index]))
multilabel_to_ultimate21_conversion=constants.web_tool_categories_v1_to_ultimate_21
multilabel_labels=constants.web_tool_categories
print('testing webtool v2 to u21 cats')
print('ml2u21 conversion:'+str(multilabel_to_ultimate21_conversion))
print('ml labels:'+str(multilabel_labels))
for i in range(len(multilabel_labels)):
neurodoll_index = multilabel_to_ultimate21_conversion[i]
if neurodoll_index is None:
print('no mapping from index {} (label {}) to neurodoll'.format(i,multilabel_labels[i]))
continue
print('index {} webtoollabel {} newindex {} neurodoll_label {}'.format(i,
multilabel_labels[i],neurodoll_index,constants.ultimate_21[neurodoll_index]))
multilabel_to_ultimate21_conversion=constants.web_tool_categories_v2_to_ultimate_21
multilabel_labels=constants.web_tool_categories_v2
print('testing webtool v1 to u21 cats')
print('ml2u21 conversion:'+str(multilabel_to_ultimate21_conversion))
print('ml labels:'+str(multilabel_labels))
for i in range(len(multilabel_labels)):
neurodoll_index = multilabel_to_ultimate21_conversion[i]
if neurodoll_index is None:
print('no mapping from index {} (label {}) to neurodoll'.format(i,multilabel_labels[i]))
continue
print('index {} webtoollabel {} newindex {} neurodoll_label {}'.format(i,
multilabel_labels[i],neurodoll_index,constants.ultimate_21[neurodoll_index]))
converter=constants.fashionista_aug_zerobased_to_pixlevel_categories_v2
orig_labels=constants.fashionista_categories_augmented_zero_based
dest_labels=constants.pixlevel_categories_v2
print('testing fashionista aug 0-based to pixlevel_v2 cats')
for i in range(len(orig_labels)):
dest_index = converter[i]
if dest_index is None:
print('no mapping from index {} (label {}) to dest'.format(i,orig_labels[i]))
continue
print('index {} origlabel {} newindex {} destlabel {}'.format(i,
orig_labels[i],dest_index,dest_labels[dest_index]))
def test_convert(orig_labels,dest_labels,converter):
print('testing conversion')
for i in range(len(orig_labels)):
dest_index = converter[i]
if dest_index is None:
print('no mapping from index {} (label {}) to dest'.format(i,orig_labels[i]))
continue
print('index {} origlabel {} newindex {} destlabel {}'.format(i,
orig_labels[i],dest_index,dest_labels[dest_index]))
def gen_json(images_dir='data/pd_output',annotations_dir='data/pd_output',
outfile = 'data/pd_output.json',labels=constants.pixlevel_categories_v2,mask_suffix='_pixv2_webtool.png',
ignore_finished=True,finished_mask_suffix='_pixv2_webtool_finished_mask.png'):
images = [os.path.join(images_dir,f) for f in os.listdir(images_dir) if '.jpg' in f and not 'legend' in f]
the_dict = {'labels': labels, 'imageURLs':[], 'annotationURLs':[]}
for f in images:
print('looking at '+f)
annotation_file = os.path.basename(f).replace('.jpg',mask_suffix)
annotation_file = os.path.join(annotations_dir,annotation_file)
if ignore_finished:
maskname = annotation_file.replace(mask_suffix,finished_mask_suffix)
#print('finished maskname:'+maskname)
if os.path.isfile(maskname):
print('mask '+maskname+' exists, skipping')
continue
if not os.path.isfile(annotation_file):
print('could not find '+str(annotation_file))
continue
the_dict['imageURLs'].append(f)
the_dict['annotationURLs'].append(annotation_file)
print('added image '+f+' mask '+annotation_file)
with open(outfile,'w') as fp:
json.dump(the_dict,fp,indent=4)
if __name__ == "__main__":
# gen_json()
print('starting test')
#test_convert(constants.ultimate_21,constants.pixlevel_categories_v3,constants.ultimate_21_to_pixlevel_v3)
#test_convert(constants.fashionista_categories_augmented,constants.pixlevel_categories_v3,constants.fashionista_augmented_to_pixlevel_v3)
test_convert(constants.fashionista_categories_augmented_zero_based,constants.pixlevel_categories_v4_for_web,constants.fashionista_aug_zerobased_to_pixlevel_categories_v4_for_web)
| [
"trendi.Utils.ensure_dir",
"json.dump",
"trendi.utils.imutils.show_mask_with_labels",
"logging.debug",
"logging.basicConfig",
"os.path.basename",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread",
"os.path.isfile",
"os.path.join",
"os.listdir",
"numpy.unique"
] | [((78, 117), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (97, 117), False, 'import logging\n'), ((916, 940), 'trendi.Utils.ensure_dir', 'Utils.ensure_dir', (['outdir'], {}), '(outdir)\n', (932, 940), False, 'from trendi import Utils\n'), ((2864, 2899), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {'dtype': 'np.uint8'}), '((h, w, 3), dtype=np.uint8)\n', (2872, 2899), True, 'import numpy as np\n'), ((2910, 2928), 'numpy.unique', 'np.unique', (['img_arr'], {}), '(img_arr)\n', (2919, 2928), True, 'import numpy as np\n'), ((3795, 3810), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (3804, 3810), True, 'import numpy as np\n'), ((954, 976), 'os.path.join', 'os.path.join', (['indir', 'f'], {}), '(indir, f)\n', (966, 976), False, 'import os\n'), ((1442, 1477), 'cv2.imwrite', 'cv2.imwrite', (['newname', 'converted_arr'], {}), '(newname, converted_arr)\n', (1453, 1477), False, 'import cv2\n'), ((2570, 2603), 'cv2.imread', 'cv2.imread', (['filename_or_img_array'], {}), '(filename_or_img_array)\n', (2580, 2603), False, 'import cv2\n'), ((2751, 2820), 'logging.debug', 'logging.debug', (['"""got null image in conversion_utils.convert_pd_output"""'], {}), "('got null image in conversion_utils.convert_pd_output')\n", (2764, 2820), False, 'import logging\n'), ((8023, 8050), 'os.path.join', 'os.path.join', (['images_dir', 'f'], {}), '(images_dir, f)\n', (8035, 8050), False, 'import os\n'), ((8344, 8390), 'os.path.join', 'os.path.join', (['annotations_dir', 'annotation_file'], {}), '(annotations_dir, annotation_file)\n', (8356, 8390), False, 'import os\n'), ((9000, 9033), 'json.dump', 'json.dump', (['the_dict', 'fp'], {'indent': '(4)'}), '(the_dict, fp, indent=4)\n', (9009, 9033), False, 'import json\n'), ((985, 1002), 'os.listdir', 'os.listdir', (['indir'], {}), '(indir)\n', (995, 1002), False, 'import os\n'), ((1177, 1196), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (1193, 1196), False, 
'import os\n'), ((1696, 1732), 'os.path.join', 'os.path.join', (['jpgdir', 'orig_imagename'], {}), '(jpgdir, orig_imagename)\n', (1708, 1732), False, 'import os\n'), ((1813, 1922), 'trendi.utils.imutils.show_mask_with_labels', 'imutils.show_mask_with_labels', (['converted_arr', 'outlabels'], {'original_image': 'orig_imagename', 'save_images': '(True)'}), '(converted_arr, outlabels, original_image=\n orig_imagename, save_images=True)\n', (1842, 1922), False, 'from trendi.utils import imutils\n'), ((8059, 8081), 'os.listdir', 'os.listdir', (['images_dir'], {}), '(images_dir)\n', (8069, 8081), False, 'import os\n'), ((8564, 8588), 'os.path.isfile', 'os.path.isfile', (['maskname'], {}), '(maskname)\n', (8578, 8588), False, 'import os\n'), ((8690, 8721), 'os.path.isfile', 'os.path.isfile', (['annotation_file'], {}), '(annotation_file)\n', (8704, 8721), False, 'import os\n'), ((8270, 8289), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (8286, 8289), False, 'import os\n'), ((1623, 1642), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (1639, 1642), False, 'import os\n')] |
import numpy as np
import pickle
from dias.dataIO.dataPreProcess import refine_gt
class IonoDataManager():
def __init__(self, cfgs):
"""load train and test list
"""
self.cfgs = cfgs
self.base_path = cfgs['Data']['BasePath']
train_list_file_path = self.base_path + cfgs['Data']['TrainListFile']
t_file = open(train_list_file_path,'r')
# default batch size = 1
self.train_data_list = list()
for line in t_file.readlines():
self.train_data_list.append(line)
t_file.close()
self.test_data_list = list()
test_list_file_path = self.base_path + cfgs['Data']['TestListFile']
t_file = open(test_list_file_path,'r')
for line in t_file.readlines():
self.test_data_list.append(line)
t_file.close()
self.pad_height = int(cfgs['Data']['PadHeight'])
self.pad_width = int(cfgs['Data']['PadWidth'])
self.channel_num = int(cfgs['Data']['ChannelNum'])
self.class_num = int(cfgs['Data']['ClassNum'])
def get_train_batch(self):
"""get train batch
"""
#print(len(self.train_data_list))
rand_id = int(np.random.randint(low=0,high=len(self.train_data_list),size=1))
x_train = np.zeros([1,self.pad_height,self.pad_height,self.channel_num])
y_train = np.zeros([1,self.pad_height,self.pad_height,self.class_num])
ori_x_name = self.base_path + self.train_data_list[rand_id].split(' ')[0]
ori_y_name = self.base_path + self.train_data_list[rand_id].split(' ')[1]
t_file = open(ori_x_name,'rb')
ori_x = pickle.load(t_file)
t_file.close()
t_file = open(ori_y_name,'rb')
ori_y = pickle.load(t_file)
t_file.close()
ori_height = np.shape(ori_x)[0]
ori_width = np.shape(ori_x)[1]
x_train[0,:ori_height,:ori_width,0] = ori_x[:,:,0]/np.max(ori_x[:,:,0])
x_train[0,:ori_height,:ori_width,1] = ori_x[:,:,1]/np.max(ori_x[:,:,1])
x_train[0,:ori_height,:ori_width,2] = ori_x[:,:,2]/np.max(ori_x[:,:,2])
y_train[0,:ori_height,:ori_width,:] = ori_y[:,:,:]/1.0
y_train[0,:,:,:] = refine_gt(y_train[0,:,:,:])
return x_train,y_train
def get_test_batch(self, t_id):
"""get train batch
"""
#print(len(self.train_data_list))
#rand_id = int(np.random.randint(low=0,high=len(self.train_data_list),size=1))
rand_id = t_id
x_test= np.zeros([1,self.pad_height,self.pad_height,self.channel_num])
y_test = np.zeros([1,self.pad_height,self.pad_height,self.class_num])
art_test = np.zeros([1,self.pad_height,self.pad_height,self.class_num])
ori_x_name = self.base_path + self.test_data_list[rand_id].split(' ')[0]
ori_y_name = self.base_path + self.test_data_list[rand_id].split(' ')[1]
art_y_name = self.base_path + self.test_data_list[rand_id].split(' ')[2].rstrip()
t_file = open(ori_x_name,'rb')
ori_x = pickle.load(t_file)
t_file.close()
t_file = open(ori_y_name,'rb')
ori_y = pickle.load(t_file)
t_file.close()
t_file = open(art_y_name,'rb')
art_y = pickle.load(t_file)
t_file.close()
ori_height = np.shape(ori_x)[0]
ori_width = np.shape(ori_x)[1]
x_test[0,:ori_height,:ori_width,0] = ori_x[:,:,0]/np.max(ori_x[:,:,0])
x_test[0,:ori_height,:ori_width,1] = ori_x[:,:,1]/np.max(ori_x[:,:,1])
x_test[0,:ori_height,:ori_width,2] = ori_x[:,:,2]/np.max(ori_x[:,:,2])
y_test[0,:ori_height,:ori_width,:] = ori_y[:,:,:]/1.0
#y_test[0,:,:,:] = refine_gt(y_test[0,:,:,:])
art_test[0,:ori_height,:ori_width,:] = art_y[:,:,:]/1.0
#art_test[0,:,:,:] = refine_gt(art_test[0,:,:,:])
return x_test, y_test, art_test
def get_scale_only(self, id):
"""get train batch
"""
#print(len(self.train_data_list))
#rand_id = int(np.random.randint(low=0,high=len(self.train_data_list),size=1))
rand_id = id
x_test= np.zeros([1,self.pad_height,self.pad_height,self.channel_num])
ori_x_name = self.base_path + self.test_data_list[rand_id].split(' ')[0][:-1]
t_file = open(ori_x_name,'rb')
ori_x = pickle.load(t_file)
t_file.close()
ori_height = np.shape(ori_x)[0]
ori_width = np.shape(ori_x)[1]
x_test[0,:ori_height,:ori_width,0] = ori_x[:,:,0]/np.max(ori_x[:,:,0])
x_test[0,:ori_height,:ori_width,1] = ori_x[:,:,1]/np.max(ori_x[:,:,1])
x_test[0,:ori_height,:ori_width,2] = ori_x[:,:,2]/np.max(ori_x[:,:,2])
return x_test
if __name__ == '__main__':
    # Smoke test: load the example config, draw one training batch, and
    # show the input image next to its label map side by side.
    import yaml
    config_path = 'C:/Users/wangj/Documents/GitHub/SmartRadarAssistant/DIonoAutoScaler/example_config.yaml'
    cfgs = yaml.load(open(config_path, 'r'), Loader=yaml.BaseLoader)
    dataManager = IonoDataManager(cfgs)
    x_train, y_train = dataManager.get_train_batch()
    import matplotlib.pyplot as plt
    plt.figure()
    for position, batch in ((1, x_train), (2, y_train)):
        plt.subplot(1, 2, position)
        plt.imshow(batch[0, :, :, :])
    plt.show()
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"numpy.shape",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.max",
"dias.dataIO.dataPreProcess.refine_gt"
] | [((5042, 5054), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5052, 5054), True, 'import matplotlib.pyplot as plt\n'), ((5059, 5079), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (5070, 5079), True, 'import matplotlib.pyplot as plt\n'), ((5082, 5113), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x_train[0, :, :, :]'], {}), '(x_train[0, :, :, :])\n', (5092, 5113), True, 'import matplotlib.pyplot as plt\n'), ((5115, 5135), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (5126, 5135), True, 'import matplotlib.pyplot as plt\n'), ((5138, 5169), 'matplotlib.pyplot.imshow', 'plt.imshow', (['y_train[0, :, :, :]'], {}), '(y_train[0, :, :, :])\n', (5148, 5169), True, 'import matplotlib.pyplot as plt\n'), ((5171, 5181), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5179, 5181), True, 'import matplotlib.pyplot as plt\n'), ((1280, 1345), 'numpy.zeros', 'np.zeros', (['[1, self.pad_height, self.pad_height, self.channel_num]'], {}), '([1, self.pad_height, self.pad_height, self.channel_num])\n', (1288, 1345), True, 'import numpy as np\n'), ((1361, 1424), 'numpy.zeros', 'np.zeros', (['[1, self.pad_height, self.pad_height, self.class_num]'], {}), '([1, self.pad_height, self.pad_height, self.class_num])\n', (1369, 1424), True, 'import numpy as np\n'), ((1644, 1663), 'pickle.load', 'pickle.load', (['t_file'], {}), '(t_file)\n', (1655, 1663), False, 'import pickle\n'), ((1742, 1761), 'pickle.load', 'pickle.load', (['t_file'], {}), '(t_file)\n', (1753, 1761), False, 'import pickle\n'), ((2195, 2225), 'dias.dataIO.dataPreProcess.refine_gt', 'refine_gt', (['y_train[0, :, :, :]'], {}), '(y_train[0, :, :, :])\n', (2204, 2225), False, 'from dias.dataIO.dataPreProcess import refine_gt\n'), ((2507, 2572), 'numpy.zeros', 'np.zeros', (['[1, self.pad_height, self.pad_height, self.channel_num]'], {}), '([1, self.pad_height, self.pad_height, self.channel_num])\n', (2515, 2572), True, 
'import numpy as np\n'), ((2587, 2650), 'numpy.zeros', 'np.zeros', (['[1, self.pad_height, self.pad_height, self.class_num]'], {}), '([1, self.pad_height, self.pad_height, self.class_num])\n', (2595, 2650), True, 'import numpy as np\n'), ((2667, 2730), 'numpy.zeros', 'np.zeros', (['[1, self.pad_height, self.pad_height, self.class_num]'], {}), '([1, self.pad_height, self.pad_height, self.class_num])\n', (2675, 2730), True, 'import numpy as np\n'), ((3038, 3057), 'pickle.load', 'pickle.load', (['t_file'], {}), '(t_file)\n', (3049, 3057), False, 'import pickle\n'), ((3136, 3155), 'pickle.load', 'pickle.load', (['t_file'], {}), '(t_file)\n', (3147, 3155), False, 'import pickle\n'), ((3234, 3253), 'pickle.load', 'pickle.load', (['t_file'], {}), '(t_file)\n', (3245, 3253), False, 'import pickle\n'), ((4123, 4188), 'numpy.zeros', 'np.zeros', (['[1, self.pad_height, self.pad_height, self.channel_num]'], {}), '([1, self.pad_height, self.pad_height, self.channel_num])\n', (4131, 4188), True, 'import numpy as np\n'), ((4329, 4348), 'pickle.load', 'pickle.load', (['t_file'], {}), '(t_file)\n', (4340, 4348), False, 'import pickle\n'), ((1807, 1822), 'numpy.shape', 'np.shape', (['ori_x'], {}), '(ori_x)\n', (1815, 1822), True, 'import numpy as np\n'), ((1846, 1861), 'numpy.shape', 'np.shape', (['ori_x'], {}), '(ori_x)\n', (1854, 1861), True, 'import numpy as np\n'), ((1924, 1946), 'numpy.max', 'np.max', (['ori_x[:, :, 0]'], {}), '(ori_x[:, :, 0])\n', (1930, 1946), True, 'import numpy as np\n'), ((2004, 2026), 'numpy.max', 'np.max', (['ori_x[:, :, 1]'], {}), '(ori_x[:, :, 1])\n', (2010, 2026), True, 'import numpy as np\n'), ((2084, 2106), 'numpy.max', 'np.max', (['ori_x[:, :, 2]'], {}), '(ori_x[:, :, 2])\n', (2090, 2106), True, 'import numpy as np\n'), ((3299, 3314), 'numpy.shape', 'np.shape', (['ori_x'], {}), '(ori_x)\n', (3307, 3314), True, 'import numpy as np\n'), ((3338, 3353), 'numpy.shape', 'np.shape', (['ori_x'], {}), '(ori_x)\n', (3346, 3353), True, 'import numpy as 
np\n'), ((3415, 3437), 'numpy.max', 'np.max', (['ori_x[:, :, 0]'], {}), '(ori_x[:, :, 0])\n', (3421, 3437), True, 'import numpy as np\n'), ((3494, 3516), 'numpy.max', 'np.max', (['ori_x[:, :, 1]'], {}), '(ori_x[:, :, 1])\n', (3500, 3516), True, 'import numpy as np\n'), ((3573, 3595), 'numpy.max', 'np.max', (['ori_x[:, :, 2]'], {}), '(ori_x[:, :, 2])\n', (3579, 3595), True, 'import numpy as np\n'), ((4394, 4409), 'numpy.shape', 'np.shape', (['ori_x'], {}), '(ori_x)\n', (4402, 4409), True, 'import numpy as np\n'), ((4433, 4448), 'numpy.shape', 'np.shape', (['ori_x'], {}), '(ori_x)\n', (4441, 4448), True, 'import numpy as np\n'), ((4510, 4532), 'numpy.max', 'np.max', (['ori_x[:, :, 0]'], {}), '(ori_x[:, :, 0])\n', (4516, 4532), True, 'import numpy as np\n'), ((4589, 4611), 'numpy.max', 'np.max', (['ori_x[:, :, 1]'], {}), '(ori_x[:, :, 1])\n', (4595, 4611), True, 'import numpy as np\n'), ((4668, 4690), 'numpy.max', 'np.max', (['ori_x[:, :, 2]'], {}), '(ori_x[:, :, 2])\n', (4674, 4690), True, 'import numpy as np\n')] |
import threading
import numpy as np
import multiprocessing
import time
import Queue
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Iterator(object):
    """Abstract base class for image data iterators.

    # Arguments
        n: Integer, total number of samples in the dataset to loop over.
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seeding for data shuffling.
    """

    def __init__(self, n, batch_size=1, shuffle=True, seed=10):
        self.n = n
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()
        self.index_generator = self._flow_index(n, batch_size, shuffle, seed)

    def reset(self):
        """Restart batching from the beginning of an epoch."""
        self.batch_index = 0

    def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
        """Infinite generator yielding (index_slice, current_index, batch_size)."""
        # Ensure self.batch_index is 0 before the first batch.
        self.reset()
        index_array = None
        while True:
            if seed is not None:
                # Re-seed per batch so shuffling is reproducible yet varies.
                np.random.seed(seed + self.total_batches_seen)
            if self.batch_index == 0:
                index_array = np.arange(n)
                if shuffle:
                    index_array = np.random.permutation(n)
            current_index = (self.batch_index * batch_size) % n
            remaining = n - current_index
            if remaining > batch_size:
                current_batch_size = batch_size
                self.batch_index += 1
            else:
                # Final (possibly short) batch of the epoch.
                current_batch_size = remaining
                self.batch_index = 0
            self.total_batches_seen += 1
            logger.debug("_flow_index")
            yield (index_array[current_index:current_index + current_batch_size],
                   current_index, current_batch_size)

    def __iter__(self):
        # Needed if we want to do something like:
        # for x, y in data_gen.flow(...):
        return self

    def __next__(self, *args, **kwargs):
        return self.next(*args, **kwargs)
class CocoGenerator(Iterator):
    """Iterator over a list of sample dicts, optionally following a
    caller-supplied pre-sorted index order."""

    def __init__(self, data,
                 batch_size=1, shuffle=False, seed=None, sorted_index=None):
        self.data = data
        super(CocoGenerator, self).__init__(
            len(data), batch_size, shuffle, seed)
        # Safe to set after super().__init__: the index generator is lazy and
        # only reads this attribute on the first next() call.
        self.sorted_index = sorted_index

    def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):
        """Like Iterator._flow_index, but starts from self.sorted_index
        when one was provided."""
        # Ensure self.batch_index is 0 before the first batch.
        self.reset()
        index_array = None
        while True:
            if seed is not None:
                np.random.seed(seed + self.total_batches_seen)
            if self.batch_index == 0:
                if self.sorted_index is None:
                    index_array = np.arange(n)
                else:
                    index_array = self.sorted_index
                if shuffle:
                    index_array = np.random.permutation(n)
            current_index = (self.batch_index * batch_size) % n
            leftover = n - current_index
            if leftover > batch_size:
                current_batch_size = batch_size
                self.batch_index += 1
            else:
                current_batch_size = leftover
                self.batch_index = 0
            self.total_batches_seen += 1
            yield (index_array[current_index:current_index + current_batch_size],
                   current_index, current_batch_size)

    def next(self):
        """For python 2.x.

        # Returns
            The next batch.
        """
        # Only the index bookkeeping is serialized under the lock; the
        # transformation of the data can run in parallel across threads.
        with self.lock:
            index_array, current_index, current_batch_size = next(
                self.index_generator)
        logger.debug('Next executed')
        logger.debug('hehehe' + str(index_array))
        logger.debug(current_index)
        logger.debug(current_batch_size)
        batch = [self.data[i] for i in index_array]
        batch = [item for item in batch if item is not None]
        logger.debug([d['im_name'] for d in batch])
        return batch
class Enqueuer(object):
    """Buffer items produced by a generator on background workers.

    Workers (threads or processes, depending on ``use_multiprocessing``)
    repeatedly pull from the wrapped generator and push results into an
    internal queue; ``get()`` yields them back to the consumer.
    """
    def __init__(self, generator, use_multiprocessing=False, shuffle=False, wait_time=0.05, random_seed=None):
        # NOTE(review): `shuffle` is accepted but never read in this class.
        self._generator = generator
        self._use_multiprocessing = use_multiprocessing
        self.queue = None
        self._stop_event = None
        self._threads = []
        self.wait_time = wait_time
        self.random_seed = random_seed
        # The lock type must match the worker type (process vs. thread).
        if self._use_multiprocessing:
            self.lock = multiprocessing.Lock()
        else:
            self.lock = threading.Lock()
    def start(self, workers=3, max_queue_size=10):
        """Spawn `workers` background tasks that fill the queue.

        `max_queue_size` caps the buffer (enforced via qsize for threads,
        via the bounded multiprocessing.Queue for processes).
        """
        logger.debug('start')
        def data_generator_task():
            # Worker loop: keep producing until the stop event is set.
            logger.debug("task start")
            logger.debug("_stop_event %s" % str(self._stop_event.is_set()))
            while not self._stop_event.is_set():
                try:
                    logger.debug("Queue size %d " % self.queue.qsize())
                    logger.debug("use_multiprocessing %s" %
                                 str(self._use_multiprocessing))
                    # For processes the bounded queue blocks on put() itself,
                    # so the explicit qsize check only matters for threads.
                    if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
                        generator_output = next(self._generator)
                        self.queue.put(generator_output)
                    else:
                        time.sleep(self.wait_time)
                except Exception:
                    # Signal all workers to stop, then propagate the error.
                    self._stop_event.set()
                    raise
        try:
            logger.debug("try")
            if self._use_multiprocessing:
                self.queue = multiprocessing.Queue(maxsize=max_queue_size)
                self._stop_event = multiprocessing.Event()
            else:
                self.queue = Queue.Queue()
                self._stop_event = threading.Event()
            for _ in range(workers):
                if self._use_multiprocessing:
                    # Seed numpy before forking each worker; the seed is
                    # bumped per worker below so the processes differ.
                    np.random.seed(self.random_seed)
                    thread = multiprocessing.Process(
                        target=data_generator_task)
                    thread.daemon = True
                    if self.random_seed is not None:
                        self.random_seed += 1
                else:
                    thread = threading.Thread(target=data_generator_task)
                self._threads.append(thread)
                thread.start()
        except Exception as e:
            # Clean up any workers already started before re-raising.
            logger.debug(e)
            self.stop()
            raise
    def is_running(self):
        # Running means start() created a stop event that is not yet set.
        return self._stop_event is not None and not self._stop_event.is_set()
    def stop(self, timeout=None):
        """Stop all workers and reset state.

        `timeout` bounds the join() of each thread worker; process workers
        are terminated outright.
        """
        if self.is_running():
            self._stop_event.set()
        for thread in self._threads:
            if thread.is_alive():
                if self._use_multiprocessing:
                    thread.terminate()
                else:
                    thread.join(timeout)
        if self._use_multiprocessing:
            if self.queue is not None:
                self.queue.close()
        self._threads = []
        self._stop_event = None
        self.queue = None
    def get(self):
        """Yield buffered items for as long as the enqueuer is running."""
        while self.is_running():
            logger.debug("Next")
            logger.debug("queue.empty : %s" % str(self.queue.empty()))
            if not self.queue.empty():
                inputs = self.queue.get()
                # None items are silently dropped.
                if inputs is not None:
                    yield inputs
            else:
                # Queue drained: back off briefly instead of busy-waiting.
                time.sleep(self.wait_time)
                pass
| [
"threading.Thread",
"numpy.random.seed",
"logging.basicConfig",
"multiprocessing.Lock",
"Queue.Queue",
"time.sleep",
"threading.Lock",
"numpy.arange",
"threading.Event",
"multiprocessing.Queue",
"numpy.random.permutation",
"multiprocessing.Event",
"multiprocessing.Process",
"logging.getLog... | [((100, 121), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (119, 121), False, 'import logging\n'), ((131, 158), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (148, 158), False, 'import logging\n'), ((768, 784), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (782, 784), False, 'import threading\n'), ((4590, 4612), 'multiprocessing.Lock', 'multiprocessing.Lock', ([], {}), '()\n', (4610, 4612), False, 'import multiprocessing\n'), ((4651, 4667), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4665, 4667), False, 'import threading\n'), ((1113, 1159), 'numpy.random.seed', 'np.random.seed', (['(seed + self.total_batches_seen)'], {}), '(seed + self.total_batches_seen)\n', (1127, 1159), True, 'import numpy as np\n'), ((1228, 1240), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1237, 1240), True, 'import numpy as np\n'), ((2556, 2602), 'numpy.random.seed', 'np.random.seed', (['(seed + self.total_batches_seen)'], {}), '(seed + self.total_batches_seen)\n', (2570, 2602), True, 'import numpy as np\n'), ((5676, 5721), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {'maxsize': 'max_queue_size'}), '(maxsize=max_queue_size)\n', (5697, 5721), False, 'import multiprocessing\n'), ((5757, 5780), 'multiprocessing.Event', 'multiprocessing.Event', ([], {}), '()\n', (5778, 5780), False, 'import multiprocessing\n'), ((5828, 5841), 'Queue.Queue', 'Queue.Queue', ([], {}), '()\n', (5839, 5841), False, 'import Queue\n'), ((5877, 5894), 'threading.Event', 'threading.Event', ([], {}), '()\n', (5892, 5894), False, 'import threading\n'), ((7520, 7546), 'time.sleep', 'time.sleep', (['self.wait_time'], {}), '(self.wait_time)\n', (7530, 7546), False, 'import time\n'), ((1303, 1327), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (1324, 1327), True, 'import numpy as np\n'), ((2721, 2733), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2730, 2733), True, 'import numpy as 
np\n'), ((2871, 2895), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2892, 2895), True, 'import numpy as np\n'), ((5999, 6031), 'numpy.random.seed', 'np.random.seed', (['self.random_seed'], {}), '(self.random_seed)\n', (6013, 6031), True, 'import numpy as np\n'), ((6061, 6112), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'data_generator_task'}), '(target=data_generator_task)\n', (6084, 6112), False, 'import multiprocessing\n'), ((6330, 6374), 'threading.Thread', 'threading.Thread', ([], {'target': 'data_generator_task'}), '(target=data_generator_task)\n', (6346, 6374), False, 'import threading\n'), ((5429, 5455), 'time.sleep', 'time.sleep', (['self.wait_time'], {}), '(self.wait_time)\n', (5439, 5455), False, 'import time\n')] |
import getopt
import sys
import joblib
import numpy
import pandas
from IPython.display import display
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from preprocessing import DataFrameImputer
from preprocessing import DataFrameOneHotEncoder
from preprocessing import binarize
class EmpathyClassification:
    """End-to-end pipeline for predicting self-reported empathy.

    Handles data loading, preprocessing, grid-searched training of several
    scikit-learn classifiers, scoring on a held-out test set, and
    saving/loading the best model via joblib.
    """

    def __init__(self, dataFile='data/responses.csv', outputFile='data/bestModel.pkl',
                 testSetOutputFile='data/testSet.csv', randomSeed=42, testSetSize=0.2):
        """Configure file paths, the random seed and the test-set fraction."""
        self.dataFile = dataFile
        self.outputFile = outputFile
        self.testSetOutputFile = testSetOutputFile
        self.seed = randomSeed
        self.testSize = testSetSize
        self.preprocessor = None
        # Candidate models as (display name, estimator, hyper-parameter grid).
        self.classifiers = [
            ('Most-frequent Classifier (Baseline)', DummyClassifier(), [
                {
                    'strategy': ['most_frequent']
                }
            ]),
            ('KNN', KNeighborsClassifier(n_jobs=-1), [
                {
                    'n_neighbors': range(1, 21)
                }
            ]),
            ('Logistic Regression', LogisticRegression(max_iter=10000), [
                {
                    'C': numpy.logspace(1e-4, 10, num=50),
                    'solver': ['liblinear']
                },
                {
                    'C': numpy.logspace(1e-4, 10, num=50),
                    'solver': ['lbfgs'],
                    'n_jobs': [-1]
                }
            ]),
            ('Gaussian Naive Bayes', GaussianNB(), [{}]),
            ('Perceptron', Perceptron(random_state=self.seed, tol=1e-3), [
                {
                    'penalty': ['l1', 'l2', 'elasticnet'],
                    'max_iter': [30, 35, 40, 45, 50, 75, 100, 250, 500, 1000, 1500],
                    'eta0': [0.1 * i for i in range(1, 11)]
                }
            ]),
            ('Decision Tree', DecisionTreeClassifier(random_state=self.seed), [
                {
                    'criterion': ['gini', 'entropy'],
                    'max_depth': [10, 15, 20, 30, 50, 100, 150, 200, None]
                }
            ]),
            ('Random Forest', RandomForestClassifier(random_state=self.seed, n_jobs=-1), [
                {'n_estimators': range(50, 501, 10), 'criterion': ['gini', 'entropy']}
            ]),
            ('SVM', SVC(random_state=self.seed), [
                {
                    'kernel': ['linear', 'rbf'],
                    'C': [0.01 * i for i in range(1, 101, 5)],
                    'gamma': ['auto', 'scale']
                },
                {
                    'kernel': ['poly'],
                    'C': [0.01 * i for i in range(1, 101, 5)],
                    'gamma': ['auto', 'scale'],
                    'degree': range(2, 6)
                }
            ])
        ]
        self.gridSearches = []
        self.results = None
        self.bestModels = {}
        self.bestOverallModel = None
        self.scoreResults = None

    def loadData(self, filename=None):
        """Load the survey CSV and return (X, y) with y binarized.

        Rows with a missing 'Empathy' answer are dropped; answers 1-3 map to
        class 0 and answers 4-5 to class 1.
        """
        csv = pandas.read_csv(self.dataFile if filename is None else filename)
        # The target must be present; missing predictors are imputed later.
        csv.dropna(subset=['Empathy'], inplace=True)
        # Separate dependent and independent variables.
        Yall = csv['Empathy']
        Xall = csv.drop(labels=['Empathy'], axis=1)
        # Binarize dependent variable s.t. y=[1,2,3] => 0, and y=[4,5] => 1.
        Yall = binarize(Yall, threshold=3)
        return Xall, Yall

    def splitTrainAndTestSet(self, Xall, Yall):
        """Deterministically split data into train and test sets.

        Bug fix: use the configured ``testSetSize`` fraction instead of the
        previously hard-coded 0.2, which silently ignored the constructor
        argument.
        """
        return train_test_split(Xall, Yall, test_size=self.testSize,
                                random_state=self.seed)

    def doPreprocessing(self, Xtrain, Ytrain):
        """Fit the preprocessing pipeline (impute, one-hot encode, scale,
        select features) on the training data and return it."""
        self.preprocessor = Pipeline([
            # Impute missing values and one-hot encode categorical variables.
            ('imputer', DataFrameImputer()),
            ('onehot', DataFrameOneHotEncoder()),
            ('scaling', MinMaxScaler()),
            # Keep only features a random forest considers informative.
            ('feature_selection', SelectFromModel(
                estimator=RandomForestClassifier(
                    random_state=self.seed,
                    criterion='entropy',
                    n_estimators=70, n_jobs=-1
                )
            ))
        ])
        self.preprocessor.fit(Xtrain, Ytrain)
        return self.preprocessor

    def applyPreprocessing(self, X):
        """Transform X with the already-fitted preprocessing pipeline."""
        return self.preprocessor.transform(X)

    def trainClassifiers(self, Xtrain, Ytrain):
        """Grid-search every candidate classifier with stratified 8-fold CV
        and record the best estimator and score for each one."""
        results = {"Classifier": [], "Best Parameters": [], "CV Accuracy": []}
        for name, classifier, params in self.classifiers:
            print("\n\nTraining {} ...".format(name))
            gridSearch = GridSearchCV(classifier, param_grid=params,
                                      cv=StratifiedKFold(n_splits=8, random_state=self.seed),
                                      scoring='accuracy', n_jobs=-1,
                                      iid=True)
            gridSearch.fit(Xtrain, Ytrain)
            self.gridSearches.append(gridSearch)
            results["Classifier"].append(name)
            results["Best Parameters"].append(gridSearch.best_params_)
            results["CV Accuracy"].append(gridSearch.best_score_)
            self.bestModels[name] = gridSearch.best_estimator_
            print("CV Accuracy:", gridSearch.best_score_)
            print("Best parameters:", gridSearch.best_params_)
        self.results = pandas.DataFrame(results)

    def findBestPerformingModel(self):
        """Return the fitted estimator with the highest CV accuracy."""
        bestPerformingModelName = self.results.iloc[self.results["CV Accuracy"].idxmax()]['Classifier']
        print("Best performing model:", bestPerformingModelName)
        self.bestOverallModel = self.bestModels[bestPerformingModelName]
        return self.bestOverallModel

    def scoreClassifiers(self, Xtest, Ytest):
        """Score every grid-searched estimator on the test set and store a
        summary DataFrame in ``self.scoreResults``."""
        results = {"Classifier": [], "CV Accuracy": [], "Test Accuracy": []}
        for index, gridSearch in enumerate(self.gridSearches):
            name, _, _ = self.classifiers[index]
            print("\n\nScoring {} ...".format(name))
            accuracy = gridSearch.best_estimator_.score(Xtest, Ytest)
            results["Classifier"].append(name)
            results["CV Accuracy"].append(gridSearch.best_score_)
            results["Test Accuracy"].append(accuracy)
            print("CV Accuracy was", gridSearch.best_score_)
            print("Test Accuracy is", accuracy)
        self.scoreResults = pandas.DataFrame(results)

    def writeTestSet(self, Xtest, Ytest):
        """Persist the held-out test set (features + label column) as CSV."""
        df = Xtest.copy()
        df['Empathy'] = Ytest
        df.to_csv(self.testSetOutputFile, index=False)

    def saveModel(self):
        """Dump the preprocessor, best model and all grid searches to disk."""
        saveData = {
            'preprocessor': self.preprocessor,
            'bestOverallModel': self.bestOverallModel,
            'gridSearches': self.gridSearches
        }
        joblib.dump(saveData, self.outputFile)
        print("Best performing model saved at:", self.outputFile)

    def loadModel(self, modelFile=None):
        """Restore a previously saved preprocessor/model bundle.

        Falls back to ``self.outputFile`` when no path is given.
        """
        if modelFile is None:
            modelFile = self.outputFile
        data = joblib.load(modelFile)
        self.preprocessor = data["preprocessor"]
        self.bestOverallModel = data['bestOverallModel']
        self.gridSearches = data['gridSearches']
        print("Best performing model loaded from:", modelFile)
def main(mode='test', dataFile='testSet.csv', modelFile='bestModel.pkl'):
    """Run either the training pipeline or the evaluation pipeline.

    mode='train' fits and persists the model zoo; mode='test' loads a saved
    model bundle and scores it on a prepared test set. Any other mode just
    prints an error message.
    """
    if mode not in ('train', 'test'):
        print("Invalid mode:", mode)
        return
    if mode == 'train':
        clf = EmpathyClassification(dataFile=dataFile, outputFile=modelFile)
        # Load, split, and persist the held-out portion for later evaluation.
        Xall, Yall = clf.loadData()
        Xtrain, Xtest, Ytrain, Ytest = clf.splitTrainAndTestSet(Xall, Yall)
        clf.writeTestSet(Xtest, Ytest)
        # Fit the preprocessing pipeline on the training data only.
        clf.doPreprocessing(Xtrain, Ytrain)
        print("Number of features before preprocessing:", Xtrain.shape[1])
        Xtrain = clf.applyPreprocessing(Xtrain)
        Xtest = clf.applyPreprocessing(Xtest)
        print("Number of features after preprocessing:", Xtrain.shape[1])
        # Grid-search every candidate model.
        clf.trainClassifiers(Xtrain, Ytrain)
        print("\n\nResults:\n")
        display(clf.results)
        # Persist the winner together with its preprocessing pipeline.
        best_model = clf.findBestPerformingModel()
        clf.saveModel()
        # Evaluate everything on the held-out test set.
        clf.scoreClassifiers(Xtest, Ytest)
        print("\nSummary of scores on test set:")
        display(clf.scoreResults)
        baseline_accuracy = clf.gridSearches[0].best_estimator_.score(Xtest, Ytest)
        print("\nAccuracy of most-frequent (baseline) classifier on test set:", baseline_accuracy)
        best_accuracy = best_model.score(Xtest, Ytest)
        print("Accuracy of best performing classifier on test set:", best_accuracy)
    else:
        clf = EmpathyClassification(testSetOutputFile=dataFile, outputFile=modelFile)
        # Load the prepared test set and the trained model bundle.
        Xtest, Ytest = clf.loadData(dataFile)
        clf.loadModel()
        Xtest = clf.applyPreprocessing(Xtest)
        clf.scoreClassifiers(Xtest, Ytest)
        print("\nSummary of scores on test set:")
        display(clf.scoreResults)
        baseline_accuracy = clf.gridSearches[0].best_estimator_.score(Xtest, Ytest)
        print("\nAccuracy of most-frequent (baseline) classifier on test set:", baseline_accuracy)
        best_accuracy = clf.bestOverallModel.score(Xtest, Ytest)
        print("Accuracy of best performing classifier on test set:", best_accuracy)
def usage():
    """Print the command-line usage string for this script."""
    message = "Usage:\n\tpy main.py --mode=<train|test> --dataset=<path to responses.csv | path to test set> --model=<path to load/write trained model> [-h --help]"
    print(message)
if __name__ == '__main__':
    # Parse command-line options; defaults allow launching with no arguments.
    argv = sys.argv[1:]
    # argv = "--mode=train --dataset=data/responses.csv --model=data/bestModel.pkl".split(' ')
    # argv = "--mode=test --dataset=data/testSet.csv --model=data/bestModel.pkl".split(' ')
    dataFile = 'data/responses.csv'
    modelFile = 'data/bestModel.pkl'
    mode = 'train'
    try:
        opts, args = getopt.getopt(argv, 'hm:d:l:', ['help', 'mode=', 'dataset=', 'model='])
    except getopt.GetoptError as err:
        print(err)
        usage()
        sys.exit(2)
    # Require at least one explicit option; bare invocation shows usage.
    if len(opts) == 0:
        usage()
        sys.exit()
    for option, value in opts:
        if option in ('-h', '--help'):
            usage()
            sys.exit()
        elif option in ('-m', '--mode'):
            if value.lower() in ('train', 'test'):
                mode = value
            else:
                # Fixed typo in the user-facing message ("ot" -> "or").
                sys.exit("Unknown mode: mode must be either 'train' or 'test'")
        elif option in ('-d', '--dataset'):
            dataFile = value
        elif option in ('-l', '--model'):
            modelFile = value
        else:
            sys.exit("Unknown option: {}".format(option))
    main(mode, dataFile, modelFile)
| [
"getopt.getopt",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"preprocessing.DataFrameOneHotEncoder",
"sklearn.preprocessing.MinMaxScaler",
"joblib.dump",
"numpy.logspace",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.svm.SVC",
"pandas.DataFrame",
"sklearn.dummy.DummyClassif... | [((3659, 3723), 'pandas.read_csv', 'pandas.read_csv', (['(self.dataFile if filename is None else filename)'], {}), '(self.dataFile if filename is None else filename)\n', (3674, 3723), False, 'import pandas\n'), ((4064, 4091), 'preprocessing.binarize', 'binarize', (['Yall'], {'threshold': '(3)'}), '(Yall, threshold=3)\n', (4072, 4091), False, 'from preprocessing import binarize\n'), ((4183, 4250), 'sklearn.model_selection.train_test_split', 'train_test_split', (['Xall', 'Yall'], {'test_size': '(0.2)', 'random_state': 'self.seed'}), '(Xall, Yall, test_size=0.2, random_state=self.seed)\n', (4199, 4250), False, 'from sklearn.model_selection import train_test_split\n'), ((6005, 6030), 'pandas.DataFrame', 'pandas.DataFrame', (['results'], {}), '(results)\n', (6021, 6030), False, 'import pandas\n'), ((7016, 7041), 'pandas.DataFrame', 'pandas.DataFrame', (['results'], {}), '(results)\n', (7032, 7041), False, 'import pandas\n'), ((7409, 7447), 'joblib.dump', 'joblib.dump', (['saveData', 'self.outputFile'], {}), '(saveData, self.outputFile)\n', (7420, 7447), False, 'import joblib\n'), ((7643, 7665), 'joblib.load', 'joblib.load', (['modelFile'], {}), '(modelFile)\n', (7654, 7665), False, 'import joblib\n'), ((8895, 8926), 'IPython.display.display', 'display', (['classification.results'], {}), '(classification.results)\n', (8902, 8926), False, 'from IPython.display import display\n'), ((9227, 9263), 'IPython.display.display', 'display', (['classification.scoreResults'], {}), '(classification.scoreResults)\n', (9234, 9263), False, 'from IPython.display import display\n'), ((11193, 11264), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hm:d:l:"""', "['help', 'mode=', 'dataset=', 'model=']"], {}), "(argv, 'hm:d:l:', ['help', 'mode=', 'dataset=', 'model='])\n", (11206, 11264), False, 'import getopt\n'), ((11406, 11416), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11414, 11416), False, 'import sys\n'), ((10158, 10194), 'IPython.display.display', 
'display', (['classification.scoreResults'], {}), '(classification.scoreResults)\n', (10165, 10194), False, 'from IPython.display import display\n'), ((11346, 11357), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (11354, 11357), False, 'import sys\n'), ((11520, 11530), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11528, 11530), False, 'import sys\n'), ((1423, 1440), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {}), '()\n', (1438, 1440), False, 'from sklearn.dummy import DummyClassifier\n'), ((1566, 1597), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1586, 1597), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1737, 1771), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(10000)'}), '(max_iter=10000)\n', (1755, 1771), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2139, 2151), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (2149, 2151), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((2187, 2232), 'sklearn.linear_model.Perceptron', 'Perceptron', ([], {'random_state': 'self.seed', 'tol': '(0.001)'}), '(random_state=self.seed, tol=0.001)\n', (2197, 2232), False, 'from sklearn.linear_model import Perceptron\n'), ((2521, 2567), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': 'self.seed'}), '(random_state=self.seed)\n', (2543, 2567), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2782, 2839), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'self.seed', 'n_jobs': '(-1)'}), '(random_state=self.seed, n_jobs=-1)\n', (2804, 2839), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2966, 2993), 'sklearn.svm.SVC', 'SVC', ([], {'random_state': 'self.seed'}), '(random_state=self.seed)\n', (2969, 2993), False, 'from sklearn.svm import SVC\n'), ((4446, 4464), 'preprocessing.DataFrameImputer', 
'DataFrameImputer', ([], {}), '()\n', (4462, 4464), False, 'from preprocessing import DataFrameImputer\n'), ((4490, 4514), 'preprocessing.DataFrameOneHotEncoder', 'DataFrameOneHotEncoder', ([], {}), '()\n', (4512, 4514), False, 'from preprocessing import DataFrameOneHotEncoder\n'), ((4541, 4555), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4553, 4555), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((5349, 5400), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(8)', 'random_state': 'self.seed'}), '(n_splits=8, random_state=self.seed)\n', (5364, 5400), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((11686, 11749), 'sys.exit', 'sys.exit', (['"""Unknown mode: mode must be either \'train\' ot \'test\'"""'], {}), '("Unknown mode: mode must be either \'train\' ot \'test\'")\n', (11694, 11749), False, 'import sys\n'), ((1818, 1852), 'numpy.logspace', 'numpy.logspace', (['(0.0001)', '(10)'], {'num': '(50)'}), '(0.0001, 10, num=50)\n', (1832, 1852), False, 'import numpy\n'), ((1958, 1992), 'numpy.logspace', 'numpy.logspace', (['(0.0001)', '(10)'], {'num': '(50)'}), '(0.0001, 10, num=50)\n', (1972, 1992), False, 'import numpy\n'), ((4635, 4734), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'self.seed', 'criterion': '"""entropy"""', 'n_estimators': '(70)', 'n_jobs': '(-1)'}), "(random_state=self.seed, criterion='entropy',\n n_estimators=70, n_jobs=-1)\n", (4657, 4734), False, 'from sklearn.ensemble import RandomForestClassifier\n')] |
import glob
import os
from pathlib import Path
from librosa import feature
import pandas as pd
import librosa
import datetime
import numpy as np
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn import metrics
from tensorflow.keras import metrics
import pickle
listdir = []
labels = []
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import regularizers
import matplotlib.pyplot as plt
from tensorflow.keras import backend as K
# Recursively collect every .flac file below the current working directory.
# NOTE(review): Path() is the CWD — the script must run from the dataset root.
for file in Path().rglob('*.flac'):
    print(file.name)
    x = file.name
    labels.append((x.split('-'))[0])  # class label = filename prefix before the first '-'
    listdir.append(Path.resolve(file.absolute()))  # absolute, resolved path to the audio file
df_all_data = pd.DataFrame()  # one row per audio file: (label, path)
df_all_data['labels'] = labels  # class label column
df_all_data['audio_files'] = listdir  # resolved audio path column
print(df_all_data.head())
# Shuffle the rows once so later sequential splits are not ordered by class.
df_all_data = df_all_data.sample(frac=1).reset_index(drop=True)
print(df_all_data.head())
# Show the class distribution as fractions to check for imbalance.
print(df_all_data['labels'].value_counts(normalize=True))
def extract_features(files):
    """Compute a 40-dimension mean-MFCC vector for one dataframe row.

    Expects *files* to expose ``audio_files`` (path to the audio) and
    ``labels`` (its class). Returns the tuple (mfcc_vector, label).
    """
    audio_path = files.audio_files
    # librosa decodes the file to a float time series at its default
    # 22050 Hz sample rate.
    signal, sample_rate = librosa.load(audio_path, res_type='kaiser_fast')
    # Average the MFCC frames over time to get one fixed-length vector.
    mfcc_matrix = librosa.feature.mfcc(y=signal, sr=sample_rate, n_mfcc=40)
    mfccs = np.mean(mfcc_matrix.T, axis=0)
    return mfccs, files.labels
def recall_m(y_true, y_pred):
    """Recall = TP / (TP + FN), computed on batched one-hot tensors."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # K.epsilon() guards against division by zero when a class is absent.
    return true_positives / (possible_positives + K.epsilon())
def precision_m(y_true, y_pred):
    """Precision = TP / (TP + FP), computed on batched one-hot tensors."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # K.epsilon() guards against division by zero with no positive predictions.
    return true_positives / (predicted_positives + K.epsilon())
def f1_m(y_true, y_pred):
    """F1 score: harmonic mean of ``precision_m`` and ``recall_m``."""
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    # epsilon keeps the denominator non-zero when both metrics are 0.
    return 2 * ((p * r) / (p + r + K.epsilon()))
startTime = datetime.datetime.now()
# Map every row (audio path + label) to its (mfcc_vector, label) pair.
features_label = df_all_data.apply(extract_features, axis=1)
print('time to extract features :'+str(datetime.datetime.now() - startTime))
# Unzip the (feature, label) pairs into two parallel lists.
labels = []
features = []
for feat, label in features_label:
    features.append(feat)
    labels.append(label)
print('len of features ')
print(len(features))
print('len of labels set() : ')
print(len(set(labels)))
X = np.array(features)
y = np.array(labels)
# One-hot encode the string labels: LabelEncoder -> integer ids -> categorical.
lb = LabelEncoder()
y = to_categorical(lb.fit_transform(y))
print(X.shape)
print(y.shape)
# ss = StandardScaler()
# X = ss.fit_transform(X)
# print(X[0])
# NOTE(review): X_train/y_train produced by this shuffled split are
# immediately overwritten by the positional slices below, so only
# X_test/Y_test actually come from train_test_split; the manual 65%/10%
# slices may therefore overlap the test rows, and Y_train is never used.
# Confirm this is intentional.
X_train, X_test, Y_train, Y_test = train_test_split(X,y, test_size=0.25, shuffle=True)
X_train = X[:int(0.65* len(X))]
y_train = y[:int(0.65* len(X))]
# print('len of labels y_train set() : ', len(set(y_train)))
X_val = X[int(0.65* len(X)):int(0.75*len(X))]
y_val = y[int(0.65* len(X)):int(0.75*len(X))]
# The triple-quoted string below is a disabled SVM baseline; it is a no-op
# expression statement at runtime.
'''
classifier = svm.SVC(kernel='poly')
print('training the SVM model with poly kernel')
startTime = datetime.datetime.now()
classifier.fit(X_train, Y_train)
print('time to train :'+str(datetime.datetime.now() - startTime))
y_predicted = classifier.predict(X_test)
'''
# Feed-forward classifier: 40 MFCC inputs -> softmax over the label classes.
model = Sequential()
model.add(Dense(40, input_shape=(40,), activation = 'linear'))
model.add(Dropout(0.1))
model.add(Dense(180, activation = 'linear'))
model.add(Dropout(0.25))
model.add(Dense(512, activation = 'linear'))
model.add(Dropout(0.5))
model.add(Dense(392, activation = 'linear'))
model.add(Dropout(0.5))
# Output width equals the number of distinct classes seen in the data.
model.add(Dense(len(set(labels)), activation = 'softmax'))
# NOTE(review): every hidden layer uses 'linear' activation, so the stacked
# Dense layers compose into a single affine map (plus dropout); a
# non-linearity such as 'relu' may have been intended — confirm.
model.compile(loss='categorical_crossentropy', metrics=['accuracy',f1_m,precision_m, recall_m], optimizer='adam')
# Stop training once validation loss has not improved for 100 epochs.
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=100, verbose=1, mode='auto')
history = model.fit(X_train, y_train, batch_size=256, epochs=400,
                    validation_data=(X_val, y_val),
                    callbacks=[early_stop])
# evaluate the model
# model.evaluate returns the loss followed by the compiled metrics in the
# order passed to model.compile: accuracy, f1_m, precision_m, recall_m.
loss, accuracy, f1_score, precision, recall = model.evaluate(X_test, Y_test, verbose=0)
# print(loss, accuracy, f1_score, precision, recall)
# print('Accuracy :', metrics.accuracy_score(Y_test, y_predicted))
# print('Recall :', metrics.recall_score(Y_test, y_predicted, average='macro'))
# print('Precision :', metrics.accuracy_score(Y_test, y_predicted))
# with open('trainedSvmModel.pickle', 'wb') as f:
# #     pickle.dump(classifier, f)
# preds = model.predict_classes(X_test)
# print(preds)
# # print(Y_test.maxarg())
# preds = lb.inverse_transform(preds)
# # y_predicted = lb.inverse_transform(preds)
# y_predicted = np.argmax(Y_test, axis =0 )
# print(y_predicted)
# print(Y_test)
# print('Accuracy :', metrics.accuracy_score(Y_test, y_predicted))
# print('Recall :', metrics.recall_score(Y_test, y_predicted, average='macro'))
# print('Precision :', metrics.accuracy_score(Y_test, y_predicted))
# Check out our train accuracy and validation accuracy over epochs.
print(history.history.keys())
# history.history keys mirror the compiled metric names per epoch.
train_accuracy = history.history['accuracy']
val_accuracy = history.history['val_accuracy']
precision_ma = history.history['precision_m']
recall_ma = history.history['recall_m']
# Set figure size.
plt.figure(figsize=(12, 8))
# Generate line plot of training, testing loss over epochs.
plt.plot(train_accuracy, label='Training Accuracy', color='#185fad')
plt.plot(val_accuracy, label='Validation Accuracy', color='orange')
plt.plot(precision_ma, label='Precision', color='red')
plt.plot(recall_ma, label='Recall', color='green')
# Set title
plt.title('Training and Validation Accuracy by Epoch', fontsize = 25)
plt.xlabel('Epoch', fontsize = 18)
plt.ylabel('Categorical Crossentropy', fontsize = 18)
# Tick marks every 20 epochs up to the 400-epoch maximum used in model.fit.
plt.xticks(range(0,400,20), range(0,400,20))
plt.legend(fontsize = 18)
plt.show() | [
"matplotlib.pyplot.title",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.figure",
"pathlib.Path",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.backend.epsilon",
"librosa.feature.mfcc",
"tensorflow.keras.callbacks.EarlyStopping",
"pandas.... | [((1128, 1142), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1140, 1142), True, 'import pandas as pd\n'), ((2924, 2947), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2945, 2947), False, 'import datetime\n'), ((3309, 3327), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (3317, 3327), True, 'import numpy as np\n'), ((3333, 3349), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3341, 3349), True, 'import numpy as np\n'), ((3356, 3370), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3368, 3370), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3544, 3596), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'shuffle': '(True)'}), '(X, y, test_size=0.25, shuffle=True)\n', (3560, 3596), False, 'from sklearn.model_selection import train_test_split\n'), ((4097, 4109), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4107, 4109), False, 'from tensorflow.keras.models import Sequential\n'), ((4607, 4695), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(100)', 'verbose': '(1)', 'mode': '"""auto"""'}), "(monitor='val_loss', min_delta=0, patience=100, verbose=1,\n mode='auto')\n", (4620, 4695), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), ((6105, 6132), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (6115, 6132), True, 'import matplotlib.pyplot as plt\n'), ((6194, 6262), 'matplotlib.pyplot.plot', 'plt.plot', (['train_accuracy'], {'label': '"""Training Accuracy"""', 'color': '"""#185fad"""'}), "(train_accuracy, label='Training Accuracy', color='#185fad')\n", (6202, 6262), True, 'import matplotlib.pyplot as plt\n'), ((6263, 6330), 'matplotlib.pyplot.plot', 'plt.plot', (['val_accuracy'], {'label': '"""Validation Accuracy"""', 'color': 
'"""orange"""'}), "(val_accuracy, label='Validation Accuracy', color='orange')\n", (6271, 6330), True, 'import matplotlib.pyplot as plt\n'), ((6331, 6385), 'matplotlib.pyplot.plot', 'plt.plot', (['precision_ma'], {'label': '"""Precision"""', 'color': '"""red"""'}), "(precision_ma, label='Precision', color='red')\n", (6339, 6385), True, 'import matplotlib.pyplot as plt\n'), ((6386, 6436), 'matplotlib.pyplot.plot', 'plt.plot', (['recall_ma'], {'label': '"""Recall"""', 'color': '"""green"""'}), "(recall_ma, label='Recall', color='green')\n", (6394, 6436), True, 'import matplotlib.pyplot as plt\n'), ((6449, 6516), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy by Epoch"""'], {'fontsize': '(25)'}), "('Training and Validation Accuracy by Epoch', fontsize=25)\n", (6458, 6516), True, 'import matplotlib.pyplot as plt\n'), ((6519, 6551), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {'fontsize': '(18)'}), "('Epoch', fontsize=18)\n", (6529, 6551), True, 'import matplotlib.pyplot as plt\n'), ((6554, 6605), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Categorical Crossentropy"""'], {'fontsize': '(18)'}), "('Categorical Crossentropy', fontsize=18)\n", (6564, 6605), True, 'import matplotlib.pyplot as plt\n'), ((6653, 6676), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (6663, 6676), True, 'import matplotlib.pyplot as plt\n'), ((6679, 6689), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6687, 6689), True, 'import matplotlib.pyplot as plt\n'), ((1880, 1927), 'librosa.load', 'librosa.load', (['file_name'], {'res_type': '"""kaiser_fast"""'}), "(file_name, res_type='kaiser_fast')\n", (1892, 1927), False, 'import librosa\n'), ((4121, 4170), 'tensorflow.keras.layers.Dense', 'Dense', (['(40)'], {'input_shape': '(40,)', 'activation': '"""linear"""'}), "(40, input_shape=(40,), activation='linear')\n", (4126, 4170), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((4184, 
4196), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (4191, 4196), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((4209, 4240), 'tensorflow.keras.layers.Dense', 'Dense', (['(180)'], {'activation': '"""linear"""'}), "(180, activation='linear')\n", (4214, 4240), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((4254, 4267), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (4261, 4267), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((4282, 4313), 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""linear"""'}), "(512, activation='linear')\n", (4287, 4313), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((4327, 4339), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4334, 4339), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((4356, 4387), 'tensorflow.keras.layers.Dense', 'Dense', (['(392)'], {'activation': '"""linear"""'}), "(392, activation='linear')\n", (4361, 4387), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((4401, 4413), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4408, 4413), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((824, 830), 'pathlib.Path', 'Path', ([], {}), '()\n', (828, 830), False, 'from pathlib import Path\n'), ((2054, 2106), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'X', 'sr': 'sample_rate', 'n_mfcc': '(40)'}), '(y=X, sr=sample_rate, n_mfcc=40)\n', (2074, 2106), False, 'import librosa\n'), ((2304, 2333), 'tensorflow.keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (2310, 2333), True, 'from tensorflow.keras import backend as K\n'), ((2375, 2395), 'tensorflow.keras.backend.clip', 'K.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (2381, 2395), True, 'from tensorflow.keras import backend as K\n'), ((2450, 2461), 
'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2459, 2461), True, 'from tensorflow.keras import backend as K\n'), ((2550, 2579), 'tensorflow.keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (2556, 2579), True, 'from tensorflow.keras import backend as K\n'), ((2622, 2642), 'tensorflow.keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (2628, 2642), True, 'from tensorflow.keras import backend as K\n'), ((2701, 2712), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2710, 2712), True, 'from tensorflow.keras import backend as K\n'), ((2895, 2906), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2904, 2906), True, 'from tensorflow.keras import backend as K\n'), ((3048, 3071), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3069, 3071), False, 'import datetime\n')] |
import sys
import wandb
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader
from dataset import GrooveMidiDatasetTap2Drum, process_dataset
from utils import eval_log_freq, update_dict_with_tapped
import pickle
# Adding to path repos located at same level in order to use their functions without packaging
sys.path.insert(1, "../../BaseGrooveTransformers/")
sys.path.insert(1, "../BaseGrooveTransformers/")
from models.train import *
sys.path.insert(1, "../../GrooveEvaluator/")
sys.path.insert(1, "../GrooveEvaluator/")
from GrooveEvaluator.evaluator import Evaluator
sys.path.insert(1, "../../preprocessed_dataset/")
sys.path.insert(1, "../preprocessed_dataset/")
from Subset_Creators.subsetters import GrooveMidiSubsetter
# Uncomment following 2 lines to run locally for testing, without pushing anything online
# import os
# os.environ["WANDB_MODE"]="offline"
sys.path.insert(1, "../../hvo_sequence/")
sys.path.insert(1, "../hvo_sequence/")
from hvo_sequence.drum_mappings import ROLAND_REDUCED_MAPPING
# NOTE(review): os.path.join is used later in this script but `os` is not
# imported explicitly here; presumably it leaks in through one of the star
# imports above — confirm, or add `import os`.
from hvo_sequence.hvo_seq import *
if __name__ == "__main__":
    # Default model configuration in case the yaml file cannot be loaded
    # NOTE(review): this dict is defined but never passed to wandb.init,
    # which reads configs/hearty_sweep_4.yaml instead — confirm whether it
    # should be supplied as a fallback config.
    hyperparameter_defaults = dict(
        optimizer_algorithm="sgd",
        d_model=128,
        n_heads=8,
        dropout=0.1,
        num_encoder_decoder_layers=1,
        learning_rate=1e-3,
        batch_size=64,
        dim_feedforward=512,  # multiple of d_model
        epochs=1,
        loss_hit_penalty_multiplier=1,
        train_eval=1,
        test_eval=1,
        validation_eval=1,
        load_evaluator=1
    )
# Load configuration from file and select project to push data to
wandb_run = wandb.init(config="configs/hearty_sweep_4.yaml", project="transformer_groove_tap2drum")
# Load parameters onto dictionary
params = {
# Model parameters
"model": {
# Optimizer algorithm to be used (e.g. sgd or Adam)
"optimizer": wandb.config.optimizer_algorithm,
# Dimension the data is mapped to after first linear layer
"d_model": wandb.config.d_model,
# Number of heads in the Multi-Head Attention mechanism
"n_heads": wandb.config.n_heads,
# Dimension of the feedforward NN
"dim_feedforward": wandb.config.dim_feedforward,
# Dropout (probability of a connection to turn off during training - helps model not to overfit)
"dropout": wandb.config.dropout,
# Number of encoder/decoder layers - as we are using encoder-only, decoder is not really relevant for this
# experiment
"num_encoder_layers": wandb.config.num_encoder_decoder_layers,
"num_decoder_layers": wandb.config.num_encoder_decoder_layers,
# Maximum number of timesteps in sequences
"max_len": 32,
# Embedding size of the source and target sequences (9 voices in drum mapping * 3)
"embedding_size_src": 27,
"embedding_size_tgt": 27,
# Whether we are using encoder-only or encoder-decoder architecture
"encoder_only": True,
# Value [0-1] that will multiply the losses where there are no hits expected, forcing the model to learn
# better the positions where hits are expected
"loss_hit_penalty_multiplier": wandb.config.loss_hit_penalty_multiplier,
# Torch device
"device": "cuda" if torch.cuda.is_available() else "cpu"
},
"training": {
# Learning rate of the model
"learning_rate": wandb.config.learning_rate,
# Number of examples per batch of data
"batch_size": wandb.config.batch_size
},
# Datasets info
"train_dataset": {
# Path to the pickled preprocessed dataset
"pickle_source_path": "../../preprocessed_dataset/datasets_extracted_locally/GrooveMidi/hvo_0.4.5"
"/Processed_On_14_06_2021_at_14_26_hrs",
# Name of the subset (training)
"subset": "GrooveMIDI_processed_train",
"metadata_csv_filename": "metadata.csv",
"hvo_pickle_filename": "hvo_sequence_data.obj",
# Filter to select sequences, more can be added (see metadata file)
"filters": {
"beat_type": ["beat"],
"time_signature": ["4-4"]
},
# Maximum length (timesteps) of sequence
"max_len": 32
},
"test_dataset": {
"pickle_source_path": "../../preprocessed_dataset/datasets_extracted_locally/GrooveMidi/hvo_0.4.5"
"/Processed_On_14_06_2021_at_14_26_hrs",
"subset": "GrooveMIDI_processed_test",
"metadata_csv_filename": "metadata.csv",
"hvo_pickle_filename": "hvo_sequence_data.obj",
"filters": {
"beat_type": ["beat"],
"time_signature": ["4-4"]
},
"max_len": 32
},
"validation_dataset": {
"pickle_source_path": "../../preprocessed_dataset/datasets_extracted_locally/GrooveMidi/hvo_0.4.5"
"/Processed_On_14_06_2021_at_14_26_hrs",
"subset": "GrooveMIDI_processed_validation",
"metadata_csv_filename": "metadata.csv",
"hvo_pickle_filename": "hvo_sequence_data.obj",
"filters": {
"beat_type": ["beat"],
"time_signature": ["4-4"]
},
"max_len": 32
},
# Parameters to create tapped sequences when loading the dataset
"tappify_params": {
# Name of the voice mapping where we want to put all the hits, vels and offsets
"tapped_sequence_voice": "HH_CLOSED",
# Whether or not we want the input to collapse from 9 voices (only hits on one of them) to one voice -
# if this is changed, the embedding size src should be changed to 3 among other things
"tapped_sequence_collapsed": False,
# Param to choose which velocity to keep in case of multiple notes at a single time step. Check
# flatten_voices method in hvo_sequence repository
"tapped_sequence_velocity_mode": 1,
# Param to choose which offset to keep in case of multiple notes at a single time step. Check
# flatten_voices method in hvo_sequence repository
"tapped_sequence_offset_mode": 3
},
# Load evaluator from local file, to use the same examples across runs and compare results
"load_evaluator": wandb.config.load_evaluator,
# Turn on or off the evaluators (by default on)
"train_eval": wandb.config.train_eval,
"test_eval": wandb.config.test_eval,
"validation_eval": wandb.config.validation_eval,
# Use a pretrained model or not
# Option 1. Train model from scratch
"load_model": None # if we don't want to load any model, set to None
# Option 2. Load locally saved model
#"load_model": {
# "location": "local",
# "dir": "./wandb/run-20210609_162149-1tsi1g1n/files/saved_models/",
# "file_pattern": "transformer_run_{}_Epoch_{}.Model"
#}
# Option 3. Load model saved in wandb
#"load_model": {
# "location": "wandb",
# "dir": "marinaniet0/tap2drum/1tsi1g1n/",
# "file_pattern": "saved_models/transformer_run_{}_Epoch_{}.Model",
# "epoch": 51,
# "run": "1tsi1g1n"
#}
}
# PYTORCH LOSS FUNCTIONS
# BCE used for hit loss
BCE_fn = torch.nn.BCEWithLogitsLoss(reduction='none')
# MSE used for velocities and offsets losses
MSE_fn = torch.nn.MSELoss(reduction='none')
# Initialize model with parameters, get back model, optimizer and current epoch
model, optimizer, ep = initialize_model(params)
# Keep track of model
wandb.watch(model)
# DATASET LOADING FOR TRAINING
# Get training subset from pickled dataset
_, subset_list = GrooveMidiSubsetter(pickle_source_path=params["train_dataset"]["pickle_source_path"],
subset=params["train_dataset"]["subset"],
hvo_pickle_filename=params["train_dataset"]["hvo_pickle_filename"],
list_of_filter_dicts_for_subsets=[params["train_dataset"]["filters"]]).create_subsets()
# Get sequences and tapped sequences as tensors, load them onto the torch device and save in gmd variable
gmd = GrooveMidiDatasetTap2Drum(subset=subset_list[0], subset_info=params["train_dataset"],
tappify_params=params["tappify_params"], max_len=params["train_dataset"]["max_len"])
# Load dataset with torch DataLoader
dataloader = DataLoader(gmd, batch_size=params["training"]["batch_size"], shuffle=True)
# Get number of epochs from wandb config
eps = wandb.config.epochs
    # INITIALIZE EVALUATORS
    # If any evaluator is on, generate genre filters
    if params["train_eval"] or params["test_eval"] or params["validation_eval"]:
        styles = ["hiphop", "funk", "reggae", "soul", "latin", "jazz", "pop", "afrobeat", "highlife", "punk", "rock"]
        list_of_filter_dicts_for_subsets = []
        for style in styles:
            list_of_filter_dicts_for_subsets.append(
                {"style_primary": [style], "beat_type": ["beat"], "time_signature": ["4-4"]}
            )
    # If train evaluator is on
    if params["train_eval"]:
        # Loading from local file (generated with gen_eval.py)
        # NOTE(review): pickle.load on a pre-built file — only safe for
        # trusted, locally generated evaluator files.
        if params["load_evaluator"]:
            train_evaluator = pickle.load(open('../evaluators/train.evaluator', 'rb'))
        # ... or setting it up from scratch
        else:
            # TRAIN EVALUATOR
            train_evaluator = Evaluator(
                pickle_source_path=params["train_dataset"]["pickle_source_path"],
                set_subfolder=params["train_dataset"]["subset"],
                hvo_pickle_filename=params["train_dataset"]["hvo_pickle_filename"],
                list_of_filter_dicts_for_subsets=list_of_filter_dicts_for_subsets,
                max_hvo_shape=(32, 27),
                n_samples_to_use=11,
                n_samples_to_synthesize_visualize_per_subset=10,
                disable_tqdm=False,
                analyze_heatmap=True,
                analyze_global_features=True,
                _identifier="Train_Set"
            )
        # Get ground truths of subset
        train_evaluator_subset = train_evaluator.get_ground_truth_hvo_sequences()
        metadata_train = pd.read_csv(os.path.join(params["train_dataset"]["pickle_source_path"],
                                                  params["train_dataset"]["subset"],
                                                  params["train_dataset"]["metadata_csv_filename"]))
        print("Generating inputs for train evaluator...")
        # Calculate tapped sequences for those ground truths
        train_eval_inputs, _, _ = process_dataset(train_evaluator_subset, metadata=metadata_train,
                                                  max_len=params["train_dataset"]["max_len"],
                                                  tappify_params=params["tappify_params"])
        print("Inputs for train evaluator generated.")
    # Same pattern as the train evaluator, but over the test subset; here the
    # ground truths (test_eval_gt) are also kept for the training loop.
    if params["test_eval"]:
        if params["load_evaluator"]:
            test_evaluator = pickle.load(open('../evaluators/test.evaluator', 'rb'))
        else:
            # TEST EVALUATOR
            test_evaluator = Evaluator(
                pickle_source_path=params["test_dataset"]["pickle_source_path"],
                set_subfolder=params["test_dataset"]["subset"],
                hvo_pickle_filename=params["test_dataset"]["hvo_pickle_filename"],
                list_of_filter_dicts_for_subsets=list_of_filter_dicts_for_subsets,
                max_hvo_shape=(32, 27),
                n_samples_to_use=11,
                n_samples_to_synthesize_visualize_per_subset=10,
                disable_tqdm=False,
                analyze_heatmap=True,
                analyze_global_features=True,
                _identifier="Test_Set"
            )
        test_evaluator_subset = test_evaluator.get_ground_truth_hvo_sequences()
        metadata_test = pd.read_csv(os.path.join(params["test_dataset"]["pickle_source_path"],
                                                 params["test_dataset"]["subset"],
                                                 params["test_dataset"]["metadata_csv_filename"]))
        print("\nGenerating inputs for test evaluator...")
        test_eval_inputs, test_eval_gt, _ = process_dataset(test_evaluator_subset, metadata=metadata_test,
                                                            max_len=params["test_dataset"]["max_len"],
                                                            tappify_params=params["tappify_params"])
    # Same pattern again for the validation subset.
    if params["validation_eval"]:
        if params["load_evaluator"]:
            validation_evaluator = pickle.load(open('../evaluators/validation.evaluator', 'rb'))
        else:
            # VALIDATION EVALUATOR
            validation_evaluator = Evaluator(
                pickle_source_path=params["validation_dataset"]["pickle_source_path"],
                set_subfolder=params["validation_dataset"]["subset"],
                hvo_pickle_filename=params["validation_dataset"]["hvo_pickle_filename"],
                list_of_filter_dicts_for_subsets=list_of_filter_dicts_for_subsets,
                max_hvo_shape=(32, 27),
                n_samples_to_use=11,
                n_samples_to_synthesize_visualize_per_subset=10,
                disable_tqdm=False,
                analyze_heatmap=True,
                analyze_global_features=True,
                _identifier="Validation_Set"
            )
        validation_evaluator_subset = validation_evaluator.get_ground_truth_hvo_sequences()
        # NOTE(review): this rebinds `metadata_test` (from the test branch)
        # instead of using a distinct name like `metadata_validation`; harmless
        # because the test branch has already consumed it, but confusing.
        metadata_test = pd.read_csv(os.path.join(params["validation_dataset"]["pickle_source_path"],
                                                 params["validation_dataset"]["subset"],
                                                 params["validation_dataset"]["metadata_csv_filename"]))
        print("\nGenerating inputs for validation evaluator...")
        validation_eval_inputs, validation_eval_gt, _ = process_dataset(validation_evaluator_subset,
                                                                        metadata=metadata_test,
                                                                        max_len=params["validation_dataset"]["max_len"],
                                                                        tappify_params=params["tappify_params"])
        print("Inputs for validation evaluator generated.")
# GENERATE FREQUENCY LOG ARRAYS - how often evaluator information should be logged onto wandb
epoch_save_partial, epoch_save_all = eval_log_freq(total_epochs=eps, initial_epochs_lim=10, initial_step_partial=1,
initial_step_all=1, secondary_step_partial=5,
secondary_step_all=5)
# ONLY EVAL ON LAST EPOCH - uncomment if you only need to log evaluator info at the very end
# epoch_save_partial, epoch_save_all = [eps-1], [eps-1]
print("\nPartial evaluation saved on epoch(s) ", str(epoch_save_partial))
print("Full evaluation saved on epoch(s) ", str(epoch_save_all))
    print("\nTraining model...")
    try:
        # TRAINING LOOP
        for i in np.arange(eps):
            ep += 1
            # Setting recalculate to True only on first epoch to avoid logging ground truths on every logging epoch
            recalculate_gt = True if ep == 1 else False
            # Whether the model should be saved or not in this epoch
            save_model = (i in epoch_save_partial or i in epoch_save_all)
            print(f"\nEpoch {ep}\n-------------------------------")
            # Calling training loop in BaseGrooveTransformers
            # NOTE(review): test_eval_inputs/test_eval_gt and
            # validation_eval_inputs/validation_eval_gt only exist when the
            # corresponding evaluators are enabled above — with test_eval or
            # validation_eval turned off this call raises NameError. Confirm.
            train_loop(dataloader=dataloader, groove_transformer=model, opt=optimizer, epoch=ep, loss_fn=calculate_loss,
                       bce_fn=BCE_fn, mse_fn=MSE_fn, save=save_model, device=params["model"]["device"],
                       encoder_only=params["model"]["encoder_only"],
                       hit_loss_penalty=params["model"]["loss_hit_penalty_multiplier"],
                       test_inputs=test_eval_inputs, test_gt=test_eval_gt,
                       validation_inputs=validation_eval_inputs, validation_gt=validation_eval_gt)
            print("-------------------------------\n")
            # If we need to do any logging at all..
            if i in epoch_save_partial or i in epoch_save_all:
                # EVAL TRAIN
                # --------------------------------------------------------------------------------------------------
                if params["train_eval"]:
                    train_evaluator._identifier = 'Train_Set'
                    # Get predictions for train selected subset
                    train_eval_pred = torch.cat(model.predict(train_eval_inputs, use_thres=True, thres=0.5), dim=2)
                    train_eval_pred_hvo_array = train_eval_pred.cpu().detach().numpy()
                    # Add predictions to evaluator
                    train_evaluator.add_predictions(train_eval_pred_hvo_array)
                    # Evaluate accuracies and MSEs
                    train_acc_h = train_evaluator.get_hits_accuracies(drum_mapping=ROLAND_REDUCED_MAPPING)
                    train_mse_v = train_evaluator.get_velocity_errors(drum_mapping=ROLAND_REDUCED_MAPPING)
                    train_mse_o = train_evaluator.get_micro_timing_errors(drum_mapping=ROLAND_REDUCED_MAPPING)
                    # train_rhythmic_distances = train_evaluator.get_rhythmic_distances()
                    # Log
                    wandb.log(train_acc_h, commit=False)
                    wandb.log(train_mse_v, commit=False)
                    wandb.log(train_mse_o)
                    # wandb.log(train_rhythmic_distances, commit=False)
                    # Generate velocity heatmaps and global probability distributions
                    if i in epoch_save_all:
                        train_evaluator._identifier = 'Train_Set_Epoch_{}'.format(ep)
                        # Heatmaps train
                        train_heatmaps_global_features = train_evaluator.get_wandb_logging_media(
                            sf_paths=["../../hvo_sequence/hvo_sequence/soundfonts/Standard_Drum_Kit.sf2"],
                            recalculate_ground_truth=recalculate_gt)
                        if recalculate_gt:
                            # Adding tapped audios and piano rolls! :)
                            train_heatmaps_global_features =\
                                update_dict_with_tapped(sf_path="../../hvo_sequence/hvo_sequence/soundfonts/"
                                                                "Standard_Drum_Kit.sf2",
                                                        evaluator=train_evaluator,
                                                        evaluator_id="Train",
                                                        wandb_dict=train_heatmaps_global_features)
                        if len(train_heatmaps_global_features.keys()) > 0:
                            wandb.log(train_heatmaps_global_features, commit=False)
                        train_evaluator.dump(path="misc/train_set_evaluator_run_{}_Epoch_{}.Eval".format(wandb_run.name, ep))
                #---------------------------------------------------------------------------------------------------
                wandb.log({"epoch": ep})
                # EVAL TEST
                # Mirrors the train-evaluation branch above, over the test subset.
                #---------------------------------------------------------------------------------------------------
                if params["test_eval"]:
                    test_evaluator._identifier = 'Test_Set'
                    test_eval_pred = torch.cat(model.predict(test_eval_inputs, use_thres=True, thres=0.5), dim=2)
                    test_eval_pred_hvo_array = test_eval_pred.cpu().detach().numpy()
                    test_evaluator.add_predictions(test_eval_pred_hvo_array)
                    # Evaluate
                    test_acc_h = test_evaluator.get_hits_accuracies(drum_mapping=ROLAND_REDUCED_MAPPING)
                    test_mse_v = test_evaluator.get_velocity_errors(drum_mapping=ROLAND_REDUCED_MAPPING)
                    test_mse_o = test_evaluator.get_micro_timing_errors(drum_mapping=ROLAND_REDUCED_MAPPING)
                    # rhythmic_distances = test_evaluator.get_rhythmic_distances()
                    # Log
                    wandb.log(test_acc_h, commit=False)
                    wandb.log(test_mse_v, commit=False)
                    wandb.log(test_mse_o)
                    # wandb.log(rhythmic_distances, commit=False)
                    if i in epoch_save_all:
                        test_evaluator._identifier = 'Test_Set_Epoch_{}'.format(ep)
                        # Heatmaps test
                        test_heatmaps_global_features = test_evaluator.get_wandb_logging_media(
                            sf_paths=["../../hvo_sequence/hvo_sequence/soundfonts/Standard_Drum_Kit.sf2"],
                            recalculate_ground_truth=recalculate_gt)
                        if recalculate_gt:
                            # Adding tapped audios and piano rolls
                            test_heatmaps_global_features =\
                                update_dict_with_tapped(sf_path="../../hvo_sequence/hvo_sequence/soundfonts/"
                                                                "Standard_Drum_Kit.sf2",
                                                        evaluator=test_evaluator,
                                                        evaluator_id="Test",
                                                        wandb_dict=test_heatmaps_global_features)
                        if len(test_heatmaps_global_features.keys()) > 0:
                            wandb.log(test_heatmaps_global_features, commit=False)
                        test_evaluator.dump(path="misc/test_set_evaluator_run_{}_Epoch_{}.Eval".format(wandb_run.name, ep))
                #---------------------------------------------------------------------------------------------------
                wandb.log({"epoch": ep})
                # EVAL VALIDATION
                # Mirrors the evaluation branches above, over the validation subset.
                #---------------------------------------------------------------------------------------------------
                if params["validation_eval"]:
                    validation_evaluator._identifier = 'Validation_Set'
                    validation_eval_pred = torch.cat(model.predict(validation_eval_inputs, use_thres=True, thres=0.5),
                                                     dim=2)
                    validation_eval_pred_hvo_array = validation_eval_pred.cpu().detach().numpy()
                    validation_evaluator.add_predictions(validation_eval_pred_hvo_array)
                    # Evaluate
                    validation_acc_h = validation_evaluator.get_hits_accuracies(drum_mapping=ROLAND_REDUCED_MAPPING)
                    validation_mse_v = validation_evaluator.get_velocity_errors(drum_mapping=ROLAND_REDUCED_MAPPING)
                    validation_mse_o = validation_evaluator.get_micro_timing_errors(drum_mapping=ROLAND_REDUCED_MAPPING)
                    # rhythmic_distances = validation_evaluator.get_rhythmic_distances()
                    # Log
                    wandb.log(validation_acc_h, commit=False)
                    wandb.log(validation_mse_v, commit=False)
                    wandb.log(validation_mse_o)
                    # wandb.log(rhythmic_distances, commit=False)
                    if i in epoch_save_all:
                        validation_evaluator._identifier = 'Validation_Set_Epoch_{}'.format(ep)
                        # Heatmaps validation
                        validation_heatmaps_global_features = validation_evaluator.get_wandb_logging_media(
                            sf_paths=["../../hvo_sequence/hvo_sequence/soundfonts/Standard_Drum_Kit.sf2"],
                            recalculate_ground_truth=recalculate_gt)
                        if recalculate_gt:
                            # Adding tapped audios and piano rolls
                            validation_heatmaps_global_features =\
                                update_dict_with_tapped(sf_path="../../hvo_sequence/hvo_sequence/soundfonts/"
                                                                "Standard_Drum_Kit.sf2",
                                                        evaluator=validation_evaluator,
                                                        evaluator_id="Validation",
                                                        wandb_dict=validation_heatmaps_global_features)
                        if len(validation_heatmaps_global_features.keys()) > 0:
                            wandb.log(validation_heatmaps_global_features, commit=False)
                        validation_evaluator.dump(path="misc/validation_set_evaluator_run_{}_Epoch_{}.Eval".format(
                            wandb_run.name, ep))
                #---------------------------------------------------------------------------------------------------
                wandb.log({"epoch": ep})
    # finally (rather than except) so the wandb run is closed cleanly even if
    # training is interrupted or raises.
    finally:
        print("\nDone!")
        wandb.finish()
| [
"dataset.GrooveMidiDatasetTap2Drum",
"wandb.log",
"torch.nn.MSELoss",
"torch.nn.BCEWithLogitsLoss",
"torch.utils.data.DataLoader",
"wandb.finish",
"wandb.watch",
"Subset_Creators.subsetters.GrooveMidiSubsetter",
"dataset.process_dataset",
"utils.update_dict_with_tapped",
"sys.path.insert",
"Gr... | [((369, 420), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../BaseGrooveTransformers/"""'], {}), "(1, '../../BaseGrooveTransformers/')\n", (384, 420), False, 'import sys\n'), ((421, 469), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../BaseGrooveTransformers/"""'], {}), "(1, '../BaseGrooveTransformers/')\n", (436, 469), False, 'import sys\n'), ((497, 541), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../GrooveEvaluator/"""'], {}), "(1, '../../GrooveEvaluator/')\n", (512, 541), False, 'import sys\n'), ((542, 583), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../GrooveEvaluator/"""'], {}), "(1, '../GrooveEvaluator/')\n", (557, 583), False, 'import sys\n'), ((632, 681), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../preprocessed_dataset/"""'], {}), "(1, '../../preprocessed_dataset/')\n", (647, 681), False, 'import sys\n'), ((682, 728), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../preprocessed_dataset/"""'], {}), "(1, '../preprocessed_dataset/')\n", (697, 728), False, 'import sys\n'), ((929, 970), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../hvo_sequence/"""'], {}), "(1, '../../hvo_sequence/')\n", (944, 970), False, 'import sys\n'), ((971, 1009), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../hvo_sequence/"""'], {}), "(1, '../hvo_sequence/')\n", (986, 1009), False, 'import sys\n'), ((1727, 1819), 'wandb.init', 'wandb.init', ([], {'config': '"""configs/hearty_sweep_4.yaml"""', 'project': '"""transformer_groove_tap2drum"""'}), "(config='configs/hearty_sweep_4.yaml', project=\n 'transformer_groove_tap2drum')\n", (1737, 1819), False, 'import wandb\n'), ((7780, 7824), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (7806, 7824), False, 'import torch\n'), ((7887, 7921), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (7903, 7921), False, 'import torch\n'), ((8089, 8107), 
'wandb.watch', 'wandb.watch', (['model'], {}), '(model)\n', (8100, 8107), False, 'import wandb\n'), ((8740, 8920), 'dataset.GrooveMidiDatasetTap2Drum', 'GrooveMidiDatasetTap2Drum', ([], {'subset': 'subset_list[0]', 'subset_info': "params['train_dataset']", 'tappify_params': "params['tappify_params']", 'max_len': "params['train_dataset']['max_len']"}), "(subset=subset_list[0], subset_info=params[\n 'train_dataset'], tappify_params=params['tappify_params'], max_len=\n params['train_dataset']['max_len'])\n", (8765, 8920), False, 'from dataset import GrooveMidiDatasetTap2Drum, process_dataset\n'), ((9006, 9080), 'torch.utils.data.DataLoader', 'DataLoader', (['gmd'], {'batch_size': "params['training']['batch_size']", 'shuffle': '(True)'}), "(gmd, batch_size=params['training']['batch_size'], shuffle=True)\n", (9016, 9080), False, 'from torch.utils.data import DataLoader\n'), ((15540, 15691), 'utils.eval_log_freq', 'eval_log_freq', ([], {'total_epochs': 'eps', 'initial_epochs_lim': '(10)', 'initial_step_partial': '(1)', 'initial_step_all': '(1)', 'secondary_step_partial': '(5)', 'secondary_step_all': '(5)'}), '(total_epochs=eps, initial_epochs_lim=10, initial_step_partial\n =1, initial_step_all=1, secondary_step_partial=5, secondary_step_all=5)\n', (15553, 15691), False, 'from utils import eval_log_freq, update_dict_with_tapped\n'), ((16186, 16200), 'numpy.arange', 'np.arange', (['eps'], {}), '(eps)\n', (16195, 16200), True, 'import numpy as np\n'), ((26212, 26226), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (26224, 26226), False, 'import wandb\n'), ((8212, 8491), 'Subset_Creators.subsetters.GrooveMidiSubsetter', 'GrooveMidiSubsetter', ([], {'pickle_source_path': "params['train_dataset']['pickle_source_path']", 'subset': "params['train_dataset']['subset']", 'hvo_pickle_filename': "params['train_dataset']['hvo_pickle_filename']", 'list_of_filter_dicts_for_subsets': "[params['train_dataset']['filters']]"}), "(pickle_source_path=params['train_dataset'][\n 
'pickle_source_path'], subset=params['train_dataset']['subset'],\n hvo_pickle_filename=params['train_dataset']['hvo_pickle_filename'],\n list_of_filter_dicts_for_subsets=[params['train_dataset']['filters']])\n", (8231, 8491), False, 'from Subset_Creators.subsetters import GrooveMidiSubsetter\n'), ((11355, 11514), 'dataset.process_dataset', 'process_dataset', (['train_evaluator_subset'], {'metadata': 'metadata_train', 'max_len': "params['train_dataset']['max_len']", 'tappify_params': "params['tappify_params']"}), "(train_evaluator_subset, metadata=metadata_train, max_len=\n params['train_dataset']['max_len'], tappify_params=params['tappify_params']\n )\n", (11370, 11514), False, 'from dataset import GrooveMidiDatasetTap2Drum, process_dataset\n'), ((13094, 13245), 'dataset.process_dataset', 'process_dataset', (['test_evaluator_subset'], {'metadata': 'metadata_test', 'max_len': "params['test_dataset']['max_len']", 'tappify_params': "params['tappify_params']"}), "(test_evaluator_subset, metadata=metadata_test, max_len=\n params['test_dataset']['max_len'], tappify_params=params['tappify_params'])\n", (13109, 13245), False, 'from dataset import GrooveMidiDatasetTap2Drum, process_dataset\n'), ((3509, 3534), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3532, 3534), False, 'import torch\n'), ((10077, 10557), 'GrooveEvaluator.evaluator.Evaluator', 'Evaluator', ([], {'pickle_source_path': "params['train_dataset']['pickle_source_path']", 'set_subfolder': "params['train_dataset']['subset']", 'hvo_pickle_filename': "params['train_dataset']['hvo_pickle_filename']", 'list_of_filter_dicts_for_subsets': 'list_of_filter_dicts_for_subsets', 'max_hvo_shape': '(32, 27)', 'n_samples_to_use': '(11)', 'n_samples_to_synthesize_visualize_per_subset': '(10)', 'disable_tqdm': '(False)', 'analyze_heatmap': '(True)', 'analyze_global_features': '(True)', '_identifier': '"""Train_Set"""'}), "(pickle_source_path=params['train_dataset']['pickle_source_path'],\n 
set_subfolder=params['train_dataset']['subset'], hvo_pickle_filename=\n params['train_dataset']['hvo_pickle_filename'],\n list_of_filter_dicts_for_subsets=list_of_filter_dicts_for_subsets,\n max_hvo_shape=(32, 27), n_samples_to_use=11,\n n_samples_to_synthesize_visualize_per_subset=10, disable_tqdm=False,\n analyze_heatmap=True, analyze_global_features=True, _identifier='Train_Set'\n )\n", (10086, 10557), False, 'from GrooveEvaluator.evaluator import Evaluator\n'), ((11925, 12396), 'GrooveEvaluator.evaluator.Evaluator', 'Evaluator', ([], {'pickle_source_path': "params['test_dataset']['pickle_source_path']", 'set_subfolder': "params['test_dataset']['subset']", 'hvo_pickle_filename': "params['test_dataset']['hvo_pickle_filename']", 'list_of_filter_dicts_for_subsets': 'list_of_filter_dicts_for_subsets', 'max_hvo_shape': '(32, 27)', 'n_samples_to_use': '(11)', 'n_samples_to_synthesize_visualize_per_subset': '(10)', 'disable_tqdm': '(False)', 'analyze_heatmap': '(True)', 'analyze_global_features': '(True)', '_identifier': '"""Test_Set"""'}), "(pickle_source_path=params['test_dataset']['pickle_source_path'],\n set_subfolder=params['test_dataset']['subset'], hvo_pickle_filename=\n params['test_dataset']['hvo_pickle_filename'],\n list_of_filter_dicts_for_subsets=list_of_filter_dicts_for_subsets,\n max_hvo_shape=(32, 27), n_samples_to_use=11,\n n_samples_to_synthesize_visualize_per_subset=10, disable_tqdm=False,\n analyze_heatmap=True, analyze_global_features=True, _identifier='Test_Set')\n", (11934, 12396), False, 'from GrooveEvaluator.evaluator import Evaluator\n'), ((14983, 15150), 'dataset.process_dataset', 'process_dataset', (['validation_evaluator_subset'], {'metadata': 'metadata_test', 'max_len': "params['validation_dataset']['max_len']", 'tappify_params': "params['tappify_params']"}), "(validation_evaluator_subset, metadata=metadata_test,\n max_len=params['validation_dataset']['max_len'], tappify_params=params[\n 'tappify_params'])\n", (14998, 15150), False, 'from 
dataset import GrooveMidiDatasetTap2Drum, process_dataset\n'), ((13670, 14172), 'GrooveEvaluator.evaluator.Evaluator', 'Evaluator', ([], {'pickle_source_path': "params['validation_dataset']['pickle_source_path']", 'set_subfolder': "params['validation_dataset']['subset']", 'hvo_pickle_filename': "params['validation_dataset']['hvo_pickle_filename']", 'list_of_filter_dicts_for_subsets': 'list_of_filter_dicts_for_subsets', 'max_hvo_shape': '(32, 27)', 'n_samples_to_use': '(11)', 'n_samples_to_synthesize_visualize_per_subset': '(10)', 'disable_tqdm': '(False)', 'analyze_heatmap': '(True)', 'analyze_global_features': '(True)', '_identifier': '"""Validation_Set"""'}), "(pickle_source_path=params['validation_dataset'][\n 'pickle_source_path'], set_subfolder=params['validation_dataset'][\n 'subset'], hvo_pickle_filename=params['validation_dataset'][\n 'hvo_pickle_filename'], list_of_filter_dicts_for_subsets=\n list_of_filter_dicts_for_subsets, max_hvo_shape=(32, 27),\n n_samples_to_use=11, n_samples_to_synthesize_visualize_per_subset=10,\n disable_tqdm=False, analyze_heatmap=True, analyze_global_features=True,\n _identifier='Validation_Set')\n", (13679, 14172), False, 'from GrooveEvaluator.evaluator import Evaluator\n'), ((18557, 18593), 'wandb.log', 'wandb.log', (['train_acc_h'], {'commit': '(False)'}), '(train_acc_h, commit=False)\n', (18566, 18593), False, 'import wandb\n'), ((18614, 18650), 'wandb.log', 'wandb.log', (['train_mse_v'], {'commit': '(False)'}), '(train_mse_v, commit=False)\n', (18623, 18650), False, 'import wandb\n'), ((18671, 18693), 'wandb.log', 'wandb.log', (['train_mse_o'], {}), '(train_mse_o)\n', (18680, 18693), False, 'import wandb\n'), ((20382, 20406), 'wandb.log', 'wandb.log', (["{'epoch': ep}"], {}), "({'epoch': ep})\n", (20391, 20406), False, 'import wandb\n'), ((21411, 21446), 'wandb.log', 'wandb.log', (['test_acc_h'], {'commit': '(False)'}), '(test_acc_h, commit=False)\n', (21420, 21446), False, 'import wandb\n'), ((21467, 21502), 'wandb.log', 
'wandb.log', (['test_mse_v'], {'commit': '(False)'}), '(test_mse_v, commit=False)\n', (21476, 21502), False, 'import wandb\n'), ((21523, 21544), 'wandb.log', 'wandb.log', (['test_mse_o'], {}), '(test_mse_o)\n', (21532, 21544), False, 'import wandb\n'), ((23124, 23148), 'wandb.log', 'wandb.log', (["{'epoch': ep}"], {}), "({'epoch': ep})\n", (23133, 23148), False, 'import wandb\n'), ((24308, 24349), 'wandb.log', 'wandb.log', (['validation_acc_h'], {'commit': '(False)'}), '(validation_acc_h, commit=False)\n', (24317, 24349), False, 'import wandb\n'), ((24370, 24411), 'wandb.log', 'wandb.log', (['validation_mse_v'], {'commit': '(False)'}), '(validation_mse_v, commit=False)\n', (24379, 24411), False, 'import wandb\n'), ((24432, 24459), 'wandb.log', 'wandb.log', (['validation_mse_o'], {}), '(validation_mse_o)\n', (24441, 24459), False, 'import wandb\n'), ((26141, 26165), 'wandb.log', 'wandb.log', (["{'epoch': ep}"], {}), "({'epoch': ep})\n", (26150, 26165), False, 'import wandb\n'), ((19507, 19712), 'utils.update_dict_with_tapped', 'update_dict_with_tapped', ([], {'sf_path': '"""../../hvo_sequence/hvo_sequence/soundfonts/Standard_Drum_Kit.sf2"""', 'evaluator': 'train_evaluator', 'evaluator_id': '"""Train"""', 'wandb_dict': 'train_heatmaps_global_features'}), "(sf_path=\n '../../hvo_sequence/hvo_sequence/soundfonts/Standard_Drum_Kit.sf2',\n evaluator=train_evaluator, evaluator_id='Train', wandb_dict=\n train_heatmaps_global_features)\n", (19530, 19712), False, 'from utils import eval_log_freq, update_dict_with_tapped\n'), ((20062, 20117), 'wandb.log', 'wandb.log', (['train_heatmaps_global_features'], {'commit': '(False)'}), '(train_heatmaps_global_features, commit=False)\n', (20071, 20117), False, 'import wandb\n'), ((22256, 22458), 'utils.update_dict_with_tapped', 'update_dict_with_tapped', ([], {'sf_path': '"""../../hvo_sequence/hvo_sequence/soundfonts/Standard_Drum_Kit.sf2"""', 'evaluator': 'test_evaluator', 'evaluator_id': '"""Test"""', 'wandb_dict': 
'test_heatmaps_global_features'}), "(sf_path=\n '../../hvo_sequence/hvo_sequence/soundfonts/Standard_Drum_Kit.sf2',\n evaluator=test_evaluator, evaluator_id='Test', wandb_dict=\n test_heatmaps_global_features)\n", (22279, 22458), False, 'from utils import eval_log_freq, update_dict_with_tapped\n'), ((22807, 22861), 'wandb.log', 'wandb.log', (['test_heatmaps_global_features'], {'commit': '(False)'}), '(test_heatmaps_global_features, commit=False)\n', (22816, 22861), False, 'import wandb\n'), ((25207, 25427), 'utils.update_dict_with_tapped', 'update_dict_with_tapped', ([], {'sf_path': '"""../../hvo_sequence/hvo_sequence/soundfonts/Standard_Drum_Kit.sf2"""', 'evaluator': 'validation_evaluator', 'evaluator_id': '"""Validation"""', 'wandb_dict': 'validation_heatmaps_global_features'}), "(sf_path=\n '../../hvo_sequence/hvo_sequence/soundfonts/Standard_Drum_Kit.sf2',\n evaluator=validation_evaluator, evaluator_id='Validation', wandb_dict=\n validation_heatmaps_global_features)\n", (25230, 25427), False, 'from utils import eval_log_freq, update_dict_with_tapped\n'), ((25781, 25841), 'wandb.log', 'wandb.log', (['validation_heatmaps_global_features'], {'commit': '(False)'}), '(validation_heatmaps_global_features, commit=False)\n', (25790, 25841), False, 'import wandb\n')] |
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample Generate GPT2"""
import os
import sys
import numpy as np
import torch
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
from megatron.text_generation_utils import pad_batch, get_batch
from megatron import get_args
from megatron import print_rank_0
from megatron import get_tokenizer
from megatron.checkpointing import load_checkpoint
from megatron.initialize import initialize_megatron
from megatron.model import GPT2Model
from megatron.training import get_model
from megatron.text_generation_utils import generate_and_write_samples_unconditional
from megatron.text_generation_utils import generate_samples_input_from_file
from megatron.text_generation_utils import generate_samples_interactive
# from megatron.model.transformer import LayerNorm
def model_provider():
    """Construct the GPT2 model used for sampling (single-partition output)."""
    print_rank_0('building GPT2 model ...')
    return GPT2Model(num_tokentypes=0, parallel_output=False)
def add_text_generate_args(parser):
    """Register the text-generation command-line options on *parser*.

    Options are declared in a table and added in display order; the parser
    is returned so the call can be chained.
    """
    group = parser.add_argument_group(title='text generation')
    # (flag, add_argument keyword arguments) pairs, in help-listing order.
    options = [
        ("--temperature", dict(type=float, default=1.0,
                               help='Sampling temperature.')),
        ("--greedy", dict(action='store_true', default=False,
                          help='Use greedy sampling.')),
        ("--top_p", dict(type=float, default=0.0,
                         help='Top p sampling.')),
        ("--top_k", dict(type=int, default=5,
                         help='Top k sampling.')),
        ("--out-seq-length", dict(type=int, default=1024,
                                  help='Size of the output generated text.')),
        ("--sample-input-file", dict(type=str, default=None,
                                     help='Get input from file instead of interactive mode, each line is an input.')),
        ("--sample-output-file", dict(type=str, default=None,
                                      help='Output file got from --sample-input-file')),
        ("--num-samples", dict(type=int, default=0,
                               help='Number of samples to generate unconditionally, defaults to 0 and interactive conditional sampling')),
        ("--genfile", dict(type=str,
                           help='Output file when generating unconditionally')),
        ("--recompute", dict(action='store_true',
                             help='During generation recompute all attention instead of using previously computed keys/values.')),
    ]
    for flag, kwargs in options:
        group.add_argument(flag, **kwargs)
    return parser
def generate(model, context_tokens, args, tokenizer, max_num=50):
    """Autoregressively sample up to ``max_num`` tokens continuing *context_tokens*.

    Runs the model one step at a time, draws the next token from the top-k
    candidates (re-drawing up to 1000 times to avoid the <unk> token), and
    stops at end-of-document, the sequence-length limit, or ``max_num`` new
    tokens.

    :param model: forward-callable language model
    :param context_tokens: list of prompt token ids
    :param args: megatron args (uses seq_length, top_k)
    :param tokenizer: tokenizer providing pad_id/unk/eod/vocab_size
    :param max_num: maximum number of tokens to generate
    :return: 1-D numpy array of token ids (context + generation, padding stripped)
    """
    valid_length = len(context_tokens)
    context_tokens_, context_lengths = pad_batch([context_tokens],
                                                  tokenizer.pad_id, args)
    context_tokens_tensor = torch.cuda.LongTensor(context_tokens_)
    tokens, attention_mask, position_ids = get_batch(context_tokens_tensor)
    type_ids = None
    bs, _ = tokens.shape
    cnt = 0
    # Bug fix: pre-assign `outputs` so it is defined even when the context
    # already fills the sequence and the loop below never executes (the
    # original code raised UnboundLocalError in that case).
    outputs = tokens.cpu().numpy()
    while valid_length < args.seq_length:
        with torch.no_grad():
            logits = model(tokens,
                           position_ids,
                           attention_mask,
                           tokentype_ids=type_ids,
                           forward_method_parallel_output=False)
        logits = logits[:, :, :tokenizer.vocab_size].cpu()
        logits = logits.numpy()
        logits = logits.reshape(bs, args.seq_length, -1)
        # Scores over the vocabulary at the last valid position.
        probs = logits[0, valid_length - 1, :]
        p_args = probs.argsort()[::-1][:args.top_k]
        p = probs[p_args]
        # NOTE(review): normalizes raw logits rather than a softmax; this
        # assumes the top-k logits are positive — np.random.choice would
        # reject a negative "probability". Preserved as-is; confirm upstream.
        p = p / sum(p)
        # Bounded re-draw so we do not emit the <unk> token.
        for i in range(1000):
            target_index = np.random.choice(len(p), p=p)
            if p_args[target_index] != tokenizer.unk:
                break
        if p_args[target_index] == tokenizer.eod or \
                valid_length == args.seq_length - 1 or cnt >= max_num:
            outputs = tokens.cpu().numpy()
            break
        tokens[0][valid_length] = p_args[target_index]
        valid_length += 1
        cnt += 1
    # Strip right padding and return the single generated sequence.
    length = np.sum(outputs != tokenizer.pad_id)
    outputs = outputs[0][:length]
    return outputs
def main():
    """Main program: load the model, run canned demo prompts, then serve an
    interactive stdin prompt loop (the `while 1` loop never exits, so the
    trailing `return` is unreachable)."""
    # Initialize megatron/distributed state with the text-generation CLI flags.
    initialize_megatron(extra_args_provider=add_text_generate_args,
                        args_defaults={'tokenizer_type': 'GPT2BPETokenizer'})
    # Set up model and load checkpoint.
    model = get_model(model_provider)
    model.eval()
    args = get_args()
    if args.load is not None:
        _ = load_checkpoint(model, None, None)
    # Canned demo prompts: couplets, Q&A, a dialogue continuation, and story/essay openers.
    samples = ['上联:瑞风播福泽,事业具昌盛千家乐',
               '四川的省会是?',
               '上联:春雨润人间,社会和谐万象新',
               '''书生:羌笛何须怨杨柳,春风不度玉门关。
飞云:(这诗怎么这么耳熟?且过去跟他聊聊如何。)
书生:小兄弟,要不要一起喝一杯?
飞云:你请我呀?你若是请我,我便和你喝一杯;你若不请我,我便一个人去喝。
书生:小兄弟,看你年纪轻轻,不至于这么势利吧?
飞云:''',
               '张无忌拿出屠龙宝刀,手起刀落,周芷若掉了一颗门牙,身旁的赵敏喜极而泣,',
               '人工智能成为国际竞争的新焦点。人工智能是引领未来的战略性技术,世界主要发达国家把发展人工智能作为提升国家竞争力、维护国家安全的重大战略,加紧出台规划和政策,围绕核心技术、顶尖人才、标准规范等强化部署,力图在新一轮国际科技竞争中掌握主导权。当前,',
               '中国和美国和日本和法国和加拿大和澳大利亚的首都分别是哪里?']
    # Run every canned prompt through the model and print the continuation.
    for sample in samples:
        raw_text = sample
        tokenizer = get_tokenizer()
        context_tokens = tokenizer.tokenize(raw_text)
        output_ids = generate(model, context_tokens, args, tokenizer)
        output_samples = tokenizer.convert_ids_to_tokens(output_ids.tolist())
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
        print('Input is:', sample)
        # Slices off the echoed prompt; NOTE(review): assumes one output token
        # per prompt character — confirm against the tokenizer's behavior.
        print('Output is:', output_samples[len(sample):], flush=True)
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
    # Interactive loop: read prompts from stdin until the process is killed.
    while 1:
        sample = input("Tell Pangu-alpha what you want to generate:")
        raw_text = sample
        tokenizer = get_tokenizer()
        context_tokens = tokenizer.tokenize(raw_text)
        output_ids = generate(model, context_tokens, args, tokenizer)
        output_samples = tokenizer.convert_ids_to_tokens(output_ids.tolist())
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
        print('Input is:', sample)
        print('Output is:', output_samples[len(sample):], flush=True)
        print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
    return
if __name__ == "__main__":
    # Script entry point: demo prompts followed by the interactive loop.
    main()
| [
"numpy.sum",
"megatron.initialize.initialize_megatron",
"megatron.text_generation_utils.pad_batch",
"megatron.get_args",
"os.path.dirname",
"megatron.get_tokenizer",
"megatron.checkpointing.load_checkpoint",
"megatron.model.GPT2Model",
"torch.cuda.LongTensor",
"megatron.training.get_model",
"meg... | [((1528, 1567), 'megatron.print_rank_0', 'print_rank_0', (['"""building GPT2 model ..."""'], {}), "('building GPT2 model ...')\n", (1540, 1567), False, 'from megatron import print_rank_0\n'), ((1580, 1630), 'megatron.model.GPT2Model', 'GPT2Model', ([], {'num_tokentypes': '(0)', 'parallel_output': '(False)'}), '(num_tokentypes=0, parallel_output=False)\n', (1589, 1630), False, 'from megatron.model import GPT2Model\n'), ((3423, 3474), 'megatron.text_generation_utils.pad_batch', 'pad_batch', (['[context_tokens]', 'tokenizer.pad_id', 'args'], {}), '([context_tokens], tokenizer.pad_id, args)\n', (3432, 3474), False, 'from megatron.text_generation_utils import pad_batch, get_batch\n'), ((3552, 3590), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['context_tokens_'], {}), '(context_tokens_)\n', (3573, 3590), False, 'import torch\n'), ((3634, 3666), 'megatron.text_generation_utils.get_batch', 'get_batch', (['context_tokens_tensor'], {}), '(context_tokens_tensor)\n', (3643, 3666), False, 'from megatron.text_generation_utils import pad_batch, get_batch\n'), ((4782, 4817), 'numpy.sum', 'np.sum', (['(outputs != tokenizer.pad_id)'], {}), '(outputs != tokenizer.pad_id)\n', (4788, 4817), True, 'import numpy as np\n'), ((4914, 5035), 'megatron.initialize.initialize_megatron', 'initialize_megatron', ([], {'extra_args_provider': 'add_text_generate_args', 'args_defaults': "{'tokenizer_type': 'GPT2BPETokenizer'}"}), "(extra_args_provider=add_text_generate_args,\n args_defaults={'tokenizer_type': 'GPT2BPETokenizer'})\n", (4933, 5035), False, 'from megatron.initialize import initialize_megatron\n'), ((5109, 5134), 'megatron.training.get_model', 'get_model', (['model_provider'], {}), '(model_provider)\n', (5118, 5134), False, 'from megatron.training import get_model\n'), ((5164, 5174), 'megatron.get_args', 'get_args', ([], {}), '()\n', (5172, 5174), False, 'from megatron import get_args\n'), ((5217, 5251), 'megatron.checkpointing.load_checkpoint', 'load_checkpoint', 
(['model', 'None', 'None'], {}), '(model, None, None)\n', (5232, 5251), False, 'from megatron.checkpointing import load_checkpoint\n'), ((5815, 5830), 'megatron.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (5828, 5830), False, 'from megatron import get_tokenizer\n'), ((6422, 6437), 'megatron.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (6435, 6437), False, 'from megatron import get_tokenizer\n'), ((753, 778), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (768, 778), False, 'import os\n'), ((3779, 3794), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3792, 3794), False, 'import torch\n')] |
from os import getcwd, listdir
import numpy as np
from scipy import signal as sp
from higrid.utils import wavread
def emulatescene(insig, gain, irspath):
    """
    Emulate an em32 capture of one source by convolving it with measured AIRs.

    :param insig: (Single-channel) input signal
    :param gain: Gain (scalar)
    :param irspath: Path to the AIRs to be used
    :return: 32 channels of audio from an emulated em32 recording.
    """
    # One AIR file per microphone capsule, taken in sorted filename order.
    filenames = sorted(listdir(irspath))
    first = wavread(irspath + '/' + filenames[0])
    ir = np.zeros((32, first[0].shape[0]))
    for chan in range(32):
        data = wavread(irspath + '/' + filenames[chan])
        ir[chan, :] += data[0].reshape((data[0].shape[0]))
    # Convolve the (scaled) source with each capsule's AIR.
    out = np.zeros((32, len(insig)))
    for chan in range(32):
        out[chan, :] = sp.fftconvolve(gain * insig, ir[chan, :], mode='same')
    return out
def emptyscene(sha=(32, 48000)):
    """
    Return an all-zero scene of the requested shape.

    :param sha: Tuple containing number of channels and number of samples (nchan, samples) (default = (32, 48000))
    :return: An empty scene containing nchan channels and the given number of samples (empty numpy array)
    """
    return np.zeros(sha)
def combinescene(sg1, sg2):
    """
    Linearly combine two scenes pertaining to em32 recordings.

    Generalized from the original hard-coded 32-iteration Python loop to a
    single vectorized element-wise sum: for the documented 32 x samples
    inputs the result is identical, and it now also works for any channel
    count while avoiding the per-row loop.

    :param sg1: Scene 1 (nchan x samples numpy array)
    :param sg2: Scene 2 (numpy array of the same shape)
    :return: Combined scene (numpy array of the same shape)
    """
    return np.asarray(sg1) + np.asarray(sg2)
def composescene(filelist, dirset, samples=(0, 96000), roomstr='ii-s05'):
    """
    Compose an emulated scene from anechoic recordings and measured AIRs.

    :param filelist: List of anechoic files to be used
    :param dirset: Set containing tuples with (X, Y, Z) as the AIR indices
    :param samples: (start, end) sample range to process
    :param roomstr: Room directory to take the AIRs from (default 'ii-s05')
    :return: 32 channels of audio from an emulated em32 recording.
    """
    positions = dirset.copy()
    assert len(filelist) == len(positions)
    numsamp = samples[1] - samples[0]
    scene = emptyscene((32, numsamp))
    for fname in filelist:
        pos = positions.pop()
        # AIR subdirectory name is the concatenated grid indices, e.g. "342".
        postag = str(pos[0]) + str(pos[1]) + str(pos[2])
        snd = wavread(getcwd() + '/data/sdata/anechoic/' + fname)
        mono = snd[0].reshape((snd[0].shape[0]))[samples[0]:samples[1]]
        # Gain derived from the source position relative to (3, 3, ~2);
        # NOTE(review): grows with distance — presumably a deliberate
        # level normalization, verify against the measurement setup.
        gain = np.sqrt((pos[0] - 3.0) ** 2 + (pos[1] - 3.0) ** 2 + ((pos[2] - 2.0) * 0.6) ** 2)
        emulated = emulatescene(mono, gain, getcwd() + '/data/rdata/' + roomstr + '/' + postag)
        scene = combinescene(scene, emulated)
    return scene
def realrec(dirpath, prefix, samples):
    """
    Assemble a scene from real em32 recordings stored one file per channel.

    :param dirpath: Path containing the em32 recordings
    :param prefix: Common filename prefix before the 1-based channel number
    :param samples: Number of samples to use
    :return: 32 channel em32 recording
    """
    scene = emptyscene((32, samples))
    # Channel files are named <prefix>1.wav ... <prefix>32.wav.
    for chan in range(32):
        data = wavread(dirpath + prefix + str(chan + 1) + '.wav')
        scene[chan, :] = data[0].reshape((data[0].shape[0]))[0:samples]
    return scene
"higrid.utils.wavread",
"os.getcwd",
"numpy.zeros",
"scipy.signal.fftconvolve",
"os.listdir",
"numpy.sqrt"
] | [((448, 464), 'os.listdir', 'listdir', (['irspath'], {}), '(irspath)\n', (455, 464), False, 'from os import getcwd, listdir\n'), ((494, 524), 'higrid.utils.wavread', 'wavread', (["(irspath + '/' + dr[0])"], {}), "(irspath + '/' + dr[0])\n", (501, 524), False, 'from higrid.utils import wavread\n'), ((534, 564), 'numpy.zeros', 'np.zeros', (['(32, wv[0].shape[0])'], {}), '((32, wv[0].shape[0]))\n', (542, 564), True, 'import numpy as np\n'), ((720, 738), 'numpy.zeros', 'np.zeros', (['(32, sz)'], {}), '((32, sz))\n', (728, 738), True, 'import numpy as np\n'), ((1165, 1178), 'numpy.zeros', 'np.zeros', (['sha'], {}), '(sha)\n', (1173, 1178), True, 'import numpy as np\n'), ((1470, 1489), 'numpy.zeros', 'np.zeros', (['sg1.shape'], {}), '(sg1.shape)\n', (1478, 1489), True, 'import numpy as np\n'), ((603, 635), 'higrid.utils.wavread', 'wavread', (["(irspath + '/' + dr[ind])"], {}), "(irspath + '/' + dr[ind])\n", (610, 635), False, 'from higrid.utils import wavread\n'), ((786, 839), 'scipy.signal.fftconvolve', 'sp.fftconvolve', (['(gain * insig)', 'ir[ind, :]'], {'mode': '"""same"""'}), "(gain * insig, ir[ind, :], mode='same')\n", (800, 839), True, 'from scipy import signal as sp\n'), ((2557, 2634), 'numpy.sqrt', 'np.sqrt', (['((dr[0] - 3.0) ** 2 + (dr[1] - 3.0) ** 2 + ((dr[2] - 2.0) * 0.6) ** 2)'], {}), '((dr[0] - 3.0) ** 2 + (dr[1] - 3.0) ** 2 + ((dr[2] - 2.0) * 0.6) ** 2)\n', (2564, 2634), True, 'import numpy as np\n'), ((2428, 2436), 'os.getcwd', 'getcwd', ([], {}), '()\n', (2434, 2436), False, 'from os import getcwd, listdir\n'), ((2658, 2666), 'os.getcwd', 'getcwd', ([], {}), '()\n', (2664, 2666), False, 'from os import getcwd, listdir\n')] |
import numpy as np
from fractions import Fraction
# 52-symbol alphabet (a-z then A-Z); a letter's list index is its numeric code mod 52.
abcd=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
# 2x2 Hill-cipher key matrix; det = 3*5 - 3*2 = 9, coprime with 52, so invertible mod 52.
a=np.array([(3,3),(2,5)])
print(a)
ste=input("Enter the string to encode:")
# Hill cipher works on digraphs: pad odd-length input with its own first character.
if len(ste)%2!=0:
    ste=ste+ste[0]
print(ste)
i=0
cipher=""
# Encrypt two letters at a time: ciphertext pair = key @ plaintext pair (mod 52).
while i<len(ste):
    st=abcd.index(ste[i])
    sd=abcd.index(ste[i+1])
    b=np.array([(st),(sd)])
    num=(np.matmul(a,b))%52
    cipher=cipher+abcd[num[0]]+abcd[num[1]]
    i=i+2
print("cipher text is:",cipher)
#decryption
# Determinant of the key; int() truncates the float from np.linalg.det.
# NOTE(review): exact for this fixed matrix (det = 9), but round() would be
# safer if the key matrix ever changes.
deta=int(np.linalg.det(a))
# Adjugate of the 2x2 key [[3,3],[2,5]]: swap diagonal, negate off-diagonal.
adj=np.array([(5,-3), (-2,3)])
def inv(k1):
    """Return the multiplicative inverse of ``k1`` modulo 52.

    The original implementation maintained an incremental running product
    with dead variables (``i``, ``j``, the initial ``diff=0``) and looped
    forever when ``gcd(k1, 52) != 1``. This version simply scans the 51
    possible candidates and raises instead of hanging when no inverse
    exists.

    :param k1: integer to invert (must be coprime with 52)
    :return: the unique c in [1, 51] with (k1 * c) % 52 == 1
    :raises ValueError: if k1 has no inverse modulo 52
    """
    for c in range(1, 52):
        if (k1 * c) % 52 == 1:
            return c
    raise ValueError("%d has no multiplicative inverse modulo 52" % k1)
# Scalar inverse of the determinant mod 52 (this rebinds the name `inv`
# from the function above to an int, so inv() cannot be called again).
inv=(inv(deta))%52
# Inverse key matrix (up to the final mod 52): inv(det) * adjugate.
new=np.dot(inv,adj)
i=0
plain=""
# Decrypt two letters at a time: plaintext pair = inverse key @ ciphertext pair (mod 52).
while i<len(cipher):
    st=abcd.index(cipher[i])
    sd=abcd.index(cipher[i+1])
    b=np.array([(st),(sd)])
    num=(np.matmul(new,b))%52
    plain=plain+abcd[num[0]]
    plain=plain+abcd[num[1]]
    i=i+2
print("plain text is:",plain)
| [
"numpy.dot",
"numpy.linalg.det",
"numpy.array",
"numpy.matmul"
] | [((270, 296), 'numpy.array', 'np.array', (['[(3, 3), (2, 5)]'], {}), '([(3, 3), (2, 5)])\n', (278, 296), True, 'import numpy as np\n'), ((683, 711), 'numpy.array', 'np.array', (['[(5, -3), (-2, 3)]'], {}), '([(5, -3), (-2, 3)])\n', (691, 711), True, 'import numpy as np\n'), ((1072, 1088), 'numpy.dot', 'np.dot', (['inv', 'adj'], {}), '(inv, adj)\n', (1078, 1088), True, 'import numpy as np\n'), ((495, 513), 'numpy.array', 'np.array', (['[st, sd]'], {}), '([st, sd])\n', (503, 513), True, 'import numpy as np\n'), ((660, 676), 'numpy.linalg.det', 'np.linalg.det', (['a'], {}), '(a)\n', (673, 676), True, 'import numpy as np\n'), ((1194, 1212), 'numpy.array', 'np.array', (['[st, sd]'], {}), '([st, sd])\n', (1202, 1212), True, 'import numpy as np\n'), ((527, 542), 'numpy.matmul', 'np.matmul', (['a', 'b'], {}), '(a, b)\n', (536, 542), True, 'import numpy as np\n'), ((1226, 1243), 'numpy.matmul', 'np.matmul', (['new', 'b'], {}), '(new, b)\n', (1235, 1243), True, 'import numpy as np\n')] |
import random
import numpy as np
import matplotlib.pyplot as plt
class ColoredPath:
    """Paint a pixel path onto a black RGB canvas, colored along a matplotlib colormap."""

    # All colormap names known to matplotlib; used for random selection.
    COLORMAPS = plt.colormaps()

    def __init__(self, path, shape) -> None:
        # path: sequence of pixel coordinates used to index the canvas;
        # shape: at least (height, width) of the canvas to allocate.
        self.path = path
        self.img = np.zeros((shape[0], shape[1], 3))

    def get_colored_path(self, cmap=None):
        """Render the path onto the canvas and return it.

        :param cmap: colormap name; a random one is picked when omitted.
        """
        chosen = random.choice(self.COLORMAPS) if cmap is None else cmap
        mapper = plt.cm.get_cmap(chosen)
        total = len(self.path)
        for step, pixel in enumerate(self.path):
            # Color runs through the map from start (0) to end (~1); drop alpha.
            self.img[pixel] = mapper(step / total)[:3]
        return self.img
| [
"random.choice",
"numpy.zeros",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.colormaps"
] | [((101, 116), 'matplotlib.pyplot.colormaps', 'plt.colormaps', ([], {}), '()\n', (114, 116), True, 'import matplotlib.pyplot as plt\n'), ((206, 239), 'numpy.zeros', 'np.zeros', (['(shape[0], shape[1], 3)'], {}), '((shape[0], shape[1], 3))\n', (214, 239), True, 'import numpy as np\n'), ((363, 384), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (378, 384), True, 'import matplotlib.pyplot as plt\n'), ((314, 343), 'random.choice', 'random.choice', (['self.COLORMAPS'], {}), '(self.COLORMAPS)\n', (327, 343), False, 'import random\n')] |
import torch
from pytorch_toolbelt.utils.torch_utils import count_parameters
from torch import nn
from xview.dataset import OUTPUT_MASK_KEY
from xview.losses import ArcFaceLoss2d, OHEMCrossEntropyLoss
from xview.models.deeplab import resnet34_deeplab128
from xview.models.fpn_v2 import (
resnet101_fpncatv2_256,
densenet201_fpncatv2_256,
efficientb4_fpncatv2_256,
inceptionv4_fpncatv2_256,
)
from xview.models.hrnet_arc import hrnet18_arc
from xview.models.segcaps import SegCaps
from xview.models.unet import resnet18_unet32
from xview.models.unetv2 import inceptionv4_unet_v2, resnet101_unet_v2
def test_ohem_ce():
    """Smoke-test OHEM cross-entropy on random GPU logits and targets."""
    logits = torch.randn((8, 5, 128, 128)).cuda()
    target = torch.randint(0, 5, (8, 128, 128)).long().cuda()
    criterion = OHEMCrossEntropyLoss()
    print(criterion(logits, target))
def test_conv_transpose():
    """Print the output size of a stride-4 transposed convolution on GPU."""
    inp = torch.randn((1, 32, 128, 128)).cuda()
    deconv = nn.ConvTranspose2d(32, 5, kernel_size=8, stride=4, padding=2).cuda()
    print(deconv(inp).size())
@torch.no_grad()
def test_hrnet18_arc():
    """Forward a random 6-channel input through hrnet18_arc and evaluate ArcFace loss."""
    inp = torch.randn((1, 6, 256, 256))
    model = hrnet18_arc().eval()
    out = model(inp)
    target = torch.randint(0, 5, (1, 256, 256)).long()
    criterion = ArcFaceLoss2d()
    loss = criterion(out[OUTPUT_MASK_KEY], target)
    print(out)
@torch.no_grad()
def test_resnet18_unet():
    """Build resnet18_unet32, report its parameter count, and print a forward pass."""
    # Keep the randn before model construction so the RNG draw order matches.
    inp = torch.randn((1, 6, 256, 256))
    model = resnet18_unet32().eval()
    print(count_parameters(model))
    print(model(inp))
@torch.no_grad()
def test_resnet34_deeplab128():
    """Build resnet34_deeplab128, report its parameter count, and print a forward pass."""
    # randn drawn before model construction, matching the original RNG order.
    inp = torch.randn((1, 6, 512, 512))
    model = resnet34_deeplab128().eval()
    print(count_parameters(model))
    print(model(inp))
def test_seg_caps():
    """Build SegCaps and print its parameter count and output size for a 256x256 batch."""
    model = SegCaps(num_classes=5)
    print(count_parameters(model))
    inp = torch.randn((4, 3, 256, 256))
    print(model(inp).size())
def test_selim_unet():
    """Run DensenetUnet on a zero batch; print the output shape and the model."""
    from xview.models.selim.unet import DensenetUnet
    import numpy as np
    model = DensenetUnet(5, backbone_arch="densenet121")
    model.eval()
    with torch.no_grad():
        batch = torch.from_numpy(np.zeros((16, 3, 256, 256), dtype="float32"))
        out = model(batch)
        print(out.shape)
    print(model)
@torch.no_grad()
def test_inception_unet_like_selim():
    """Inspect inceptionv4_unet_v2's decoder pieces and run a GPU forward pass."""
    model = inceptionv4_unet_v2().cuda().eval()
    print(count_parameters(model))
    print(model.decoder.decoder_features)
    print(model.decoder.bottlenecks)
    print(model.decoder.decoder_stages)
    batch = torch.rand(4, 6, 512, 512).cuda()
    out = model(batch)
    print(out[OUTPUT_MASK_KEY].size())
@torch.no_grad()
def test_resnet101_unet_like_selim():
    """Inspect resnet101_unet_v2's decoder pieces and run a GPU forward pass.

    Bug fix: this was a second ``def test_inception_unet_like_selim``, which
    silently shadowed the inceptionv4 test above so pytest only ever
    collected one of the two. Renamed to reflect the model actually tested.
    """
    d = resnet101_unet_v2().cuda().eval()
    print(count_parameters(d))
    print(d.decoder.decoder_features)
    print(d.decoder.bottlenecks)
    print(d.decoder.decoder_stages)
    images = torch.rand(4, 6, 512, 512).cuda()
    i = d(images)
    print(i[OUTPUT_MASK_KEY].size())
@torch.no_grad()
def test_resnet101_fpncatv2_256():
    """Smoke-test resnet101_fpncatv2_256: parameter count plus a GPU forward pass."""
    net = resnet101_fpncatv2_256().cuda().eval()
    print(count_parameters(net))
    batch = torch.rand(2, 6, 512, 512).cuda()
    print(net(batch)[OUTPUT_MASK_KEY].size())
@torch.no_grad()
def test_densenet201_fpncatv2_256():
    """Smoke-test densenet201_fpncatv2_256: parameter count plus a GPU forward pass."""
    net = densenet201_fpncatv2_256().cuda().eval()
    print(count_parameters(net))
    batch = torch.rand(4, 6, 512, 512).cuda()
    print(net(batch)[OUTPUT_MASK_KEY].size())
@torch.no_grad()
def test_inceptionv4_fpncatv2_256():
    """Smoke-test inceptionv4_fpncatv2_256: parameter count plus a GPU forward pass."""
    net = inceptionv4_fpncatv2_256().cuda().eval()
    print(count_parameters(net))
    batch = torch.rand(2, 6, 512, 512).cuda()
    print(net(batch)[OUTPUT_MASK_KEY].size())
@torch.no_grad()
def test_efficientb4_fpncatv2_256():
    """Smoke-test efficientb4_fpncatv2_256: parameter count plus a GPU forward pass."""
    net = efficientb4_fpncatv2_256().cuda().eval()
    print(count_parameters(net))
    batch = torch.rand(4, 6, 512, 512).cuda()
    print(net(batch)[OUTPUT_MASK_KEY].size())
| [
"xview.models.hrnet_arc.hrnet18_arc",
"xview.models.selim.unet.DensenetUnet",
"xview.losses.OHEMCrossEntropyLoss",
"xview.models.unetv2.resnet101_unet_v2",
"torch.randn",
"xview.models.unet.resnet18_unet32",
"xview.losses.ArcFaceLoss2d",
"torch.no_grad",
"pytorch_toolbelt.utils.torch_utils.count_par... | [((1005, 1020), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1018, 1020), False, 'import torch\n'), ((1283, 1298), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1296, 1298), False, 'import torch\n'), ((1466, 1481), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1479, 1481), False, 'import torch\n'), ((2155, 2170), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2168, 2170), False, 'import torch\n'), ((2498, 2513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2511, 2513), False, 'import torch\n'), ((2839, 2854), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2852, 2854), False, 'import torch\n'), ((3074, 3089), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3087, 3089), False, 'import torch\n'), ((3313, 3328), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3326, 3328), False, 'import torch\n'), ((3552, 3567), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3565, 3567), False, 'import torch\n'), ((750, 772), 'xview.losses.OHEMCrossEntropyLoss', 'OHEMCrossEntropyLoss', ([], {}), '()\n', (770, 772), False, 'from xview.losses import ArcFaceLoss2d, OHEMCrossEntropyLoss\n'), ((1053, 1082), 'torch.randn', 'torch.randn', (['(1, 6, 256, 256)'], {}), '((1, 6, 256, 256))\n', (1064, 1082), False, 'import torch\n'), ((1200, 1215), 'xview.losses.ArcFaceLoss2d', 'ArcFaceLoss2d', ([], {}), '()\n', (1213, 1215), False, 'from xview.losses import ArcFaceLoss2d, OHEMCrossEntropyLoss\n'), ((1333, 1362), 'torch.randn', 'torch.randn', (['(1, 6, 256, 256)'], {}), '((1, 6, 256, 256))\n', (1344, 1362), False, 'import torch\n'), ((1522, 1551), 'torch.randn', 'torch.randn', (['(1, 6, 512, 512)'], {}), '((1, 6, 512, 512))\n', (1533, 1551), False, 'import torch\n'), ((1689, 1711), 'xview.models.segcaps.SegCaps', 'SegCaps', ([], {'num_classes': '(5)'}), '(num_classes=5)\n', (1696, 1711), False, 'from xview.models.segcaps import SegCaps\n'), ((1753, 1782), 'torch.randn', 'torch.randn', 
(['(4, 3, 256, 256)'], {}), '((4, 3, 256, 256))\n', (1764, 1782), False, 'import torch\n'), ((1905, 1949), 'xview.models.selim.unet.DensenetUnet', 'DensenetUnet', (['(5)'], {'backbone_arch': '"""densenet121"""'}), "(5, backbone_arch='densenet121')\n", (1917, 1949), False, 'from xview.models.selim.unet import DensenetUnet\n'), ((1408, 1429), 'pytorch_toolbelt.utils.torch_utils.count_parameters', 'count_parameters', (['net'], {}), '(net)\n', (1424, 1429), False, 'from pytorch_toolbelt.utils.torch_utils import count_parameters\n'), ((1601, 1622), 'pytorch_toolbelt.utils.torch_utils.count_parameters', 'count_parameters', (['net'], {}), '(net)\n', (1617, 1622), False, 'from pytorch_toolbelt.utils.torch_utils import count_parameters\n'), ((1722, 1743), 'pytorch_toolbelt.utils.torch_utils.count_parameters', 'count_parameters', (['net'], {}), '(net)\n', (1738, 1743), False, 'from pytorch_toolbelt.utils.torch_utils import count_parameters\n'), ((1996, 2011), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2009, 2011), False, 'import torch\n'), ((2263, 2282), 'pytorch_toolbelt.utils.torch_utils.count_parameters', 'count_parameters', (['d'], {}), '(d)\n', (2279, 2282), False, 'from pytorch_toolbelt.utils.torch_utils import count_parameters\n'), ((2604, 2623), 'pytorch_toolbelt.utils.torch_utils.count_parameters', 'count_parameters', (['d'], {}), '(d)\n', (2620, 2623), False, 'from pytorch_toolbelt.utils.torch_utils import count_parameters\n'), ((2947, 2966), 'pytorch_toolbelt.utils.torch_utils.count_parameters', 'count_parameters', (['d'], {}), '(d)\n', (2963, 2966), False, 'from pytorch_toolbelt.utils.torch_utils import count_parameters\n'), ((3186, 3205), 'pytorch_toolbelt.utils.torch_utils.count_parameters', 'count_parameters', (['d'], {}), '(d)\n', (3202, 3205), False, 'from pytorch_toolbelt.utils.torch_utils import count_parameters\n'), ((3425, 3444), 'pytorch_toolbelt.utils.torch_utils.count_parameters', 'count_parameters', (['d'], {}), '(d)\n', (3441, 3444), 
False, 'from pytorch_toolbelt.utils.torch_utils import count_parameters\n'), ((3664, 3683), 'pytorch_toolbelt.utils.torch_utils.count_parameters', 'count_parameters', (['d'], {}), '(d)\n', (3680, 3683), False, 'from pytorch_toolbelt.utils.torch_utils import count_parameters\n'), ((644, 673), 'torch.randn', 'torch.randn', (['(8, 5, 128, 128)'], {}), '((8, 5, 128, 128))\n', (655, 673), False, 'import torch\n'), ((842, 872), 'torch.randn', 'torch.randn', (['(1, 32, 128, 128)'], {}), '((1, 32, 128, 128))\n', (853, 872), False, 'import torch\n'), ((894, 955), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', '(5)'], {'kernel_size': '(8)', 'stride': '(4)', 'padding': '(2)'}), '(32, 5, kernel_size=8, stride=4, padding=2)\n', (912, 955), False, 'from torch import nn\n'), ((1093, 1106), 'xview.models.hrnet_arc.hrnet18_arc', 'hrnet18_arc', ([], {}), '()\n', (1104, 1106), False, 'from xview.models.hrnet_arc import hrnet18_arc\n'), ((1142, 1176), 'torch.randint', 'torch.randint', (['(0)', '(5)', '(1, 256, 256)'], {}), '(0, 5, (1, 256, 256))\n', (1155, 1176), False, 'import torch\n'), ((1373, 1390), 'xview.models.unet.resnet18_unet32', 'resnet18_unet32', ([], {}), '()\n', (1388, 1390), False, 'from xview.models.unet import resnet18_unet32\n'), ((1562, 1583), 'xview.models.deeplab.resnet34_deeplab128', 'resnet34_deeplab128', ([], {}), '()\n', (1581, 1583), False, 'from xview.models.deeplab import resnet34_deeplab128\n'), ((2047, 2091), 'numpy.zeros', 'np.zeros', (['(16, 3, 256, 256)'], {'dtype': '"""float32"""'}), "((16, 3, 256, 256), dtype='float32')\n", (2055, 2091), True, 'import numpy as np\n'), ((2406, 2432), 'torch.rand', 'torch.rand', (['(4)', '(6)', '(512)', '(512)'], {}), '(4, 6, 512, 512)\n', (2416, 2432), False, 'import torch\n'), ((2747, 2773), 'torch.rand', 'torch.rand', (['(4)', '(6)', '(512)', '(512)'], {}), '(4, 6, 512, 512)\n', (2757, 2773), False, 'import torch\n'), ((2982, 3008), 'torch.rand', 'torch.rand', (['(2)', '(6)', '(512)', '(512)'], {}), 
'(2, 6, 512, 512)\n', (2992, 3008), False, 'import torch\n'), ((3221, 3247), 'torch.rand', 'torch.rand', (['(4)', '(6)', '(512)', '(512)'], {}), '(4, 6, 512, 512)\n', (3231, 3247), False, 'import torch\n'), ((3460, 3486), 'torch.rand', 'torch.rand', (['(2)', '(6)', '(512)', '(512)'], {}), '(2, 6, 512, 512)\n', (3470, 3486), False, 'import torch\n'), ((3699, 3725), 'torch.rand', 'torch.rand', (['(4)', '(6)', '(512)', '(512)'], {}), '(4, 6, 512, 512)\n', (3709, 3725), False, 'import torch\n'), ((689, 723), 'torch.randint', 'torch.randint', (['(0)', '(5)', '(8, 128, 128)'], {}), '(0, 5, (8, 128, 128))\n', (702, 723), False, 'import torch\n'), ((2217, 2238), 'xview.models.unetv2.inceptionv4_unet_v2', 'inceptionv4_unet_v2', ([], {}), '()\n', (2236, 2238), False, 'from xview.models.unetv2 import inceptionv4_unet_v2, resnet101_unet_v2\n'), ((2560, 2579), 'xview.models.unetv2.resnet101_unet_v2', 'resnet101_unet_v2', ([], {}), '()\n', (2577, 2579), False, 'from xview.models.unetv2 import inceptionv4_unet_v2, resnet101_unet_v2\n'), ((2898, 2922), 'xview.models.fpn_v2.resnet101_fpncatv2_256', 'resnet101_fpncatv2_256', ([], {}), '()\n', (2920, 2922), False, 'from xview.models.fpn_v2 import resnet101_fpncatv2_256, densenet201_fpncatv2_256, efficientb4_fpncatv2_256, inceptionv4_fpncatv2_256\n'), ((3135, 3161), 'xview.models.fpn_v2.densenet201_fpncatv2_256', 'densenet201_fpncatv2_256', ([], {}), '()\n', (3159, 3161), False, 'from xview.models.fpn_v2 import resnet101_fpncatv2_256, densenet201_fpncatv2_256, efficientb4_fpncatv2_256, inceptionv4_fpncatv2_256\n'), ((3374, 3400), 'xview.models.fpn_v2.inceptionv4_fpncatv2_256', 'inceptionv4_fpncatv2_256', ([], {}), '()\n', (3398, 3400), False, 'from xview.models.fpn_v2 import resnet101_fpncatv2_256, densenet201_fpncatv2_256, efficientb4_fpncatv2_256, inceptionv4_fpncatv2_256\n'), ((3613, 3639), 'xview.models.fpn_v2.efficientb4_fpncatv2_256', 'efficientb4_fpncatv2_256', ([], {}), '()\n', (3637, 3639), False, 'from xview.models.fpn_v2 
import resnet101_fpncatv2_256, densenet201_fpncatv2_256, efficientb4_fpncatv2_256, inceptionv4_fpncatv2_256\n')] |
import pandas as pd
from utils_dr_pre_word_simi import *
import os
from utils import *
from transformers import *
from dataset import Dataset_dr
import torch
import numpy as np
# Paths to the paraphrase data splits.
TRAIN_DR = './con_rew_data/para/train.csv'
DEV_DR = './con_rew_data/para/dev.csv'
TEST_DR = './con_rew_data/para/test.csv'
# Log file collecting verbs for which no similar verb could be found.
fw = open('verb_no_simi.txt', 'w')
VER_MAG_RATE = 1.5  # multiplicative weight applied to verb-token entries in agen_vector
VER_ADD_VAL = 5  # value assigned to verb-token entries instead when multi=False
batchsize_dr = 4
device_dr = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
# Table mapping each verb (infinitive form) to a similar verb per category.
verb2simi = load_word2simi()
tokenizer_dr = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
# Special tokens: sentence boundaries, padding, the classification separator,
# the three category tokens and a verb placeholder.
token_dict_dr = {
    'bos_token': '<start>',
    'eos_token': '<end>',
    'pad_token': '<pad>',
    'cls_token': '<cls>',
    'additional_special_tokens': ['<pos>', '<neg>', '<equal>', '<VERB>']
}
num_added_token_dr = tokenizer_dr.add_special_tokens(token_dict_dr)
print(tokenizer_dr.vocab_size)
cats = ['pos', 'neg', 'equal']  # the three target categories
def agen_vector(tokenizer, num_added, multi=True):
    """Build one vocabulary-sized weight vector per agency label.

    For each label in the global `agen_v` mapping, every surface form of
    each verb in its set gets a boosted entry at the position of its first
    token id: multiplied by VER_MAG_RATE when `multi` is True, otherwise
    set to the constant VER_ADD_VAL.

    Returns:
        dict mapping label -> 1-D tensor of length vocab_size + num_added.
    """
    vocab_len = tokenizer.vocab_size + num_added
    label_vectors = {}
    for label, verbset in agen_v.items():
        weights = torch.ones(vocab_len) if multi else torch.zeros(vocab_len)
        for verb in verbset:
            for form in infi2allforms(verb):
                # Only the first sub-token of the form is weighted.
                first_id = tokenizer.encode(form)[0]
                if multi:
                    weights[first_id] *= VER_MAG_RATE
                else:
                    weights[first_id] = VER_ADD_VAL
        label_vectors[label] = weights
    return label_vectors
def infi2allforms(word):
    """Return every inflected form of an infinitive verb.

    Looks the word up in the global `verb_form` table (column 0 holds the
    infinitive); falls back to [word] itself when it is not listed.
    """
    match = verb_form[verb_form[0] == word]
    if match.empty:
        return [word]
    match = match.dropna(axis=1)
    return [match[col].iloc[0] for col in match.columns]
def get_he_df(df):
    """Filter df down to the rows whose sentence appears in the human-eval set.

    Sentences are normalized (spaces stripped, lower-cased) into a 'cri'
    column on both frames before matching.  Prints the resulting row count.
    """
    df['cri'] = df['sen'].str.replace(' ', '').str.lower()
    he_df = pd.read_csv(ROC_TEST_HE)
    he_df['cri'] = he_df['sen'].str.replace(' ', '').str.lower()
    filtered = df[df['cri'].isin(he_df['cri'])]
    print(len(filtered.index))
    return filtered
def simi_word(verb, descat):
    """Return a verb of similar meaning in the desired category `descat`.

    Used at both train and generation time.  The lookup is done on the
    infinitive form of `verb` against the global `verb2simi` table; when no
    entry exists, the verb is logged to the `fw` file and returned unchanged.
    """
    infinitive = word_infinitive(verb)
    matches = verb2simi.loc[verb2simi['verb'] == infinitive, descat].tolist()
    if matches:
        return matches[0]
    # No similar verb found: record it and fall back to the original verb.
    fw.write(verb + '\n')
    return verb
def extract_args(sen, para, train_time):
    """Pull the relevant fields out of one sample row.

    Args:
        sen: mapping-like row with original and paraphrase fields.
        para: if True, read the paraphrase-side columns.
        train_time: if False, the target category comes from the row's
            'descat' column instead of the source category.

    Returns:
        (sen_del, descat, verbs, para_sen) tuple of field values.
    """
    if para:
        fields = ('oridel', 'paracat', 'paraverbs', 'parasen')
    else:
        fields = ('sendel', 'oricat', 'verbs', 'sen')
    sen_del, descat, verbs, para_sen = (sen[key] for key in fields)
    # At generation time the desired category overrides the source one.
    if not train_time:
        descat = sen['descat']
    return sen_del, descat, verbs, para_sen
def sen_in(sen, noi_idx, train_time=True, para=False):
    """Turn one DataFrame row into a token-id list for the GPT model.

    Args:
        sen: an (index, row) pair as produced by DataFrame.iterrows().
        noi_idx: indices of the rows whose verbs should be replaced by
            similar verbs of the target category ("noised" rows).
        train_time: if True, append the target sentence so the model can be
            trained with teacher forcing; otherwise the prompt stops after
            the second '<start>' so generation can continue from there.
        para: if True, read the paraphrase-side fields of the row.

    Returns:
        (token_ids, supplied_verbs) where supplied_verbs is a
        space-separated string of the verbs fed to the model.
    """
    sen_idx, row = sen
    sen_del, descat, verbs, para_sen = extract_args(row, para, train_time)
    if sen_idx in noi_idx:
        # Replace each verb by one of similar meaning in the target category.
        # BUG FIX: join with spaces -- the original concatenated the verbs
        # into one unseparated string, unlike the un-noised branch below.
        add_verbs = ' '.join(simi_word(v, descat) for v in verbs.split())
    else:
        add_verbs = verbs
    newsen = '<start> ' + sen_del
    if not train_time:
        # BUG FIX: wrap the category in angle brackets so it is encoded as
        # the special token (e.g. '<pos>') exactly as seen during training;
        # the original produced a bare 'pos<start>'.
        newsen += '<cls> <' + descat + '> <start>'
    else:
        newsen += '<cls> <' + descat + '> <start> ' + para_sen + ' <end>'
    tok_li = tokenizer_dr.encode(newsen, add_special_tokens=False)
    return tok_li, add_verbs
def sen_in_retr(sen, df, method):
    # NOTE(review): appears unfinished -- `method` is unused and nothing is
    # returned; `df` is mutated in place.  `senavg` is a one-row Series (not
    # a scalar), so the subtraction aligns on index and leaves most rows
    # NaN -- presumably `.iloc[0]` was intended; TODO confirm before use.
    senavg = df[df['sen']==sen]['glove_avg']
    df['glove_avg'] = df['glove_avg'] - senavg
def parse_file_dr(file, noi_frac=0.1, train_time=True, para=False):
    """Read a CSV of sentence rows and build the token data for the DR model.

    A random fraction `noi_frac` of the rows is marked as "noise": for those
    rows sen_in() substitutes similar verbs of the target category.

    Returns:
        train_time=True: a Dataset_dr of padded token-id lists.
        train_time=False: (token lists, expanded DataFrame) with one copy
        of the data per target category.
    """
    path = os.path.abspath(file)
    with open(path,encoding='UTF-8') as f:
        df = pd.read_csv(f)
    # Rows whose index lands in noi_df get their verbs swapped by sen_in().
    noi_df = df.sample(frac=noi_frac)
    if train_time:
        tok_li = [sen_in(sen, noi_df.index, train_time=train_time, para=para) for sen in df.iterrows()]
        # Each entry is (token_list, supplied_verbs); split the two columns.
        tok_li = np.array(tok_li)
        df['v_supplied'] = tok_li[:, 1]
        tok_li = tok_li[:, 0]
    else:
        # Generation time: decode every sentence once per target category,
        # so the returned frame is three times the input size.  Passing
        # subdf.index as noi_idx makes every row use substituted verbs.
        cats = ['pos', 'equal', 'neg']
        tok_li = []
        retdf = pd.DataFrame()
        if False:  # dead code: human-evaluation filtering, disabled
            df = dev_he(df)
        for cat in cats:
            subdf = df.copy()
            subdf['descat'] = cat
            subdf['cat'] = df['oricat']
            tem = [sen_in(sen, subdf.index, train_time=train_time, para=para) for sen in subdf.iterrows()]
            tem = np.array(tem)
            subdf['v_supplied'] = tem[:, 1]
            tem = tem[:, 0]
            tok_li.extend(tem)
            retdf = retdf.append(subdf)
    if not train_time:
        return tok_li, retdf
    # Pad all token lists to a common length before wrapping in a Dataset.
    tok_li = add_pad(tok_li, tokenizer_dr)
    dataset = Dataset_dr(list_IDs=tok_li)
    return dataset
def get_label_dr(tokenizer, x, g=False):
    """Build LM labels from input ids, masking everything outside the target.

    Positions before the second '<start>' and after '<end>' are set to -1 so
    the loss ignores them.

    Args:
        tokenizer: tokenizer providing bos/eos ids ('<start>'/'<end>').
        x: LongTensor of shape (batch, max_sen_len) of input token ids.
        g: if True, use end index 2*i+1 per row -- presumably each row then
           contains two '<end>' tokens (generation layout); TODO confirm.
    """
    label = x.clone()
    # Coordinates of every '<start>' / '<end>' occurrence as
    # (batch_index, position) pairs in scan order.
    start_inds = ((x == tokenizer.bos_token_id).nonzero())
    end_inds = ((x == tokenizer.eos_token_id).nonzero())
    for i in range(x.size()[0]):
        # do not include the last cls token
        end_pos = i
        if g:
            end_pos = 2 * i + 1
        # Each row holds two '<start>' tokens; the target begins right after
        # the second one, hence occurrence index 2*i+1.
        startind = start_inds[2*i+1][1].item() + 1
        endind = end_inds[end_pos][1].item() + 1
        # do not include second start
        label[i][0:startind] = torch.FloatTensor([-1 for _ in range(startind)])
        # include end token
        # NOTE(review): max_sen_len is a module-level constant defined
        # elsewhere in this file (not visible in this chunk).
        label[i][endind:] = torch.FloatTensor([-1 for _ in range(max_sen_len - endind)])
    return label
def parse_model_inputs_dr(local_labels):
    """Return (input ids, masked labels) for one (batch, seq) id tensor."""
    inputs = local_labels
    return inputs, get_label_dr(tokenizer_dr, inputs)
| [
"pandas.DataFrame",
"torch.ones",
"os.path.abspath",
"dataset.Dataset_dr",
"pandas.read_csv",
"torch.cuda.is_available",
"numpy.array",
"torch.zeros"
] | [((1956, 1980), 'pandas.read_csv', 'pd.read_csv', (['ROC_TEST_HE'], {}), '(ROC_TEST_HE)\n', (1967, 1980), True, 'import pandas as pd\n'), ((3817, 3838), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (3832, 3838), False, 'import os\n'), ((447, 472), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (470, 472), False, 'import torch\n'), ((3897, 3911), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (3908, 3911), True, 'import pandas as pd\n'), ((4973, 5000), 'dataset.Dataset_dr', 'Dataset_dr', ([], {'list_IDs': 'tok_li'}), '(list_IDs=tok_li)\n', (4983, 5000), False, 'from dataset import Dataset_dr\n'), ((1082, 1126), 'torch.ones', 'torch.ones', (['(tokenizer.vocab_size + num_added)'], {}), '(tokenizer.vocab_size + num_added)\n', (1092, 1126), False, 'import torch\n'), ((1164, 1209), 'torch.zeros', 'torch.zeros', (['(tokenizer.vocab_size + num_added)'], {}), '(tokenizer.vocab_size + num_added)\n', (1175, 1209), False, 'import torch\n'), ((4110, 4126), 'numpy.array', 'np.array', (['tok_li'], {}), '(tok_li)\n', (4118, 4126), True, 'import numpy as np\n'), ((4312, 4326), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4324, 4326), True, 'import pandas as pd\n'), ((4667, 4680), 'numpy.array', 'np.array', (['tem'], {}), '(tem)\n', (4675, 4680), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import imutils
import datetime
import base64
import re
import time
import socketio
import eventlet
import os
from pyzbar import pyzbar
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponseRedirect
from django.urls import reverse
from webptools import dwebp
from django.conf import settings
from .models import Video, Section
from django.utils import timezone
# from engineio.payload import Payload
# Payload.max_decode_packets = 500
# from django.shortcuts import render
# [LIVE STREAMING]: Async mode & thread.
# Set async_mode to 'threading', 'eventlet', 'gevent' or 'gevent_uwsgi' to
# force a mode else, the best mode is selected automatically from what's
# installed - Reference: https://github.com/miguelgrinberg/python-socketio/blob/master/examples/server/wsgi/django_example/socketio_app/views.py.
async_mode = None # Asynchronous mode.
# Socket.IO server instance shared by all the connection handlers below.
sio = socketio.Server(async_mode=async_mode)
thread = None  # background-thread handle placeholder (not used in this view)
def index(request):
    """Render the decoder page, or bounce anonymous users to the login form."""
    if not request.user.is_authenticated:
        login_url = reverse('login')
        next_url = reverse('qr_bar_decoder:index')
        return HttpResponseRedirect(f"{login_url}?next={next_url}")
    return render(
        request,
        "qr_bar_decoder/index.html",
    )
@sio.event
def connect(sid, environment):
    """Register the per-connection Socket.IO handlers for one client.

    The handlers share two closure variables: `frames`, the frames buffered
    since the section started, and `out`, the cv2 video writer for the
    section currently being recorded (None until 'START SECTION').
    Note: an infinite loop here would prevent further client connections.
    Clients must follow the START / stream / STOP sequence to record video.
    """
    print(f"connect {sid}")
    frames = []
    out = None

    @sio.on("START SECTION")
    def process_start_section(sid, data):
        """Create the section media directory and open the video writer."""
        # BUG FIX: was 'global out', which assigned a module-level name while
        # the other handlers read the (never-assigned) closure variable, so
        # 'STOP SECTION' crashed on out=None.
        nonlocal out
        section_id = data
        try:
            os.mkdir(f"{settings.MEDIA_ROOT}/{section_id}")
        except FileExistsError: # Built-in exception.
            print("FileExistsError: [WinError 183] Cannot create a file when that file already exists")
        # 0x00000021 is an MP4-compatible fourcc; see
        # https://stackoverflow.com/questions/49530857/python-opencv-video-format-play-in-browser
        out = cv2.VideoWriter(f"{settings.MEDIA_ROOT}/{section_id}/capture.mp4", 0x00000021, 24, (settings.VIDEO_WIDTH, settings.VIDEO_HEIGHT))
        s = Section.objects.get(pk=section_id)
        capture = Video(name=timezone.now(), url=f"{settings.DOMAIN_NAME}{settings.MEDIA_URL}{section_id}/capture.mp4", section=s)
        capture.save()

    @sio.on("STOP SECTION")
    def process_stop_section(sid, data):
        """Flush the buffered frames into the video file and close it."""
        # BUG FIX: was 'global frames', which raised NameError (no
        # module-level 'frames' exists) and would not have reset the closure
        # buffer used by the streaming handler.
        nonlocal frames
        for frame in frames:
            out.write(frame)
        frames = []
        out.release()

    @sio.on("Live Streaming Package")
    def process_live_streaming_package(sid, data):
        """Decode one base64 webp frame, buffer it, and emit any barcodes."""
        img = base64.b64decode(re.sub("^data:image/webp;base64,", "", data))
        npimg = np.fromstring(img, dtype=np.uint8)
        image = imutils.resize(cv2.imdecode(npimg, 1), width=settings.VIDEO_WIDTH)
        frames.append(image)
        barcodes = pyzbar.decode(image) # symbols=[pyzbar.ZBarSymbol.CODE128]
        # NOTE(review): reopened in 'w' mode on every frame (truncating the
        # file) and never closed -- consider opening once in append mode.
        csv = open("./barcodes.csv", "w")
        found = set()
        for barcode in barcodes:
            print(barcode)
            (x, y, w, h) = barcode.rect
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
            barcode_data = barcode.data.decode("utf-8")
            # Broadcast the decoded value to all connected clients.
            sio.emit("Barcodes", {"data": barcode_data})
            barcode_type = barcode.type
            text = "{} ({})".format(barcode_data, barcode_type)
            cv2.putText(image, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
            print("[INFO] Found {} barcode: {}".format(barcode_type, barcode_data))
            if barcode_data not in found:
                csv.write("{},{}\n".format(datetime.datetime.now(), barcode_data))
                csv.flush()
                found.add(barcode_data)
| [
"os.mkdir",
"cv2.putText",
"django.utils.timezone.now",
"pyzbar.pyzbar.decode",
"socketio.Server",
"cv2.imdecode",
"datetime.datetime.now",
"cv2.rectangle",
"django.urls.reverse",
"cv2.VideoWriter",
"django.shortcuts.render",
"re.sub",
"numpy.fromstring"
] | [((943, 981), 'socketio.Server', 'socketio.Server', ([], {'async_mode': 'async_mode'}), '(async_mode=async_mode)\n', (958, 981), False, 'import socketio\n'), ((1854, 1979), 'cv2.VideoWriter', 'cv2.VideoWriter', (['f"""{settings.MEDIA_ROOT}/{section_id}/capture.mp4"""', '(33)', '(24)', '(settings.VIDEO_WIDTH, settings.VIDEO_HEIGHT)'], {}), "(f'{settings.MEDIA_ROOT}/{section_id}/capture.mp4', 33, 24,\n (settings.VIDEO_WIDTH, settings.VIDEO_HEIGHT))\n", (1869, 1979), False, 'import cv2\n'), ((3038, 3072), 'numpy.fromstring', 'np.fromstring', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (3051, 3072), True, 'import numpy as np\n'), ((3293, 3313), 'pyzbar.pyzbar.decode', 'pyzbar.decode', (['image'], {}), '(image)\n', (3306, 3313), False, 'from pyzbar import pyzbar\n'), ((1062, 1106), 'django.shortcuts.render', 'render', (['request', '"""qr_bar_decoder/index.html"""'], {}), "(request, 'qr_bar_decoder/index.html')\n", (1068, 1106), False, 'from django.shortcuts import render\n'), ((1625, 1672), 'os.mkdir', 'os.mkdir', (['f"""{settings.MEDIA_ROOT}/{section_id}"""'], {}), "(f'{settings.MEDIA_ROOT}/{section_id}')\n", (1633, 1672), False, 'import os\n'), ((2883, 2927), 're.sub', 're.sub', (['"""^data:image/webp;base64,"""', '""""""', 'data'], {}), "('^data:image/webp;base64,', '', data)\n", (2889, 2927), False, 'import re\n'), ((3098, 3120), 'cv2.imdecode', 'cv2.imdecode', (['npimg', '(1)'], {}), '(npimg, 1)\n', (3110, 3120), False, 'import cv2\n'), ((3500, 3560), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (3513, 3560), False, 'import cv2\n'), ((3889, 3977), 'cv2.putText', 'cv2.putText', (['image', 'text', '(x, y - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)', '(2)'], {}), '(image, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0,\n 255), 2)\n', (3900, 3977), False, 'import cv2\n'), ((2137, 2151), 'django.utils.timezone.now', 
'timezone.now', ([], {}), '()\n', (2149, 2151), False, 'from django.utils import timezone\n'), ((1158, 1174), 'django.urls.reverse', 'reverse', (['"""login"""'], {}), "('login')\n", (1165, 1174), False, 'from django.urls import reverse\n'), ((1182, 1213), 'django.urls.reverse', 'reverse', (['"""qr_bar_decoder:index"""'], {}), "('qr_bar_decoder:index')\n", (1189, 1213), False, 'from django.urls import reverse\n'), ((4111, 4134), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4132, 4134), False, 'import datetime\n')] |
import numpy as np
import math
from math import pi
from math import log
import torch
import torch.nn.functional as F
from .submodule import calc_A_re, calc_A_im
from .submodule import display_result
def DP_DRT(freq_vec, Z_exp, lambda_limit=1e-4, learning_rate=1e-5, display=False):
    """Fit a deep-prior DRT model to one impedance spectrum.

    Returns:
        (gamma, R_inf): the fitted DRT vector and the ohmic resistance.
        When `display` is exactly True, the fit is also plotted.
    """
    gamma, R_inf = train_model(freq_vec, Z_exp, lambda_limit, learning_rate)
    if display is True:
        display_result(freq_vec, Z_exp, gamma, R_inf)
    return gamma, R_inf
def train_model(freq_vec, Z_exp, lambda_limit, learning_rate):
    """Train the deep-prior network against one impedance spectrum.

    Args:
        freq_vec: measurement frequencies (1-D array of length N_freqs).
        Z_exp: complex experimental impedance at those frequencies.
        lambda_limit: relative-loss-change threshold for early stopping.
        learning_rate: Adam learning rate.

    Returns:
        (gamma, R_inf): the fitted DRT vector and the ohmic resistance.
    """
    N_freqs = len(freq_vec)
    A_re = calc_A_re(freq_vec)
    A_im = calc_A_im(freq_vec)
    # transform impedance variables & DRT matrices into tensors
    Z_exp_re_torch = torch.from_numpy(np.real(Z_exp)).type(torch.FloatTensor).reshape(1,N_freqs)
    Z_exp_im_torch = torch.from_numpy(np.imag(Z_exp)).type(torch.FloatTensor).reshape(1,N_freqs)
    A_re_torch = torch.from_numpy(A_re.T).type(torch.FloatTensor)
    A_im_torch = torch.from_numpy(A_im.T).type(torch.FloatTensor)
    # create Deep Prior model
    vanilla_model = make_dp_model(N_freqs)
    model = vanilla_model()
    # model variables: random constant for input node (zeta), learning rate, optimizer
    zeta = torch.randn(1, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    # Regularization/stopping criteria:
    # 1. lambd = |(new_loss-old_loss)/old_loss| < lambda_limit
    # 2. max_iteration = 100,001
    iteration = 0
    lambd = 1
    old_loss = 1
    while iteration < 100001 and lambd > lambda_limit:
        # Forward pass: compute predicted y by passing x to the model.
        gamma_torch = model(zeta)
        # Compute the loss
        loss = loss_fn(gamma_torch, Z_exp_re_torch, Z_exp_im_torch, A_re_torch, A_im_torch)
        # zero all gradients (purge any cache)
        optimizer.zero_grad()
        # compute the gradient of the loss with respect to model parameters
        loss.backward()
        # Update the optimizer
        optimizer.step()
        # Stop conditions:
        iteration = iteration + 1
        # Only start checking convergence after a warm-up of 5000 steps.
        if iteration > 5000:
            lambd = abs((loss.item()-old_loss)/old_loss)
        old_loss = loss.item()
    # BUG FIX: report once after training finishes; the original printed one
    # of these messages on every iteration of the loop.
    if iteration >= 100000:
        print("Max iteration reached")
    else:
        print("Early stop. Number of iteration: ",str(iteration))
    gamma = gamma_torch.detach().numpy().reshape(-1)
    # The last network output is R_inf; the rest is the DRT vector.
    R_inf = gamma[-1]
    gamma = gamma[:-1]
    return gamma, R_inf
def make_dp_model(N_freqs):
    """
    Create the base deep prior model ("vanilla model") and return it as a
    class object.  The network maps one scalar input to N_freqs+1 outputs
    (the DRT vector plus R_inf), kept positive by a final softplus.
    """
    D_in = 1
    D_out = N_freqs+1
    class vanilla_model(torch.nn.Module):
        def __init__(self):
            super(vanilla_model, self).__init__()
            self.fct_1 = torch.nn.Linear(D_in, N_freqs)
            self.fct_2 = torch.nn.Linear(N_freqs, N_freqs)
            self.fct_3 = torch.nn.Linear(N_freqs, N_freqs)
            self.fct_4 = torch.nn.Linear(N_freqs, D_out)
            # Start from all-zero weights (biases keep their default init).
            for layer in (self.fct_1, self.fct_2, self.fct_3, self.fct_4):
                torch.nn.init.zeros_(layer.weight)
        # forward
        def forward(self, zeta):
            hidden = F.elu(self.fct_1(zeta))
            hidden = F.elu(self.fct_2(hidden))
            hidden = F.elu(self.fct_3(hidden))
            # softplus keeps the predicted DRT non-negative
            return F.softplus(self.fct_4(hidden), beta = 5)
    return vanilla_model
def loss_fn(output, Z_exp_re_torch, Z_exp_im_torch, A_re_torch, A_im_torch):
    """
    Squared-error misfit of the DRT fit.

    The network output is split into the DRT vector (all but the last
    column) and R_inf (last column); the modelled impedance is
    R_inf + gamma @ A_re for the real part and gamma @ A_im for the
    imaginary part.
    """
    gamma = output[:, 0:-1]
    R_inf = output[:, -1]
    residual_re = R_inf + torch.mm(gamma, A_re_torch) - Z_exp_re_torch
    residual_im = torch.mm(gamma, A_im_torch) - Z_exp_im_torch
    return torch.sum(residual_re ** 2) + torch.sum(residual_im ** 2)
return MSE | [
"torch.randn",
"torch.mm",
"torch.nn.init.zeros_",
"numpy.imag",
"numpy.real",
"torch.nn.Linear",
"torch.from_numpy"
] | [((1206, 1223), 'torch.randn', 'torch.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (1217, 1223), False, 'import torch\n'), ((892, 916), 'torch.from_numpy', 'torch.from_numpy', (['A_re.T'], {}), '(A_re.T)\n', (908, 916), False, 'import torch\n'), ((958, 982), 'torch.from_numpy', 'torch.from_numpy', (['A_im.T'], {}), '(A_im.T)\n', (974, 982), False, 'import torch\n'), ((2731, 2761), 'torch.nn.Linear', 'torch.nn.Linear', (['D_in', 'N_freqs'], {}), '(D_in, N_freqs)\n', (2746, 2761), False, 'import torch\n'), ((2787, 2820), 'torch.nn.Linear', 'torch.nn.Linear', (['N_freqs', 'N_freqs'], {}), '(N_freqs, N_freqs)\n', (2802, 2820), False, 'import torch\n'), ((2846, 2879), 'torch.nn.Linear', 'torch.nn.Linear', (['N_freqs', 'N_freqs'], {}), '(N_freqs, N_freqs)\n', (2861, 2879), False, 'import torch\n'), ((2905, 2936), 'torch.nn.Linear', 'torch.nn.Linear', (['N_freqs', 'D_out'], {}), '(N_freqs, D_out)\n', (2920, 2936), False, 'import torch\n'), ((2996, 3035), 'torch.nn.init.zeros_', 'torch.nn.init.zeros_', (['self.fct_1.weight'], {}), '(self.fct_1.weight)\n', (3016, 3035), False, 'import torch\n'), ((3048, 3087), 'torch.nn.init.zeros_', 'torch.nn.init.zeros_', (['self.fct_2.weight'], {}), '(self.fct_2.weight)\n', (3068, 3087), False, 'import torch\n'), ((3100, 3139), 'torch.nn.init.zeros_', 'torch.nn.init.zeros_', (['self.fct_3.weight'], {}), '(self.fct_3.weight)\n', (3120, 3139), False, 'import torch\n'), ((3152, 3191), 'torch.nn.init.zeros_', 'torch.nn.init.zeros_', (['self.fct_4.weight'], {}), '(self.fct_4.weight)\n', (3172, 3191), False, 'import torch\n'), ((3729, 3766), 'torch.mm', 'torch.mm', (['output[:, 0:-1]', 'A_im_torch'], {}), '(output[:, 0:-1], A_im_torch)\n', (3737, 3766), False, 'import torch\n'), ((3645, 3682), 'torch.mm', 'torch.mm', (['output[:, 0:-1]', 'A_re_torch'], {}), '(output[:, 0:-1], A_re_torch)\n', (3653, 3682), False, 'import torch\n'), ((719, 733), 'numpy.real', 'np.real', (['Z_exp'], {}), '(Z_exp)\n', (726, 733), True, 'import numpy as np\n'), 
((816, 830), 'numpy.imag', 'np.imag', (['Z_exp'], {}), '(Z_exp)\n', (823, 830), True, 'import numpy as np\n')] |
import numpy as np
from tqdm.auto import tqdm
import iisignature
from joblib import Parallel, delayed
import utils
class Price(object):
    """Abstract price model: subclasses implement generate() to sample one path."""
    def __init__(self, *args, **kwargs):
        pass
    @staticmethod
    def _sig(path, order):
        """Signature of the transformed path, with a leading 1 prepended."""
        return np.r_[1., iisignature.sig(utils.transform(path), order)]
    def generate(self):
        """Generate a sample path (must be overridden by subclasses)."""
        raise NotImplementedError("Generator not implemented")
    def _generate(self, seed):
        """Seed numpy's RNG, then sample one path (used by parallel workers)."""
        np.random.seed(seed)
        return self.generate()
    def _generate_paths(self, n_paths=1000):
        """Sample n_paths paths in parallel, one distinct seed per path."""
        seeds = tqdm(range(n_paths), desc="Building paths")
        return Parallel(n_jobs=-1)(delayed(self._generate)(s) for s in seeds)
    def build(self, *args, n_paths=1000, order=6, **kwargs):
        """Build sample paths and their expected signature (ES)."""
        samples = self._generate_paths(*args, n_paths=n_paths, **kwargs)
        # A generator may return (signal, path) pairs instead of bare paths.
        signals = None
        if isinstance(samples[0], tuple):
            signals = [pair[0] for pair in samples]
            samples = [pair[1] for pair in samples]
        # Compute the signature of every path in parallel.
        sigs = Parallel(n_jobs=-1)(delayed(self._sig)(p, order) \
            for p in tqdm(samples, desc="Computing signatures"))
        # The expected signature is the empirical mean over paths.
        ES = np.mean(sigs, axis=0)
        if signals is None:
            return np.array(samples), ES
        return np.array(signals), np.array(samples), ES
| [
"numpy.random.seed",
"tqdm.auto.tqdm",
"utils.transform",
"numpy.array",
"numpy.mean",
"joblib.Parallel",
"joblib.delayed"
] | [((531, 551), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (545, 551), True, 'import numpy as np\n'), ((1391, 1412), 'numpy.mean', 'np.mean', (['sigs'], {'axis': '(0)'}), '(sigs, axis=0)\n', (1398, 1412), True, 'import numpy as np\n'), ((645, 664), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (653, 664), False, 'from joblib import Parallel, delayed\n'), ((1213, 1232), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1221, 1232), False, 'from joblib import Parallel, delayed\n'), ((1461, 1476), 'numpy.array', 'np.array', (['paths'], {}), '(paths)\n', (1469, 1476), True, 'import numpy as np\n'), ((1514, 1531), 'numpy.array', 'np.array', (['signals'], {}), '(signals)\n', (1522, 1531), True, 'import numpy as np\n'), ((1533, 1548), 'numpy.array', 'np.array', (['paths'], {}), '(paths)\n', (1541, 1548), True, 'import numpy as np\n'), ((332, 353), 'utils.transform', 'utils.transform', (['path'], {}), '(path)\n', (347, 353), False, 'import utils\n'), ((665, 688), 'joblib.delayed', 'delayed', (['self._generate'], {}), '(self._generate)\n', (672, 688), False, 'from joblib import Parallel, delayed\n'), ((1233, 1251), 'joblib.delayed', 'delayed', (['self._sig'], {}), '(self._sig)\n', (1240, 1251), False, 'from joblib import Parallel, delayed\n'), ((1314, 1354), 'tqdm.auto.tqdm', 'tqdm', (['paths'], {'desc': '"""Computing signatures"""'}), "(paths, desc='Computing signatures')\n", (1318, 1354), False, 'from tqdm.auto import tqdm\n')] |
from typing import Callable
import numpy as np
from ..situation import Situation
from ..utils import get_rng
def play_strategies(game,
                    strategies,
                    *,
                    rng=None,
                    seed=None,
                    start: Situation = None,
                    stop_when: Callable = None,
                    max_moves: int = None):
    """
    Play the game once with the given strategies (one per player) and return
    the final situation reached.

    Play begins at `start` (default `game.start()`) and proceeds until a
    terminal situation is hit, `stop_when(situation)` returns True, or
    `max_moves` actions have been taken (whichever limits are given).
    """
    rng = get_rng(rng=rng, seed=seed)
    if len(strategies) != game.players:
        raise ValueError("One strategy per player required")
    state = game.start() if start is None else start
    steps = 0
    while not state.is_terminal():
        if stop_when is not None and stop_when(state):
            break
        if max_moves is not None and steps >= max_moves:
            break
        # Chance nodes carry their own distribution; otherwise ask the
        # acting player's strategy for one.
        if state.is_chance():
            dist = state.chance
        else:
            dist = strategies[state.player].strategy(state)
        assert len(dist) == len(state.actions)
        choice = rng.choice(len(state.actions), p=dist)
        state = game.play(state, state.actions[choice])
        steps += 1
    return state
def sample_payoff(game, strategies, iterations=100, *, start=None, rng=None, seed=None):
    """
    Estimate the expected payoff by playing the game `iterations` times with
    `strategies`.  Returns `(mean payoffs, payoff variances)` as two numpy
    arrays.
    """
    rng = get_rng(rng, seed)
    begin = game.start() if start is None else start
    results = [play_strategies(game, strategies, start=begin, rng=rng).payoff
               for _ in range(iterations)]
    return np.mean(results, axis=0), np.var(results, axis=0)
| [
"numpy.mean",
"numpy.var"
] | [((1834, 1858), 'numpy.mean', 'np.mean', (['payoffs'], {'axis': '(0)'}), '(payoffs, axis=0)\n', (1841, 1858), True, 'import numpy as np\n'), ((1860, 1883), 'numpy.var', 'np.var', (['payoffs'], {'axis': '(0)'}), '(payoffs, axis=0)\n', (1866, 1883), True, 'import numpy as np\n')] |
import argparse
import time
import os
import time
import logging, numpy as np
from src import utils as U
from utils import Model, update_parameters
logging.getLogger().setLevel(logging.INFO)
def init_parser():
    """Build the command-line argument parser for the recognition tool.

    Returns:
        argparse.ArgumentParser: parser with all setting, processing,
        visualization, dataloader, model, optimizer and scheduler flags.
    """
    parser = argparse.ArgumentParser(description='Method for Skeleton-based Action Recognition')
    add = parser.add_argument

    # Setting
    add('--config', '-c', type=str, default='', help='ID of the using config', required=True)
    add('--gpus', '-g', type=int, nargs='+', default=[], help='Using GPUs')
    add('--seed', '-s', type=int, default=1, help='Random seed')
    add('--pretrained_path', '-pp', type=str, default='', help='Path to pretrained models')
    add('--work_dir', '-w', type=str, default='', help='Work dir')
    add('--no_progress_bar', '-np', default=False, action='store_true', help='Do not show progress bar')
    add('--delay_hours', '-dh', type=float, default=0, help='Delay to run')

    # Processing
    add('--debug', '-db', default=False, action='store_true', help='Debug')
    add('--resume', '-r', default=False, action='store_true', help='Resume from checkpoint')
    add('--evaluate', '-e', default=False, action='store_true', help='Evaluate')
    add('--extract', '-ex', default=False, action='store_true', help='Extract')
    add('--visualize', '-v', default=False, action='store_true', help='Visualization')
    add('--generate_data', '-gd', default=False, action='store_true', help='Generate skeleton data')

    # Visualization
    add('--visualization_class', '-vc', type=int, default=0, help='Class: 1 ~ 60, 0 means true class')
    add('--visualization_sample', '-vs', type=int, default=0, help='Sample: 0 ~ batch_size-1')
    add('--visualization_frames', '-vf', type=int, nargs='+', default=[], help='Frame: 0 ~ max_frame-1')

    # Dataloader
    add('--dataset', '-d', type=str, default='', help='Select dataset')
    add('--dataset_args', default={}, help='Args for creating dataset')

    # Model
    add('--model_type', '-mt', type=str, default='', help='Args for creating model')
    add('--model_args', default={}, help='Args for creating model')

    # Optimizer
    add('--optimizer', '-o', type=str, default='', help='Initial optimizer')
    add('--optimizer_args', default={}, help='Args for optimizer')

    # LR_Scheduler
    add('--lr_scheduler', '-ls', type=str, default='', help='Initial learning rate scheduler')
    add('--scheduler_args', default={}, help='Args for scheduler')

    return parser
if __name__ == '__main__':
    # No-op chdir kept for parity with the original workflow.
    os.chdir(os.getcwd())

    # Parse command-line arguments; precedence is cmd > yaml > default.
    cli_parser = init_parser()
    cli_args, _ = cli_parser.parse_known_args()
    cli_args = update_parameters(cli_parser, cli_args)

    # Build the action-recognition model and restore pretrained weights.
    model = Model(cli_args, './workdir')
    action_model = './workdir/2022-01-25 17-18-33/2001_EfficientGCN-B4_my_dataset.pth.tar'
    model.load(action_model)

    # Time 50 predictions on random keypoint tensors and report the mean.
    total_predtime = 0
    logging.info('Making predictions on random generated data')
    for _ in range(50):
        tick = time.time()
        dummy_kps = np.random.randn(2, 600, 18, 1)
        model.preprocess(dummy_kps)
        actions = model.predict()
        print(actions)
        total_predtime += time.time() - tick
    logging.info('Average prediction time : {}'.format(total_predtime / 50))
| [
"utils.Model",
"argparse.ArgumentParser",
"utils.update_parameters",
"os.getcwd",
"numpy.random.randn",
"time.time",
"logging.info",
"logging.getLogger"
] | [((229, 317), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Method for Skeleton-based Action Recognition"""'}), "(description=\n 'Method for Skeleton-based Action Recognition')\n", (252, 317), False, 'import argparse\n'), ((3066, 3097), 'utils.update_parameters', 'update_parameters', (['parser', 'args'], {}), '(parser, args)\n', (3083, 3097), False, 'from utils import Model, update_parameters\n'), ((3147, 3171), 'utils.Model', 'Model', (['args', '"""./workdir"""'], {}), "(args, './workdir')\n", (3152, 3171), False, 'from utils import Model, update_parameters\n'), ((3320, 3379), 'logging.info', 'logging.info', (['"""Making predictions on random generated data"""'], {}), "('Making predictions on random generated data')\n", (3332, 3379), False, 'import logging, numpy as np\n'), ((152, 171), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (169, 171), False, 'import logging, numpy as np\n'), ((2900, 2911), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2909, 2911), False, 'import os\n'), ((3420, 3431), 'time.time', 'time.time', ([], {}), '()\n', (3429, 3431), False, 'import time\n'), ((3451, 3481), 'numpy.random.randn', 'np.random.randn', (['(2)', '(600)', '(18)', '(1)'], {}), '(2, 600, 18, 1)\n', (3466, 3481), True, 'import logging, numpy as np\n'), ((3591, 3602), 'time.time', 'time.time', ([], {}), '()\n', (3600, 3602), False, 'import time\n')] |
import string
import numpy as np
import tensorflow as tf
from distutils.version import LooseVersion
TF_VERSION_MIN = '1.1'


def check_tf(tf_):
    """Assert that *tf_* is newer than TF_VERSION_MIN and log the version."""
    tf_version = tf_.__version__
    assert LooseVersion(tf_version) > LooseVersion(TF_VERSION_MIN), \
        "Please use TensorFlow {} or later".format(TF_VERSION_MIN)
    _log('Using TensorFlow {}'.format(tf_version))
def test_case(f):
def run(*args, **kwargs):
f(*args, **kwargs)
_success_message()
return run
def _log(message):
    """Write *message* to stdout."""
    print(message)
def _success_message():
    """Report that the current batch of assertions passed."""
    _log('✓ Tests passed')
def _run_test(f, actuals, expected, messages):
    """Assert f(*args) == exp for each aligned (args, exp, msg) triple."""
    for call_args, exp, msg in zip(actuals, expected, messages):
        assert f(*call_args) == exp, msg
def _evaluate_tensor(tensor):
    """Evaluate *tensor* in a fresh TF session and return its value."""
    session = tf.Session()
    return session.run(tensor)
def _get_random_np_array():
    """Return a random-shaped 2-D array filled with one random integer."""
    shape = np.random.randint(1, 5, 2)
    fill = np.random.randint(1, 100)
    return np.full(shape, fill_value=fill)
def _get_random_string(string_length=10):
    """Return a random lowercase ASCII string of the given length."""
    letters = np.random.choice(list(string.ascii_lowercase), string_length)
    return ''.join(letters)
# Basic tensor operations
@test_case
def test_create_tensor_from_list(f):
    """f(list) must build a tensor holding exactly that list."""
    expected_array = _get_random_np_array()
    expected_list = expected_array.tolist()
    actual = _evaluate_tensor(f(expected_list))
    assert expected_list == actual.tolist()
@test_case
def test_create_tensor_from_np_array(f):
    """f(array, name=...) must build a tensor with the array's contents."""
    np_array_expected = _get_random_np_array()
    tensor_name = _get_random_string()
    tensor = f(np_array_expected, name=tensor_name)
    numpy_array_actual = _evaluate_tensor(tensor)
    # BUG FIX: np.array_equal returns a bool that was previously discarded,
    # so this test could never fail; assert it.
    assert np.array_equal(np_array_expected, numpy_array_actual)
@test_case
def test_get_tensor_name(f):
    """f(tensor) must return the tensor's full name ('<op>:0')."""
    base_name = _get_random_string()
    tensor = tf.constant(value=0, name=base_name)
    assert f(tensor) == '{}:0'.format(base_name)
@test_case
def test_get_tensor_shape(f):
    """f(tensor) must return the static shape as a list."""
    expected_shape = [6, 2]
    assert f(tf.constant(0, shape=expected_shape)) == expected_shape
@test_case
def test_get_tensor_rank(f):
    """f(tensor) must return the tensor's rank (number of dimensions)."""
    shape = [3, 7]
    assert f(tf.constant(0, shape=shape)) == len(shape)
@test_case
def test_get_tensor_dtype(f):
    """f(tensor) must return the tensor's dtype."""
    shape = [3, 7]
    value = 1.2
    dtype = f(tf.constant(value, shape=shape))
    assert dtype == tf.float32
    # BUG FIX: dropped the trailing `return _success_message()` -- the
    # @test_case decorator already reports success, so the old code printed
    # the success message twice for this test only.
@test_case
def test_create_constant_tensor(f):
    """f(value, m, n) must build an (m, n) tensor filled with value."""
    value = 42
    m = 5
    n = 3
    array_tf = _evaluate_tensor(f(value, m, n))
    array_np = np.full(shape=[m, n], fill_value=value)
    # BUG FIX: assert the comparison instead of discarding its result.
    assert np.array_equal(array_tf, array_np)
@test_case
def test_create_fill_tensor(f):
    """Same as the constant-tensor test, but with random value and shape."""
    value = np.random.randint(1, 10)
    m = np.random.randint(1, 10)
    n = np.random.randint(1, 10)
    array_tf = _evaluate_tensor(f(value, m, n))
    array_np = np.full(shape=[m, n], fill_value=value)
    # BUG FIX: assert the comparison instead of discarding its result.
    assert np.array_equal(array_tf, array_np)
# Using scopes
@test_case
def test_create_variable_in_scope(f):
    """The created variable must be namespaced as '<scope>/<name>:0'."""
    var_name = _get_random_string()
    scope = _get_random_string()
    tensor = f(var_name, _get_random_np_array(), scope)
    assert tensor.name == '{}/{}:0'.format(scope, var_name)
@test_case
def test_create_variable_in_nested_scope(f):
    """The variable must carry both scopes: '<outer>/<inner>/<name>:0'."""
    var_name = _get_random_string()
    outer_scope = _get_random_string()
    inner_scope = _get_random_string()
    tensor = f(var_name, _get_random_np_array(), outer_scope, inner_scope)
    assert tensor.name == '{}/{}/{}:0'.format(outer_scope,
                                              inner_scope,
                                              var_name)
# Using multiple graphs
@test_case
def test_get_default_graph(f):
    """f() must return the process-wide default graph object."""
    assert f() is tf.get_default_graph()
@test_case
def test_create_new_graph(f):
    """f() must build a graph distinct from the default one."""
    assert f() is not tf.get_default_graph()
@test_case
def test_get_graph_seed(f):
    """f(graph) must read back the graph-level random seed."""
    seed = np.random.randint(0, 1000)
    graph = tf.Graph()
    with graph.as_default():
        tf.set_random_seed(seed)
    assert f(graph) == seed
@test_case
def test_set_graph_seed(f):
    """f(graph, seed) must set the graph-level random seed."""
    seed = np.random.randint(0, 1000)
    assert f(tf.Graph(), seed).seed == seed
# Math operations
@test_case
def test_add(f):
    """f must add two plain numbers."""
    cases = [(1, 0), (2, 1)]
    outcomes = [1, 3]
    notes = ['add(1, 0) should return 1', 'add(2, 1) should return 3']
    _run_test(f, cases, outcomes, notes)
@test_case
def test_add_rank0_tensors(f):
    """f must add two scalar (rank-0) tensors."""
    x = 1
    y = 2
    z = tf.Session().run(f(x, y))
    assert z == x + y
@test_case
def test_add_rank1_tensors(f):
    """f must add two rank-1 tensors element-wise."""
    xs = [1, 2, 3]
    ys = [6, 5, 4]
    z = tf.Session().run(f(xs, ys))
    assert np.all(z == np.array([a + b for a, b in zip(xs, ys)]))
| [
"numpy.full",
"distutils.version.LooseVersion",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.set_random_seed",
"numpy.random.randint",
"tensorflow.Graph",
"numpy.array_equal",
"tensorflow.get_default_graph"
] | [((898, 924), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)', '(2)'], {}), '(1, 5, 2)\n', (915, 924), True, 'import numpy as np\n'), ((937, 962), 'numpy.random.randint', 'np.random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (954, 962), True, 'import numpy as np\n'), ((974, 1019), 'numpy.full', 'np.full', (['[no_rows, no_cols]'], {'fill_value': 'value'}), '([no_rows, no_cols], fill_value=value)\n', (981, 1019), True, 'import numpy as np\n'), ((1728, 1781), 'numpy.array_equal', 'np.array_equal', (['np_array_expected', 'numpy_array_actual'], {}), '(np_array_expected, numpy_array_actual)\n', (1742, 1781), True, 'import numpy as np\n'), ((1878, 1918), 'tensorflow.constant', 'tf.constant', ([], {'value': '(0)', 'name': 'name_expected'}), '(value=0, name=name_expected)\n', (1889, 1918), True, 'import tensorflow as tf\n'), ((2633, 2672), 'numpy.full', 'np.full', ([], {'shape': '[m, n]', 'fill_value': 'value'}), '(shape=[m, n], fill_value=value)\n', (2640, 2672), True, 'import numpy as np\n'), ((2677, 2711), 'numpy.array_equal', 'np.array_equal', (['array_tf', 'array_np'], {}), '(array_tf, array_np)\n', (2691, 2711), True, 'import numpy as np\n'), ((2769, 2793), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2786, 2793), True, 'import numpy as np\n'), ((2802, 2826), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2819, 2826), True, 'import numpy as np\n'), ((2835, 2859), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2852, 2859), True, 'import numpy as np\n'), ((2943, 2982), 'numpy.full', 'np.full', ([], {'shape': '[m, n]', 'fill_value': 'value'}), '(shape=[m, n], fill_value=value)\n', (2950, 2982), True, 'import numpy as np\n'), ((2987, 3021), 'numpy.array_equal', 'np.array_equal', (['array_tf', 'array_np'], {}), '(array_tf, array_np)\n', (3001, 3021), True, 'import numpy as np\n'), ((4112, 4138), 'numpy.random.randint', 'np.random.randint', 
(['(0)', '(1000)'], {}), '(0, 1000)\n', (4129, 4138), True, 'import numpy as np\n'), ((4147, 4157), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4155, 4157), True, 'import tensorflow as tf\n'), ((4292, 4318), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (4309, 4318), True, 'import numpy as np\n'), ((4327, 4337), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4335, 4337), True, 'import tensorflow as tf\n'), ((273, 297), 'distutils.version.LooseVersion', 'LooseVersion', (['tf_version'], {}), '(tf_version)\n', (285, 297), False, 'from distutils.version import LooseVersion\n'), ((300, 328), 'distutils.version.LooseVersion', 'LooseVersion', (['TF_VERSION_MIN'], {}), '(TF_VERSION_MIN)\n', (312, 328), False, 'from distutils.version import LooseVersion\n'), ((2085, 2112), 'tensorflow.constant', 'tf.constant', (['(0)'], {'shape': 'shape'}), '(0, shape=shape)\n', (2096, 2112), True, 'import tensorflow as tf\n'), ((2221, 2248), 'tensorflow.constant', 'tf.constant', (['(0)'], {'shape': 'shape'}), '(0, shape=shape)\n', (2232, 2248), True, 'import tensorflow as tf\n'), ((2372, 2403), 'tensorflow.constant', 'tf.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (2383, 2403), True, 'import tensorflow as tf\n'), ((3939, 3961), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3959, 3961), True, 'import tensorflow as tf\n'), ((4037, 4059), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4057, 4059), True, 'import tensorflow as tf\n'), ((4191, 4215), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (4209, 4215), True, 'import tensorflow as tf\n'), ((820, 832), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (830, 832), True, 'import tensorflow as tf\n'), ((4721, 4733), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4731, 4733), True, 'import tensorflow as tf\n'), ((4885, 4897), 'tensorflow.Session', 'tf.Session', ([], 
{}), '()\n', (4895, 4897), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
import numpy as np
import scipy
import copy as cp
import warnings
try:
import utils.math_tools as umath
import utils.optimize as uopt
from utils.math_tools import AVAILABLE_OUTPUT_ACTIVATIONS
except ModuleNotFoundError:
import lib.utils.math_tools as umath
import lib.utils.optimize as uopt
from lib.utils.math_tools import AVAILABLE_OUTPUT_ACTIVATIONS
# TODO: validate SGD without minibatches
# TODO: validate SGD with minibatches
# TODO: implement momentum
# TODO: clean up cost function - move outside?
class LogisticRegression:
    """An implementation of Logistic regression."""
    # Set to True once fit() has completed; gates predict()/predict_proba().
    _fit_performed = False

    def __init__(self, solver="lr-gd", activation="sigmoid",
                 max_iter=100, penalty="l2", tol=1e-8, alpha=1.0,
                 momentum=0.0, mini_batch_size=50):
        """Sets up the solver, activation and regularization backends.

        Args:
            solver (str): what kind of solver method to use. Default is
                'lr-gd' (gradient descent). Choices: 'lr-gd', 'gd', 'cg',
                'sga', 'sga-mb', 'nr', 'newton-cg'.
            activation (str): type of activation function to use. Optional,
                default is 'sigmoid'.
            max_iter (int): maximum number of solver iterations. Default
                is 100.
            penalty (str): regularizer to use: 'l1', 'l2', 'elastic_net'
                or None. Optional, default is 'l2'.
            tol (float): convergence tolerance. Optional, default is 1e-8.
            alpha (float): regularization strength. Default is 1.0.
            momentum (float): adds a momentum, in which the current gradient
                depends on the last gradient. Default is 0.0.
            mini_batch_size (int): size of mini-batches. Only available for
                sga-mb. Optional, default is 50.
        """
        self.penalty = penalty
        self.max_iter = max_iter
        self.tol = tol
        self.alpha = alpha
        self.momentum = momentum
        self.mini_batch_size = mini_batch_size

        self._set_optimizer(solver)
        self._set_activation_function(activation)
        self._set_regularization_method(penalty)

    def _set_optimizer(self, solver_method):
        """Instantiate the optimizer matching the solver keyword."""
        self.solver_method = solver_method

        if solver_method == "lr-gd":
            # Steepest descent specialized for Logistic Regression
            self.solver = uopt.LogRegGradientDescent(momentum=self.momentum)
        elif solver_method == "gd":
            # aka Steepest descent
            self.solver = uopt.GradientDescent(momentum=self.momentum)
        elif solver_method == "cg":
            # Conjugate gradient method (no momentum support)
            self._chech_momentum(solver_method)
            self.solver = uopt.ConjugateGradient()
        elif solver_method == "sga":
            # Stochastic Gradient Descent
            self.solver = uopt.SGA(momentum=self.momentum)
        elif solver_method == "sga-mb":
            # Stochastic Gradient Descent with mini batches
            self.solver = uopt.SGA(momentum=self.momentum,
                                  use_minibatches=True,
                                  mini_batch_size=self.mini_batch_size)
        elif solver_method == "nr":
            # Newton-Raphson method (no momentum support)
            self._chech_momentum(solver_method)
            self.solver = uopt.NewtonRaphson()
        elif solver_method == "newton-cg":
            # Newton-CG method (no momentum support)
            self._chech_momentum(solver_method)
            self.solver = uopt.NewtonCG()
        else:
            raise KeyError(("{} not recognized as a solver"
                            " method. Choices: {}.".format(
                                solver_method,
                                ", ".join(uopt.OPTIMIZERS_KEYWORDS))))

    def _chech_momentum(self, solver_method):
        """Raises error for given solver method if momentum is nonzero,
        as solver method do not have momentum capabilities.

        NOTE(review): name keeps the original 'chech' typo for backwards
        compatibility with any external callers.
        """
        if self.momentum != 0:
            raise ValueError("Momentum not available for "
                             "method {}".format(solver_method))

    def _set_regularization_method(self, penalty):
        """Set the penalty/regularization method to use."""
        self.penalty = penalty
        if penalty == "l1":
            self._get_penalty = umath._l1
            self._get_penalty_derivative = umath._l1_derivative
        elif penalty == "l2":
            self._get_penalty = umath._l2
            self._get_penalty_derivative = umath._l2_derivative
        elif penalty == "elastic_net":
            self._get_penalty = umath._elastic_net
            self._get_penalty_derivative = umath._elastic_net_derivative
        elif penalty is None:
            # BUG FIX: was `isinstance(type(penalty), None)`, which raises
            # TypeError (isinstance's second argument must be a type), so
            # penalty=None could never be used.
            self._get_penalty = lambda x: 0.0
            self._get_penalty_derivative = lambda x: 0.0
        else:
            raise KeyError(("{} not recognized as a regularization"
                            " method.".format(penalty)))

    def _set_activation_function(self, activation):
        """Sets the final layer activation."""
        assert activation in AVAILABLE_OUTPUT_ACTIVATIONS, (
            "{} not among available output activation functions: "
            "{}".format(activation, ", ".join(
                AVAILABLE_OUTPUT_ACTIVATIONS)))

        self.activation = activation

        if activation == "sigmoid":
            self._activation = umath.sigmoid
        elif activation == "softmax":
            self._activation = umath.softmax
        else:
            raise KeyError("Final layer activation type '{}' not "
                           "recognized. Available activations:".format(
                               activation, ", ".join(
                                   AVAILABLE_OUTPUT_ACTIVATIONS)))

    @property
    def coef_(self):
        return self.coef

    @coef_.getter
    def coef_(self):
        # Deep copy so callers cannot mutate the fitted coefficients.
        return cp.deepcopy(self.coef)

    @coef_.setter
    def coef_(self, value):
        self.coef = value

    def fit(self, X_train, y_train, eta=1.0):
        """Performs a logistic regression fit for data X_train and y_train.

        Args:
            X_train (ndarray): input data of shape (N, p).
            y_train (ndarray): output one-hot labeled data, shape (N, ...).
            eta (float): learning rate, optional. Default is 1.0.
        """
        X = cp.deepcopy(X_train)
        y = cp.deepcopy(y_train)

        self.N_features, self.p = X.shape

        assert y.shape[0] == self.N_features

        # Adds constant term and increments the number of predictors
        X = np.hstack([np.ones((self.N_features, 1)), X])

        # Adds beta_0 coefficients
        self.coef = np.zeros(self.p + 1)
        self.coef[0] = 1

        # BUG FIX: eta, max_iter and tol were previously hard-coded to
        # 0.01, 100000 and 1e-6, silently ignoring both the `eta` argument
        # and the max_iter/tol values passed to __init__.
        self.coef = self.solver.solve(X, y, self.coef, self._cost_function,
                                      self._cost_function_gradient, eta=eta,
                                      max_iter=self.max_iter, tol=self.tol,
                                      scale=self.N_features,
                                      alpha=self.alpha)

        self._fit_performed = True

    def _cost_function(self, X, y, weights, eps=1e-15):
        """Cost/loss function for logistic regression (cross entropy)
        with an L2-style weight penalty.

        Args:
            X (ndarray): design matrix, shape (N, p).
            y (ndarray): target values, shape (N, labels).
            weights (ndarray): matrix of coefficients (p, labels).
            eps (float): unused; kept for interface compatibility.

        Returns:
            (float): cost value.
        """
        p_ = np.dot(X, weights)
        loss = - np.sum(y*p_ - np.log(1 + np.exp(p_)))
        loss += (0.5*self.alpha*np.dot(weights, weights))
        return loss

    def _cost_function_gradient(self, X, y, weights):
        """Takes the gradient of the cost function w.r.t. the coefficients.

            dC(W)/dw = - X^T * (y - p(X^T * w)) + alpha * w
        """
        grad = np.dot(X.T, (self._activation(self._predict(X, weights)) - y))

        # Adds regularization
        grad += self.alpha*weights
        return grad

    def _cost_function_laplacian(self, X, y, w):
        """Takes the laplacian of the cost function w.r.t. the coefficients.

            d^2C(w) / (w w^T) = X^T W X
        where
            W = p(1 - X^T * w) * p(X^T * w)
        """
        y_pred = self._predict(X, w)
        return X.T @ self._activation(1-y_pred) @ self._activation(y_pred) @ X

    def _predict(self, X, weights):
        """Returns the linear model X @ weights (before activation).

        Args:
            X (ndarray): design matrix/feature matrix, shape (N, p)
            weights (ndarray): coefficients
        """
        return X @ weights

    def score(self, X, y):
        """Returns the mean accuracy of the fit.

        Args:
            X (ndarray): array of shape (N, p - 1) to classify.
            y (ndarray): true labels.

        Returns:
            (float): mean accuracy score for features_test values.
        """
        pred = self.predict(X)
        accuracies = np.sum(self._indicator(pred, y))
        return accuracies/float(y.shape[0])

    def _indicator(self, features_test, labels_test):
        """Returns 1 if features_test[i] == labels_test[i]

        Args:
            features_test (ndarray): array of shape (N, p - 1) to test for
            labels_test (ndarray): true labels

        Returns:
            (array): elements are 1 or 0
        """
        return np.where(features_test == labels_test, 1, 0)

    def predict(self, X):
        """Predicts category 0 or 1 for each row of X.

        Args:
            X (ndarray): design matrix of shape (N, p - 1)
        """
        if not self._fit_performed:
            raise UserWarning("Fit not performed.")

        # Adds intercept
        X = np.hstack([np.ones((X.shape[0], 1)), X])

        # Retrieves probabilitites
        probabilities = self._activation(self._predict(X, self.coef)).ravel()

        # Sets up binary probability
        results_proba = np.asarray([1 - probabilities, probabilities])

        # Moves axis from (2, N_probabilitites) to (N_probabilitites, 2)
        results_proba = np.moveaxis(results_proba, 0, 1)

        # Sets up binary prediction of either 0 or one
        results = np.where(results_proba[:, 0] >= results_proba[:, 1], 0, 1).T

        return results

    def predict_proba(self, X):
        """Predicts probability of a design matrix X of shape (N, p - 1)."""
        if not self._fit_performed:
            raise UserWarning("Fit not performed.")
        X = np.hstack([np.ones((X.shape[0], 1)), X])
        probabilities = self._activation(self._predict(X, self.coef)).ravel()
        results = np.asarray([1 - probabilities, probabilities])
        return np.moveaxis(results, 0, 1)
def __test_logistic_regression():
    """Compares the local LogisticRegression against scikit-learn's
    implementation on the iris data set, asserts matching predictions,
    and plots both fits."""
    from sklearn import datasets
    import sklearn.linear_model as sk_model
    import sklearn.model_selection as sk_modsel
    import matplotlib.pyplot as plt

    iris = datasets.load_iris()
    X = iris["data"][:, 3:]  # petal width
    # NOTE(review): np.int is removed in NumPy >= 1.24 -- confirm the
    # pinned NumPy version before running.
    y = (iris["target"] == 2).astype(np.int)  # 1 if Iris-Virginica, else 0

    # Local implementation parameters
    test_size = 0.1
    penalty = "elastic_net"
    learning_rate = 0.001
    max_iter = 1000000
    # Available solvers:
    # ["lr-gd", "gd", "cg", "sga", "sga-mb", "nr", "newton-cg"]
    solver = "lr-gd"
    # solver = "newton-cg"
    activation = "sigmoid"
    tol = 1e-8
    alpha = 0.1
    momentum = 0.0
    mini_batch_size = 20

    # Sets up test and training data
    X_train, X_test, y_train, y_test = \
        sk_modsel.train_test_split(X, y, test_size=test_size, shuffle=True)

    X_new = np.linspace(0, 3, 100).reshape(-1, 1)

    # Manual logistic regression
    print ("Manual solver method:", solver)
    log_reg = LogisticRegression(penalty=penalty, solver=solver,
                                 activation=activation, tol=tol,
                                 alpha=alpha, momentum=momentum,
                                 mini_batch_size=mini_batch_size,
                                 max_iter=max_iter)

    log_reg.fit(cp.deepcopy(X_train), cp.deepcopy(
        y_train), eta=learning_rate)
    y_proba = log_reg.predict_proba(X_new)
    print("Manual log-reg coefs:", log_reg.coef_)

    # SK-Learn logistic regression
    # elastic_net is not supported by this sklearn configuration; fall
    # back to l2 for the comparison run.
    if penalty == "elastic_net":
        sk_penalty = "l2"
    else:
        sk_penalty = penalty
    sk_log_reg = sk_model.LogisticRegression(fit_intercept=True,
                                             C=1.0/alpha, penalty=sk_penalty,
                                             max_iter=max_iter, tol=tol)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Removes annoing future-warning
        sk_log_reg.fit(cp.deepcopy(X_train), cp.deepcopy(y_train))

    y_sk_proba = sk_log_reg.predict_proba(X_new)
    print("SK-learn log-reg coefs: ", sk_log_reg.intercept_, sk_log_reg.coef_)

    # Sets coef with SK learns coef's for comparing outputs
    manual_coefs = log_reg.coef_
    sk_coefs = np.asarray(
        [sk_log_reg.intercept_[0], sk_log_reg.coef_[0, 0]])

    # =========================================================================
    # Runs tests with SK learn's coefficients, and checks that our
    # implementation's predictions match SK-learn's predictions.
    # =========================================================================
    print("Score before using SK-learn's coefficients: {0:.16f}".format(
        log_reg.score(X_test, y_test)))

    # Sets the coefficients from the SK-Learn to local method
    log_reg.coef_ = sk_coefs

    print("Score after using SK-learn's coefficients: {0:.16f}".format(
        log_reg.score(X_test, y_test)))

    # Asserts that predicted probabilities matches.
    y_sk_proba_compare = sk_log_reg.predict_proba(X_test)
    y_proba_compare = log_reg.predict_proba(X_test)
    assert np.allclose(y_sk_proba_compare, y_proba_compare), (
        "Predicted probabilities do not match: (SKLearn) {} != {} "
        "(local implementation)".format(y_sk_proba_compare, y_proba_compare))

    # Asserts that the labels match
    sk_predict = sk_log_reg.predict(X_test)
    local_predict = log_reg.predict(X_test)
    assert np.allclose(sk_predict, local_predict), (
        "Predicted class labels do not match: (SKLearn) {} != {} "
        "(local implementation)".format(sk_predict, local_predict))

    # Assert that the scores match
    sk_score = sk_log_reg.score(X_test, y_test)
    local_score = log_reg.score(X_test, y_test)
    assert np.allclose(sk_score, local_score), (
        "Predicted score do not match: (SKLearn) {} != {} "
        "(local implementation)".format(sk_score, local_score))

    fig1 = plt.figure()

    # SK-Learn logistic regression
    ax1 = fig1.add_subplot(211)
    ax1.plot(X_new, y_sk_proba[:, 1], "g-", label="Iris-Virginica(SK-Learn)")
    ax1.plot(X_new, y_sk_proba[:, 0], "b--",
             label="Not Iris-Virginica(SK-Learn)")
    ax1.set_title(
        r"SK-Learn versus manual implementation of Logistic Regression")
    ax1.set_ylabel(r"Probability")
    ax1.legend()

    # Manual logistic regression
    ax2 = fig1.add_subplot(212)
    ax2.plot(X_new, y_proba[:, 1], "g-", label="Iris-Virginica(Manual)")
    ax2.plot(X_new, y_proba[:, 0], "b--", label="Not Iris-Virginica(Manual)")
    ax2.set_ylabel(r"Probability")
    ax2.legend()

    # Plots decision boundary
    log_reg.coef_ = manual_coefs

    # Retrieves decision boundaries
    p_false_manual, p_true_manual = log_reg.predict_proba(X_new).T
    p_false_sk, p_true_sk = sk_log_reg.predict_proba(X_new).T

    fig2 = plt.figure()
    ax3 = fig2.add_subplot(111)
    ax3.plot(X_train, y_train, "o")
    ax3.plot(X_new, p_true_manual, label="Manual true")
    ax3.plot(X_new, p_false_manual, label="Manual false")
    ax3.plot(X_new, p_true_sk, label="SK-Learn true")
    ax3.plot(X_new, p_false_sk, label="SK-Learn false")
    ax3.legend()
    ax3.axhline(0.5)
    ax3.axvline(X_new[int(len(X_new)/2.0)])
    ax3.set_title("Decision boundary")

    plt.show()


if __name__ == '__main__':
    __test_logistic_regression()
| [
"sklearn.datasets.load_iris",
"numpy.moveaxis",
"lib.utils.optimize.SGA",
"sklearn.model_selection.train_test_split",
"numpy.allclose",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.exp",
"lib.utils.optimize.NewtonRaphson",
"warnings.simplefilter",
"warnings.catch_warnings",
"numpy.linspace... | [((11730, 11750), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (11748, 11750), False, 'from sklearn import datasets\n'), ((12332, 12399), 'sklearn.model_selection.train_test_split', 'sk_modsel.train_test_split', (['X', 'y'], {'test_size': 'test_size', 'shuffle': '(True)'}), '(X, y, test_size=test_size, shuffle=True)\n', (12358, 12399), True, 'import sklearn.model_selection as sk_modsel\n'), ((13175, 13290), 'sklearn.linear_model.LogisticRegression', 'sk_model.LogisticRegression', ([], {'fit_intercept': '(True)', 'C': '(1.0 / alpha)', 'penalty': 'sk_penalty', 'max_iter': 'max_iter', 'tol': 'tol'}), '(fit_intercept=True, C=1.0 / alpha, penalty=\n sk_penalty, max_iter=max_iter, tol=tol)\n', (13202, 13290), True, 'import sklearn.linear_model as sk_model\n'), ((13796, 13858), 'numpy.asarray', 'np.asarray', (['[sk_log_reg.intercept_[0], sk_log_reg.coef_[0, 0]]'], {}), '([sk_log_reg.intercept_[0], sk_log_reg.coef_[0, 0]])\n', (13806, 13858), True, 'import numpy as np\n'), ((14654, 14702), 'numpy.allclose', 'np.allclose', (['y_sk_proba_compare', 'y_proba_compare'], {}), '(y_sk_proba_compare, y_proba_compare)\n', (14665, 14702), True, 'import numpy as np\n'), ((14988, 15026), 'numpy.allclose', 'np.allclose', (['sk_predict', 'local_predict'], {}), '(sk_predict, local_predict)\n', (14999, 15026), True, 'import numpy as np\n'), ((15308, 15342), 'numpy.allclose', 'np.allclose', (['sk_score', 'local_score'], {}), '(sk_score, local_score)\n', (15319, 15342), True, 'import numpy as np\n'), ((15482, 15494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15492, 15494), True, 'import matplotlib.pyplot as plt\n'), ((16392, 16404), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16402, 16404), True, 'import matplotlib.pyplot as plt\n'), ((16824, 16834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16832, 16834), True, 'import matplotlib.pyplot as plt\n'), ((5991, 6013), 'copy.deepcopy', 'cp.deepcopy', 
(['self.coef'], {}), '(self.coef)\n', (6002, 6013), True, 'import copy as cp\n'), ((6505, 6525), 'copy.deepcopy', 'cp.deepcopy', (['X_train'], {}), '(X_train)\n', (6516, 6525), True, 'import copy as cp\n'), ((6538, 6558), 'copy.deepcopy', 'cp.deepcopy', (['y_train'], {}), '(y_train)\n', (6549, 6558), True, 'import copy as cp\n'), ((6831, 6851), 'numpy.zeros', 'np.zeros', (['(self.p + 1)'], {}), '(self.p + 1)\n', (6839, 6851), True, 'import numpy as np\n'), ((7706, 7724), 'numpy.dot', 'np.dot', (['X', 'weights'], {}), '(X, weights)\n', (7712, 7724), True, 'import numpy as np\n'), ((10203, 10247), 'numpy.where', 'np.where', (['(features_test == labels_test)', '(1)', '(0)'], {}), '(features_test == labels_test, 1, 0)\n', (10211, 10247), True, 'import numpy as np\n'), ((10746, 10792), 'numpy.asarray', 'np.asarray', (['[1 - probabilities, probabilities]'], {}), '([1 - probabilities, probabilities])\n', (10756, 10792), True, 'import numpy as np\n'), ((10891, 10923), 'numpy.moveaxis', 'np.moveaxis', (['results_proba', '(0)', '(1)'], {}), '(results_proba, 0, 1)\n', (10902, 10923), True, 'import numpy as np\n'), ((11431, 11477), 'numpy.asarray', 'np.asarray', (['[1 - probabilities, probabilities]'], {}), '([1 - probabilities, probabilities])\n', (11441, 11477), True, 'import numpy as np\n'), ((11494, 11520), 'numpy.moveaxis', 'np.moveaxis', (['results', '(0)', '(1)'], {}), '(results, 0, 1)\n', (11505, 11520), True, 'import numpy as np\n'), ((12857, 12877), 'copy.deepcopy', 'cp.deepcopy', (['X_train'], {}), '(X_train)\n', (12868, 12877), True, 'import copy as cp\n'), ((12879, 12899), 'copy.deepcopy', 'cp.deepcopy', (['y_train'], {}), '(y_train)\n', (12890, 12899), True, 'import copy as cp\n'), ((13383, 13408), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (13406, 13408), False, 'import warnings\n'), ((13418, 13449), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (13439, 13449), False, 'import warnings\n'), 
((2502, 2552), 'lib.utils.optimize.LogRegGradientDescent', 'uopt.LogRegGradientDescent', ([], {'momentum': 'self.momentum'}), '(momentum=self.momentum)\n', (2528, 2552), True, 'import lib.utils.optimize as uopt\n'), ((7812, 7836), 'numpy.dot', 'np.dot', (['weights', 'weights'], {}), '(weights, weights)\n', (7818, 7836), True, 'import numpy as np\n'), ((10998, 11056), 'numpy.where', 'np.where', (['(results_proba[:, 0] >= results_proba[:, 1])', '(0)', '(1)'], {}), '(results_proba[:, 0] >= results_proba[:, 1], 0, 1)\n', (11006, 11056), True, 'import numpy as np\n'), ((12412, 12434), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(100)'], {}), '(0, 3, 100)\n', (12423, 12434), True, 'import numpy as np\n'), ((13514, 13534), 'copy.deepcopy', 'cp.deepcopy', (['X_train'], {}), '(X_train)\n', (13525, 13534), True, 'import copy as cp\n'), ((13536, 13556), 'copy.deepcopy', 'cp.deepcopy', (['y_train'], {}), '(y_train)\n', (13547, 13556), True, 'import copy as cp\n'), ((2650, 2694), 'lib.utils.optimize.GradientDescent', 'uopt.GradientDescent', ([], {'momentum': 'self.momentum'}), '(momentum=self.momentum)\n', (2670, 2694), True, 'import lib.utils.optimize as uopt\n'), ((6740, 6769), 'numpy.ones', 'np.ones', (['(self.N_features, 1)'], {}), '((self.N_features, 1))\n', (6747, 6769), True, 'import numpy as np\n'), ((10540, 10564), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (10547, 10564), True, 'import numpy as np\n'), ((11305, 11329), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (11312, 11329), True, 'import numpy as np\n'), ((2845, 2869), 'lib.utils.optimize.ConjugateGradient', 'uopt.ConjugateGradient', ([], {}), '()\n', (2867, 2869), True, 'import lib.utils.optimize as uopt\n'), ((2975, 3007), 'lib.utils.optimize.SGA', 'uopt.SGA', ([], {'momentum': 'self.momentum'}), '(momentum=self.momentum)\n', (2983, 3007), True, 'import lib.utils.optimize as uopt\n'), ((3134, 3231), 'lib.utils.optimize.SGA', 'uopt.SGA', ([], 
{'momentum': 'self.momentum', 'use_minibatches': '(True)', 'mini_batch_size': 'self.mini_batch_size'}), '(momentum=self.momentum, use_minibatches=True, mini_batch_size=self\n .mini_batch_size)\n', (3142, 3231), True, 'import lib.utils.optimize as uopt\n'), ((7767, 7777), 'numpy.exp', 'np.exp', (['p_'], {}), '(p_)\n', (7773, 7777), True, 'import numpy as np\n'), ((3443, 3463), 'lib.utils.optimize.NewtonRaphson', 'uopt.NewtonRaphson', ([], {}), '()\n', (3461, 3463), True, 'import lib.utils.optimize as uopt\n'), ((3617, 3632), 'lib.utils.optimize.NewtonCG', 'uopt.NewtonCG', ([], {}), '()\n', (3630, 3632), True, 'import lib.utils.optimize as uopt\n')] |
#!/usr/bin/env python3
import os
import numpy as np
import glob
import re
import json
from collections import defaultdict, OrderedDict
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
from matplotlib.text import Text
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams.update(mpl.rcParamsDefault)
data = defaultdict(lambda:[])
for fn in glob.glob("fill_*.json"):
for bench in json.load(open(fn))["benchmarks"]:
name = bench["name"]
time = min(bench["cpu_time"], bench["real_time"])
m = re.match("fill_([0-9])d<([^>]+)>", name)
tags = m.group(2).split(", ")
dim = int(m.group(1))
label = re.search("fill_([a-z]+).json", fn).group(1)
dist = tags[0]
if label == "boost":
label += "-" + {"dynamic_tag":"D", "static_tag":"S"}[tags[1]] + tags[2][0]
data[dim].append((label, dist, time / dim))
plt.figure()
if os.path.exists("/proc/cpuinfo"):
cpuinfo = open("/proc/cpuinfo").read()
m = re.search("model name\s*:\s*(.+)\n", cpuinfo)
if m:
plt.title(m.group(1))
i = 0
for dim in sorted(data):
v = data[dim]
labels = OrderedDict()
for label, dist, time in v:
if label in labels:
labels[label][dist] = time
else:
labels[label] = {dist: time}
j = 0
for label, d in labels.items():
t1 = d["uniform"]
t2 = d["normal"]
i -= 1
z = float(j) / len(labels)
col = ((1.0-z) * np.array((1.0, 0.0, 0.0))
+ z * np.array((1.0, 1.0, 0.0)))
if label == "root":
col = "k"
if "numpy" in label:
col = "0.6"
if "gsl" in label:
col = "0.3"
tmin = min(t1, t2)
tmax = max(t1, t2)
r1 = Rectangle((0, i), tmax, 1, facecolor=col)
r2 = Rectangle((tmin, i), tmax-tmin, 1, facecolor="none", edgecolor="w", hatch="//////")
plt.gca().add_artist(r1)
plt.gca().add_artist(r2)
tx = Text(-0.1, i+0.5, "%s" % label,
va="center", ha="right", clip_on=False)
plt.gca().add_artist(tx)
j += 1
i -= 1
font0 = FontProperties()
font0.set_weight("bold")
tx = Text(-0.1, i+0.6, "%iD" % dim,
fontproperties=font0, va="center", ha="right", clip_on=False)
plt.gca().add_artist(tx)
plt.ylim(0, i)
plt.xlim(0, 20)
plt.tick_params("y", left=False, labelleft=False)
plt.xlabel("fill time per random input value in nanoseconds (smaller is better)")
plt.savefig("fill_performance.svg")
plt.show()
| [
"collections.defaultdict",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"glob.glob",
"matplotlib.pyplot.tick_params",
"matplotlib.font_manager.FontProperties",
"matplotlib.patches.Rectangle",
"matplotlib.rcParams.update",
"os.path.exists",
"re.search",
"matplotlib.text.Text",
"matplotl... | [((353, 393), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['mpl.rcParamsDefault'], {}), '(mpl.rcParamsDefault)\n', (372, 393), True, 'import matplotlib as mpl\n'), ((402, 426), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (413, 426), False, 'from collections import defaultdict, OrderedDict\n'), ((435, 459), 'glob.glob', 'glob.glob', (['"""fill_*.json"""'], {}), "('fill_*.json')\n", (444, 459), False, 'import glob\n'), ((974, 986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (984, 986), True, 'import matplotlib.pyplot as plt\n'), ((990, 1021), 'os.path.exists', 'os.path.exists', (['"""/proc/cpuinfo"""'], {}), "('/proc/cpuinfo')\n", (1004, 1021), False, 'import os\n'), ((2427, 2441), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', 'i'], {}), '(0, i)\n', (2435, 2441), True, 'import matplotlib.pyplot as plt\n'), ((2442, 2457), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(20)'], {}), '(0, 20)\n', (2450, 2457), True, 'import matplotlib.pyplot as plt\n'), ((2459, 2508), 'matplotlib.pyplot.tick_params', 'plt.tick_params', (['"""y"""'], {'left': '(False)', 'labelleft': '(False)'}), "('y', left=False, labelleft=False)\n", (2474, 2508), True, 'import matplotlib.pyplot as plt\n'), ((2509, 2595), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""fill time per random input value in nanoseconds (smaller is better)"""'], {}), "(\n 'fill time per random input value in nanoseconds (smaller is better)')\n", (2519, 2595), True, 'import matplotlib.pyplot as plt\n'), ((2592, 2627), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fill_performance.svg"""'], {}), "('fill_performance.svg')\n", (2603, 2627), True, 'import matplotlib.pyplot as plt\n'), ((2628, 2638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2636, 2638), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1121), 're.search', 're.search', (['"""model name\\\\s*:\\\\s*(.+)\n"""', 'cpuinfo'], {}), "('model 
name\\\\s*:\\\\s*(.+)\\n', cpuinfo)\n", (1083, 1121), False, 'import re\n'), ((1222, 1235), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1233, 1235), False, 'from collections import defaultdict, OrderedDict\n'), ((2236, 2252), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (2250, 2252), False, 'from matplotlib.font_manager import FontProperties\n'), ((2291, 2390), 'matplotlib.text.Text', 'Text', (['(-0.1)', '(i + 0.6)', "('%iD' % dim)"], {'fontproperties': 'font0', 'va': '"""center"""', 'ha': '"""right"""', 'clip_on': '(False)'}), "(-0.1, i + 0.6, '%iD' % dim, fontproperties=font0, va='center', ha=\n 'right', clip_on=False)\n", (2295, 2390), False, 'from matplotlib.text import Text\n'), ((612, 652), 're.match', 're.match', (['"""fill_([0-9])d<([^>]+)>"""', 'name'], {}), "('fill_([0-9])d<([^>]+)>', name)\n", (620, 652), False, 'import re\n'), ((1857, 1898), 'matplotlib.patches.Rectangle', 'Rectangle', (['(0, i)', 'tmax', '(1)'], {'facecolor': 'col'}), '((0, i), tmax, 1, facecolor=col)\n', (1866, 1898), False, 'from matplotlib.patches import Rectangle\n'), ((1912, 2002), 'matplotlib.patches.Rectangle', 'Rectangle', (['(tmin, i)', '(tmax - tmin)', '(1)'], {'facecolor': '"""none"""', 'edgecolor': '"""w"""', 'hatch': '"""//////"""'}), "((tmin, i), tmax - tmin, 1, facecolor='none', edgecolor='w', hatch\n ='//////')\n", (1921, 2002), False, 'from matplotlib.patches import Rectangle\n'), ((2075, 2148), 'matplotlib.text.Text', 'Text', (['(-0.1)', '(i + 0.5)', "('%s' % label)"], {'va': '"""center"""', 'ha': '"""right"""', 'clip_on': '(False)'}), "(-0.1, i + 0.5, '%s' % label, va='center', ha='right', clip_on=False)\n", (2079, 2148), False, 'from matplotlib.text import Text\n'), ((2402, 2411), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2409, 2411), True, 'import matplotlib.pyplot as plt\n'), ((737, 772), 're.search', 're.search', (['"""fill_([a-z]+).json"""', 'fn'], {}), "('fill_([a-z]+).json', fn)\n", (746, 772), 
False, 'import re\n'), ((1562, 1587), 'numpy.array', 'np.array', (['(1.0, 0.0, 0.0)'], {}), '((1.0, 0.0, 0.0))\n', (1570, 1587), True, 'import numpy as np\n'), ((1609, 1634), 'numpy.array', 'np.array', (['(1.0, 1.0, 0.0)'], {}), '((1.0, 1.0, 0.0))\n', (1617, 1634), True, 'import numpy as np\n'), ((2004, 2013), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2011, 2013), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2046), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2044, 2046), True, 'import matplotlib.pyplot as plt\n'), ((2173, 2182), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2180, 2182), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
from __future__ import print_function
from license_plate import LicensePlateDetector
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2 as cv
import logging
__version__ = "1.0.0"
logging.basicConfig(level=logging.INFO,
format='[%(asctime)8s][%(filename)s][%(levelname)s] - %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
CONSOLE = logging.getLogger("dev")
CONSOLE.setLevel(logging.DEBUG)
CONSOLE.info("车牌识别 %s", __version__)
if "__main__" == __name__:
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=True, help="检测的图像的文件夹路径")
args = vars(ap.parse_args())
for image_path in list(paths.list_images(args["images"])):
image = cv.imread(image_path)
CONSOLE.info(image_path)
if 600 < image.shape[1]:
image = imutils.resize(image, width=600)
lpd = LicensePlateDetector(image)
plates = lpd.detect()
for (i, (lp, lp_box)) in enumerate(plates):
lp_box = np.array(lp_box).reshape((-1, 1, 2)).astype(np.int32)
cv.drawContours(image, [lp_box], -1, (0, 255, 0), 2)
candidates = np.dstack([lp.candidates] * 3)
thresh = np.dstack([lp.thresh] * 3)
output = np.vstack([lp.plate, thresh, candidates])
cv.imshow("Plate & Candidates #%d" % int(i + 1), output)
cv.imshow("Image", image)
cv.waitKey(0)
cv.destroyAllWindows()
| [
"numpy.dstack",
"imutils.paths.list_images",
"argparse.ArgumentParser",
"logging.basicConfig",
"license_plate.LicensePlateDetector",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"numpy.vstack",
"numpy.array",
"imutils.resize",
"cv2.drawContours",
"cv2.imshow",
"logging.getLogger"
... | [((278, 428), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""[%(asctime)8s][%(filename)s][%(levelname)s] - %(message)s"""', 'datefmt': '"""%a, %d %b %Y %H:%M:%S"""'}), "(level=logging.INFO, format=\n '[%(asctime)8s][%(filename)s][%(levelname)s] - %(message)s', datefmt=\n '%a, %d %b %Y %H:%M:%S')\n", (297, 428), False, 'import logging\n'), ((472, 496), 'logging.getLogger', 'logging.getLogger', (['"""dev"""'], {}), "('dev')\n", (489, 496), False, 'import logging\n'), ((610, 635), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (633, 635), False, 'import argparse\n'), ((772, 805), 'imutils.paths.list_images', 'paths.list_images', (["args['images']"], {}), "(args['images'])\n", (789, 805), False, 'from imutils import paths\n'), ((825, 846), 'cv2.imread', 'cv.imread', (['image_path'], {}), '(image_path)\n', (834, 846), True, 'import cv2 as cv\n'), ((984, 1011), 'license_plate.LicensePlateDetector', 'LicensePlateDetector', (['image'], {}), '(image)\n', (1004, 1011), False, 'from license_plate import LicensePlateDetector\n'), ((1487, 1512), 'cv2.imshow', 'cv.imshow', (['"""Image"""', 'image'], {}), "('Image', image)\n", (1496, 1512), True, 'import cv2 as cv\n'), ((1522, 1535), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1532, 1535), True, 'import cv2 as cv\n'), ((1545, 1567), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1565, 1567), True, 'import cv2 as cv\n'), ((936, 968), 'imutils.resize', 'imutils.resize', (['image'], {'width': '(600)'}), '(image, width=600)\n', (950, 968), False, 'import imutils\n'), ((1185, 1237), 'cv2.drawContours', 'cv.drawContours', (['image', '[lp_box]', '(-1)', '(0, 255, 0)', '(2)'], {}), '(image, [lp_box], -1, (0, 255, 0), 2)\n', (1200, 1237), True, 'import cv2 as cv\n'), ((1264, 1294), 'numpy.dstack', 'np.dstack', (['([lp.candidates] * 3)'], {}), '([lp.candidates] * 3)\n', (1273, 1294), True, 'import numpy as np\n'), ((1317, 1343), 
'numpy.dstack', 'np.dstack', (['([lp.thresh] * 3)'], {}), '([lp.thresh] * 3)\n', (1326, 1343), True, 'import numpy as np\n'), ((1366, 1407), 'numpy.vstack', 'np.vstack', (['[lp.plate, thresh, candidates]'], {}), '([lp.plate, thresh, candidates])\n', (1375, 1407), True, 'import numpy as np\n'), ((1118, 1134), 'numpy.array', 'np.array', (['lp_box'], {}), '(lp_box)\n', (1126, 1134), True, 'import numpy as np\n')] |
from keras.models import model_from_json
from keras.optimizers import RMSprop
import numpy as np
import csv
## Loads model weights.
with open('06.CIFAR-10_CombinedNet.config', 'r') as text_file:
json_config = text_file.read()
model = model_from_json(json_config)
model.load_weights('06.CIFAR-10_CombinedNet.weights')
print(model.summary())
## Read image
MNIST_dataset = np.load('06.Kaggle_CIFAR-10_test.npz')
x_test = MNIST_dataset['x_test']
x_test = x_test.astype('float32')/255.0
## Predict the x_test.
predictions = model.predict(x_test)
print('Prediction completed.')
predictions = np.argmax(predictions, axis=1)
## Save as CSV for submission.
with open('06.Kaggle_submission.csv', 'w', newline='') as csv_file:
print('Saving file...')
csv_writer = csv.writer(csv_file, delimiter=',')
# Define column name.
csv_writer.writerow(['id', 'label'])
for i in range(len(predictions)):
label = ''
if predictions[i] == 0:
label = 'airplane'
elif predictions[i] == 1:
label = 'automobile'
elif predictions[i] == 2:
label = 'bird'
elif predictions[i] == 3:
label = 'cat'
elif predictions[i] == 4:
label = 'deer'
elif predictions[i] == 5:
label = 'dog'
elif predictions[i] == 6:
label = 'frog'
elif predictions[i] == 7:
label = 'horse'
elif predictions[i] == 8:
label = 'ship'
elif predictions[i] == 9:
label = 'truck'
csv_writer.writerow([i+1, label])
print('File: 06.Kaggle_submission.csv Saved completed.')
| [
"numpy.load",
"keras.models.model_from_json",
"csv.writer",
"numpy.argmax"
] | [((384, 422), 'numpy.load', 'np.load', (['"""06.Kaggle_CIFAR-10_test.npz"""'], {}), "('06.Kaggle_CIFAR-10_test.npz')\n", (391, 422), True, 'import numpy as np\n'), ((601, 631), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (610, 631), True, 'import numpy as np\n'), ((243, 271), 'keras.models.model_from_json', 'model_from_json', (['json_config'], {}), '(json_config)\n', (258, 271), False, 'from keras.models import model_from_json\n'), ((777, 812), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (787, 812), False, 'import csv\n')] |
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('inputdir')
parser.add_argument("-p", dest='postfix', default='', help="plot postfix")
parser.add_argument("-c", dest='compare', default='stepwise_domain_adaptation', help="training to compare data and MC to")
args = parser.parse_args()
ntraingings = 5
#
# Losses
#
# pd.DataFrame(np.load( '../domada_50_epochs_newsample/domain_adaptation_two_samples/history.npy'))
da_history = pd.DataFrame(np.load('%s/%s/history.npy' % (args.inputdir,args.compare)))
data_history = pd.DataFrame(np.load('%s/data_training/history.npy' % args.inputdir))
mc_history = pd.DataFrame(np.load('%s/MC_training/history.npy' % args.inputdir))
fig = plt.figure()
dataonDA='$\\bf{data}$ on $\it{D.A.}$'
mconDA='$\\bf{mc}$ on $\it{D.A.}$'
dataonmc='$\\bf{data}$ on $\it{mc}$'
mconmc='$\\bf{mc}$ on $\it{mc}$'
dataondata='$\\bf{data}$ on $\it{data}$'
mcondata='$\\bf{mc}$ on $\it{data}$'
metaleg='$\\bf{sample}$\n$\it{training}$'
databtag='btag_discriminator_loss_2'
#'btag_discriminator_'
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
def textonly(ax, txt, fontsize = 10, loc = 2, *args, **kwargs):
at = AnchoredText(txt,
prop=dict(size=fontsize),
frameon=True,
loc=loc)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at)
return at
def makeEpochPlot(idstring,fill):
if idstring == 'weighted_acc':
plt.ylim(0.4, 1.1)
else:
plt.ylim(0.2, 0.45)
nepochs=da_history['val_btag_discriminator_'+idstring+'_2_mean'].shape[0]
plt.plot(da_history['val_btag_discriminator_'+idstring+'_2_mean'],label=dataonDA, c='blue')
if fill:
plt.fill_between(
range(nepochs),
da_history['val_btag_discriminator_'+idstring+'_2_mean']-da_history['val_btag_discriminator_'+idstring+'_2_std'],
da_history['val_btag_discriminator_'+idstring+'_2_mean']+da_history['val_btag_discriminator_'+idstring+'_2_std'],
color='blue',
alpha=0.3
)
plt.plot(da_history['val_btag_discriminator_'+idstring+'_1_mean'],label=mconDA, c='green',linestyle=':')
if fill:
plt.fill_between(
range(nepochs),
da_history['val_btag_discriminator_'+idstring+'_1_mean']-da_history['val_btag_discriminator_'+idstring+'_1_std'],
da_history['val_btag_discriminator_'+idstring+'_1_mean']+da_history['val_btag_discriminator_'+idstring+'_1_std'],
color='green',
alpha=0.3
)
plt.plot(mc_history['val_btag_discriminator_'+idstring+'_2_mean'],label=dataonmc, c='red')
plt.plot(mc_history['val_btag_discriminator_'+idstring+'_1_mean'],label=mconmc, c='blueviolet',linestyle=':')
plt.plot(data_history['val_btag_discriminator_'+idstring+'_2_mean'],label=dataondata, c='orange')
plt.plot(data_history['val_btag_discriminator_'+idstring+'_1_mean'],label=mcondata, c='brown',linestyle=':')
if idstring == 'weighted_acc':
plt.plot(da_history['datamc_discriminator_'+idstring+'_1_mean'],label='data/mc discr', c='fuchsia',linestyle='--')
plt.ylabel(''+idstring+'')
plt.xlabel('epochs')
plt.legend(ncol=2, loc=1)#'best')
textonly(plt.gca(),metaleg,loc=3)
fig.savefig('%s/%s%s.png' % (args.inputdir, idstring, args.postfix))
fig.savefig('%s/%s%s.pdf' % (args.inputdir, idstring, args.postfix))
plt.clf()
makeEpochPlot('loss',True)
makeEpochPlot('weighted_acc',False)
from sklearn.metrics import roc_curve, roc_auc_score
from scipy.interpolate import InterpolatedUnivariateSpline
from pdb import set_trace
## pd.DataFrame(np.load( '../domada_50_epochs_newsample/domain_adaptation_two_samples/predictions.npy'))
da_predictions = pd.DataFrame(np.load('%s/%s/predictions.npy' % (args.inputdir, args.compare)))
data_predictions = pd.DataFrame(np.load('%s/data_training/predictions.npy' % args.inputdir))
mc_predictions = pd.DataFrame(np.load('%s/MC_training/predictions.npy' % args.inputdir))
def draw_roc(df, label, color, draw_unc=False, ls='-', draw_auc=True):
newx = np.logspace(-3, 0, 50)#arange(0,1,0.01)
tprs = pd.DataFrame()
scores = []
for idx in range(ntraingings):
tmp_fpr, tmp_tpr, _ = roc_curve(df.isB, df['prediction_%d' % idx])
scores.append(
roc_auc_score(df.isB, df['prediction_%d' % idx])
)
coords = pd.DataFrame()
coords['fpr'] = tmp_fpr
coords['tpr'] = tmp_tpr
clean = coords.drop_duplicates(subset=['fpr'])
spline = InterpolatedUnivariateSpline(clean.fpr, clean.tpr,k=1)
tprs[idx] = spline(newx)
scores = np.array(scores)
auc = ' AUC: %.3f +/- %.3f' % (scores.mean(), scores.std()) if draw_auc else ''
if draw_unc:
plt.fill_between(
newx,
tprs.mean(axis=1) - tprs.std(axis=1),
tprs.mean(axis=1) + tprs.std(axis=1),
color=color,
alpha=0.3
)
plt.plot(newx, tprs.mean(axis=1), label=label + auc, c=color, ls=ls)
plt.clf()
draw_roc(
da_predictions[da_predictions.isMC == 0],
dataonDA,
'blue',
draw_unc = True,
draw_auc=True,
)
draw_roc(
da_predictions[da_predictions.isMC == 1],
mconDA,
'green',
draw_unc = True,
draw_auc=True,
ls=':'
)
draw_roc(
mc_predictions[mc_predictions.isMC == 0],
dataonmc, 'red', draw_auc=True
)
draw_roc(
mc_predictions[mc_predictions.isMC == 1],
mconmc, 'blueviolet', draw_auc=True, ls=':'
)
draw_roc(
data_predictions[data_predictions.isMC == 0],
dataondata, 'orange', draw_auc=True
)
draw_roc(
data_predictions[data_predictions.isMC == 1],
mcondata, 'brown', draw_auc=True, ls=':'
)
plt.xlim(0., 1)
plt.ylim(0.45, 1)
plt.grid(True)
plt.ylabel('true positive rate')
plt.xlabel('false positive rate')
plt.legend(loc='best')
textonly(plt.gca(),metaleg,loc=3)
fig.savefig('%s/rocs%s.png' % (args.inputdir, args.postfix))
fig.savefig('%s/rocs%s.pdf' % (args.inputdir, args.postfix))
plt.xlim(10**-3, 1)
plt.ylim(0.3, 1)
plt.gca().set_xscale('log')
fig.savefig('%s/rocs_log%s.png' % (args.inputdir, args.postfix))
fig.savefig('%s/rocs_log%s.pdf' % (args.inputdir, args.postfix))
def plot_discriminator(df, name):
plt.clf()
plt.hist(
[df[df.isB == 1].prediction_mean, df[df.isB == 0].prediction_mean],
bins = 50, range=(0, 1.), histtype='bar', stacked=True,
color=['green', 'blue'], label=['B jets', 'light jets']
)
plt.ylabel('occurrences')
plt.xlabel('NN output (averaged)')
plt.legend(loc='best')
fig.savefig('%s/%s%s.png' % (args.inputdir, name, args.postfix))
fig.savefig('%s/%s%s.pdf' % (args.inputdir, name, args.postfix))
plot_discriminator(da_predictions[da_predictions.isMC == 1], 'nn_out_da_mc')
plot_discriminator(da_predictions[da_predictions.isMC == 0], 'nn_out_da_data')
plot_discriminator(data_predictions[data_predictions.isMC == 1], 'nn_out_dataTraining_mc')
plot_discriminator(data_predictions[data_predictions.isMC == 0], 'nn_out_dataTraining_data')
plot_discriminator(mc_predictions[mc_predictions.isMC == 1], 'nn_out_mcTraining_mc')
plot_discriminator(mc_predictions[mc_predictions.isMC == 0], 'nn_out_mcTraining_data')
| [
"numpy.load",
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"numpy.logspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"pandas.DataFrame",
"scipy.interpolate.InterpolatedUnivariateSpline",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"sklearn.metrics.roc_auc_score",
... | [((57, 78), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (71, 78), False, 'import matplotlib\n'), ((157, 173), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (171, 173), False, 'from argparse import ArgumentParser\n'), ((829, 841), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (839, 841), True, 'import matplotlib.pyplot as plt\n'), ((4893, 4902), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4900, 4902), True, 'import matplotlib.pyplot as plt\n'), ((5521, 5537), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.0)', '(1)'], {}), '(0.0, 1)\n', (5529, 5537), True, 'import matplotlib.pyplot as plt\n'), ((5537, 5554), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.45)', '(1)'], {}), '(0.45, 1)\n', (5545, 5554), True, 'import matplotlib.pyplot as plt\n'), ((5555, 5569), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5563, 5569), True, 'import matplotlib.pyplot as plt\n'), ((5570, 5602), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""true positive rate"""'], {}), "('true positive rate')\n", (5580, 5602), True, 'import matplotlib.pyplot as plt\n'), ((5603, 5636), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""false positive rate"""'], {}), "('false positive rate')\n", (5613, 5636), True, 'import matplotlib.pyplot as plt\n'), ((5637, 5659), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5647, 5659), True, 'import matplotlib.pyplot as plt\n'), ((5817, 5838), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(10 ** -3)', '(1)'], {}), '(10 ** -3, 1)\n', (5825, 5838), True, 'import matplotlib.pyplot as plt\n'), ((5837, 5853), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.3)', '(1)'], {}), '(0.3, 1)\n', (5845, 5853), True, 'import matplotlib.pyplot as plt\n'), ((593, 653), 'numpy.load', 'np.load', (["('%s/%s/history.npy' % (args.inputdir, args.compare))"], {}), "('%s/%s/history.npy' % (args.inputdir, args.compare))\n", (600, 653), True, 'import numpy as np\n'), 
((682, 737), 'numpy.load', 'np.load', (["('%s/data_training/history.npy' % args.inputdir)"], {}), "('%s/data_training/history.npy' % args.inputdir)\n", (689, 737), True, 'import numpy as np\n'), ((767, 820), 'numpy.load', 'np.load', (["('%s/MC_training/history.npy' % args.inputdir)"], {}), "('%s/MC_training/history.npy' % args.inputdir)\n", (774, 820), True, 'import numpy as np\n'), ((1720, 1820), 'matplotlib.pyplot.plot', 'plt.plot', (["da_history['val_btag_discriminator_' + idstring + '_2_mean']"], {'label': 'dataonDA', 'c': '"""blue"""'}), "(da_history['val_btag_discriminator_' + idstring + '_2_mean'],\n label=dataonDA, c='blue')\n", (1728, 1820), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2248), 'matplotlib.pyplot.plot', 'plt.plot', (["da_history['val_btag_discriminator_' + idstring + '_1_mean']"], {'label': 'mconDA', 'c': '"""green"""', 'linestyle': '""":"""'}), "(da_history['val_btag_discriminator_' + idstring + '_1_mean'],\n label=mconDA, c='green', linestyle=':')\n", (2142, 2248), True, 'import matplotlib.pyplot as plt\n'), ((2564, 2663), 'matplotlib.pyplot.plot', 'plt.plot', (["mc_history['val_btag_discriminator_' + idstring + '_2_mean']"], {'label': 'dataonmc', 'c': '"""red"""'}), "(mc_history['val_btag_discriminator_' + idstring + '_2_mean'],\n label=dataonmc, c='red')\n", (2572, 2663), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2775), 'matplotlib.pyplot.plot', 'plt.plot', (["mc_history['val_btag_discriminator_' + idstring + '_1_mean']"], {'label': 'mconmc', 'c': '"""blueviolet"""', 'linestyle': '""":"""'}), "(mc_history['val_btag_discriminator_' + idstring + '_1_mean'],\n label=mconmc, c='blueviolet', linestyle=':')\n", (2664, 2775), True, 'import matplotlib.pyplot as plt\n'), ((2767, 2873), 'matplotlib.pyplot.plot', 'plt.plot', (["data_history['val_btag_discriminator_' + idstring + '_2_mean']"], {'label': 'dataondata', 'c': '"""orange"""'}), "(data_history['val_btag_discriminator_' + idstring + '_2_mean'],\n label=dataondata, 
c='orange')\n", (2775, 2873), True, 'import matplotlib.pyplot as plt\n'), ((2866, 2984), 'matplotlib.pyplot.plot', 'plt.plot', (["data_history['val_btag_discriminator_' + idstring + '_1_mean']"], {'label': 'mcondata', 'c': '"""brown"""', 'linestyle': '""":"""'}), "(data_history['val_btag_discriminator_' + idstring + '_1_mean'],\n label=mcondata, c='brown', linestyle=':')\n", (2874, 2984), True, 'import matplotlib.pyplot as plt\n'), ((3129, 3159), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('' + idstring + '')"], {}), "('' + idstring + '')\n", (3139, 3159), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3177), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (3167, 3177), True, 'import matplotlib.pyplot as plt\n'), ((3179, 3204), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(2)', 'loc': '(1)'}), '(ncol=2, loc=1)\n', (3189, 3204), True, 'import matplotlib.pyplot as plt\n'), ((3389, 3398), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3396, 3398), True, 'import matplotlib.pyplot as plt\n'), ((3745, 3809), 'numpy.load', 'np.load', (["('%s/%s/predictions.npy' % (args.inputdir, args.compare))"], {}), "('%s/%s/predictions.npy' % (args.inputdir, args.compare))\n", (3752, 3809), True, 'import numpy as np\n'), ((3843, 3902), 'numpy.load', 'np.load', (["('%s/data_training/predictions.npy' % args.inputdir)"], {}), "('%s/data_training/predictions.npy' % args.inputdir)\n", (3850, 3902), True, 'import numpy as np\n'), ((3936, 3993), 'numpy.load', 'np.load', (["('%s/MC_training/predictions.npy' % args.inputdir)"], {}), "('%s/MC_training/predictions.npy' % args.inputdir)\n", (3943, 3993), True, 'import numpy as np\n'), ((4075, 4097), 'numpy.logspace', 'np.logspace', (['(-3)', '(0)', '(50)'], {}), '(-3, 0, 50)\n', (4086, 4097), True, 'import numpy as np\n'), ((4125, 4139), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4137, 4139), True, 'import pandas as pd\n'), ((4558, 4574), 'numpy.array', 'np.array', 
(['scores'], {}), '(scores)\n', (4566, 4574), True, 'import numpy as np\n'), ((5669, 5678), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5676, 5678), True, 'import matplotlib.pyplot as plt\n'), ((6049, 6058), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6056, 6058), True, 'import matplotlib.pyplot as plt\n'), ((6060, 6256), 'matplotlib.pyplot.hist', 'plt.hist', (['[df[df.isB == 1].prediction_mean, df[df.isB == 0].prediction_mean]'], {'bins': '(50)', 'range': '(0, 1.0)', 'histtype': '"""bar"""', 'stacked': '(True)', 'color': "['green', 'blue']", 'label': "['B jets', 'light jets']"}), "([df[df.isB == 1].prediction_mean, df[df.isB == 0].prediction_mean],\n bins=50, range=(0, 1.0), histtype='bar', stacked=True, color=['green',\n 'blue'], label=['B jets', 'light jets'])\n", (6068, 6256), True, 'import matplotlib.pyplot as plt\n'), ((6261, 6286), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""occurrences"""'], {}), "('occurrences')\n", (6271, 6286), True, 'import matplotlib.pyplot as plt\n'), ((6288, 6322), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""NN output (averaged)"""'], {}), "('NN output (averaged)')\n", (6298, 6322), True, 'import matplotlib.pyplot as plt\n'), ((6324, 6346), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (6334, 6346), True, 'import matplotlib.pyplot as plt\n'), ((1592, 1610), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.4)', '(1.1)'], {}), '(0.4, 1.1)\n', (1600, 1610), True, 'import matplotlib.pyplot as plt\n'), ((1620, 1639), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.2)', '(0.45)'], {}), '(0.2, 0.45)\n', (1628, 1639), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3136), 'matplotlib.pyplot.plot', 'plt.plot', (["da_history['datamc_discriminator_' + idstring + '_1_mean']"], {'label': '"""data/mc discr"""', 'c': '"""fuchsia"""', 'linestyle': '"""--"""'}), "(da_history['datamc_discriminator_' + idstring + '_1_mean'], label=\n 'data/mc discr', c='fuchsia', linestyle='--')\n", 
(3019, 3136), True, 'import matplotlib.pyplot as plt\n'), ((3223, 3232), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3230, 3232), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4253), 'sklearn.metrics.roc_curve', 'roc_curve', (['df.isB', "df['prediction_%d' % idx]"], {}), "(df.isB, df['prediction_%d' % idx])\n", (4218, 4253), False, 'from sklearn.metrics import roc_curve, roc_auc_score\n'), ((4339, 4353), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4351, 4353), True, 'import pandas as pd\n'), ((4466, 4521), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['clean.fpr', 'clean.tpr'], {'k': '(1)'}), '(clean.fpr, clean.tpr, k=1)\n', (4494, 4521), False, 'from scipy.interpolate import InterpolatedUnivariateSpline\n'), ((5854, 5863), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5861, 5863), True, 'import matplotlib.pyplot as plt\n'), ((4274, 4322), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['df.isB', "df['prediction_%d' % idx]"], {}), "(df.isB, df['prediction_%d' % idx])\n", (4287, 4322), False, 'from sklearn.metrics import roc_curve, roc_auc_score\n')] |
from pathlib import Path
import sys
path = str(Path(Path(__file__).parent.absolute()).parent.absolute())
sys.path.insert(0, path)
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score, adjusted_rand_score
from sklearn.preprocessing import StandardScaler
from tabulate import tabulate
from mnist_utils.util import _x, _y_int
import math
import numpy as np
import random as rand
#global vars
clustering = None
label_count = len(np.unique(_y_int))
slice_size = 6000
def classify_clusters(l1, l2):
ref_labels = {}
m = np.unique(l1).max() + 1
for i in range(l1.size):
if l1[i] == -1:
l1[i] = m
for i in range(len(np.unique(l1))):
index = np.where(l1 == i,1,0)
temp = np.bincount(l2[index==1])
ref_labels[i] = temp.argmax()
decimal_labels = np.zeros(len(l1))
for i in range(len(l1)):
decimal_labels[i] = ref_labels[l1[i]]
return decimal_labels
def init_clustring_scikit(epsilon=2, min_samples=2):
global clustering, slice_size
indexes = np.random.choice(len(_x), size=slice_size, replace=False)
clustering = DBSCAN(eps=epsilon, min_samples=min_samples)
scaler = StandardScaler()
x_train = scaler.fit_transform(_x[indexes])
clustering.fit(x_train)
print(clustering.labels_)
return _y_int[indexes]
def test_accuracy_scikit(labels):
global clustering
core_samples_mask = np.zeros_like(clustering.labels_, dtype=bool)
core_samples_mask[clustering.core_sample_indices_] = True
decimal_labels = classify_clusters(clustering.labels_, labels)
print("predicted labels:\t", decimal_labels[:16].astype('int'))
print("true labels:\t\t", labels[:16])
print(60 * '_')
AP = accuracy_score(decimal_labels,labels)
RI = adjusted_rand_score(decimal_labels,labels)
print("Accuracy (PURITY):" , AP)
print("Accuracy (RAND INDEX):" , RI)
return AP, RI, len(np.unique(clustering.labels_))
def pipeline(epsilon_max=50, min_samples_max=50, coefficient=2):
epsilon = 1
min_samples = 1
result = []
AP = None
RI = None
while epsilon <= epsilon_max:
while min_samples <= min_samples_max:
print(10 * "*" + "TRYING WITH " + str(epsilon) + " " + str(min_samples) + 10 * "*")
labels = init_clustring_scikit(epsilon, min_samples)
AP, RI, n= test_accuracy_scikit(labels)
result.append([epsilon, min_samples, AP, RI, n])
min_samples *= coefficient
min_samples = math.ceil(min_samples)
min_samples = 1
epsilon *= coefficient
epsilon = math.ceil(epsilon)
print(tabulate(result, headers=['epsilon', 'min_samples', 'AP', 'RI', 'Cluster Count']))
pipeline(coefficient=1.2)
| [
"numpy.zeros_like",
"sklearn.preprocessing.StandardScaler",
"math.ceil",
"sklearn.metrics.accuracy_score",
"numpy.unique",
"sys.path.insert",
"pathlib.Path",
"numpy.where",
"tabulate.tabulate",
"sklearn.metrics.adjusted_rand_score",
"numpy.bincount",
"sklearn.cluster.DBSCAN"
] | [((105, 129), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (120, 129), False, 'import sys\n'), ((489, 506), 'numpy.unique', 'np.unique', (['_y_int'], {}), '(_y_int)\n', (498, 506), True, 'import numpy as np\n'), ((1158, 1202), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': 'epsilon', 'min_samples': 'min_samples'}), '(eps=epsilon, min_samples=min_samples)\n', (1164, 1202), False, 'from sklearn.cluster import DBSCAN\n'), ((1216, 1232), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1230, 1232), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1447, 1492), 'numpy.zeros_like', 'np.zeros_like', (['clustering.labels_'], {'dtype': 'bool'}), '(clustering.labels_, dtype=bool)\n', (1460, 1492), True, 'import numpy as np\n'), ((1762, 1800), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['decimal_labels', 'labels'], {}), '(decimal_labels, labels)\n', (1776, 1800), False, 'from sklearn.metrics import accuracy_score, adjusted_rand_score\n'), ((1809, 1852), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['decimal_labels', 'labels'], {}), '(decimal_labels, labels)\n', (1828, 1852), False, 'from sklearn.metrics import accuracy_score, adjusted_rand_score\n'), ((740, 763), 'numpy.where', 'np.where', (['(l1 == i)', '(1)', '(0)'], {}), '(l1 == i, 1, 0)\n', (748, 763), True, 'import numpy as np\n'), ((777, 804), 'numpy.bincount', 'np.bincount', (['l2[index == 1]'], {}), '(l2[index == 1])\n', (788, 804), True, 'import numpy as np\n'), ((2645, 2663), 'math.ceil', 'math.ceil', (['epsilon'], {}), '(epsilon)\n', (2654, 2663), False, 'import math\n'), ((2674, 2759), 'tabulate.tabulate', 'tabulate', (['result'], {'headers': "['epsilon', 'min_samples', 'AP', 'RI', 'Cluster Count']"}), "(result, headers=['epsilon', 'min_samples', 'AP', 'RI',\n 'Cluster Count'])\n", (2682, 2759), False, 'from tabulate import tabulate\n'), ((707, 720), 'numpy.unique', 'np.unique', (['l1'], {}), '(l1)\n', (716, 
720), True, 'import numpy as np\n'), ((1953, 1982), 'numpy.unique', 'np.unique', (['clustering.labels_'], {}), '(clustering.labels_)\n', (1962, 1982), True, 'import numpy as np\n'), ((2549, 2571), 'math.ceil', 'math.ceil', (['min_samples'], {}), '(min_samples)\n', (2558, 2571), False, 'import math\n'), ((585, 598), 'numpy.unique', 'np.unique', (['l1'], {}), '(l1)\n', (594, 598), True, 'import numpy as np\n'), ((52, 66), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (56, 66), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#-------------------------------------------------------------------------------
'''
@Author : {SEASON}
@License : (C) Copyright 2013-2022, {OLD_IT_WANG}
@Contact : {<EMAIL>}
@Software: PyCharm
@File : NLP_DEMO -- semantic network
@Time : 2020/6/2 10:51
@Desc : Build a word co-occurrence (semantic) network from a text corpus:
        segment the text with jieba, keep the `num` most frequent words,
        accumulate a paragraph-level co-occurrence matrix, and draw the
        resulting graph with networkx.
'''
#-------------------------------------------------------------------------------
import re # regular-expression library
import jieba # Chinese word segmentation
import collections # word-frequency counting
import numpy as np
import pandas as pd
import networkx as nx # complex-network analysis library
import matplotlib.pyplot as plt
# number of nodes in the graph (the top-`num` most frequent words)
num=20
G=nx.Graph()
plt.figure(figsize=(100,70))
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
plt.rcParams['font.sans-serif'] = ['SimHei'] # render Chinese labels correctly
# read the corpus file
fn = open(r'../../blog/1000.txt',encoding='utf-8') # open the file
string_data = fn.read() # read the whole file at once
fn.close() # close the file
# text pre-processing
pattern = re.compile(u'\t|\.|-|:|;|\)|\(|\?|"') # punctuation pattern to strip
string_data = re.sub(pattern, '', string_data) # remove characters matching the pattern
# word segmentation
seg_list_exact = jieba.cut(string_data, cut_all = False) # precise-mode segmentation
object_list = []
remove_words = list(open('../stopwords/stopwords.txt','r',encoding='utf-8').readlines()) # custom stop-word list
stop_words = [x.replace('\n','') for x in remove_words ]
# stop_words = []
# for x in remove_words:
#     x = x.replace('\n', '')
#     stop_words.append(x)
stop_words.append('''\n''')
stop_words.append('')
stop_words.append(u'\xa0')
stop_words.append(' ')
stop_words = set(stop_words)
print(stop_words)
for word in seg_list_exact: # iterate over every token
    if word not in stop_words : # keep it unless it is a stop word
        object_list.append(word) # append the token to the list
# word-frequency statistics
word_counts = collections.Counter(object_list) # count token frequencies
word_counts_top = word_counts.most_common(num) # the `num` most frequent words
word = pd.DataFrame(word_counts_top, columns=['关键词','次数'])
print(word)
print('词频统计完成--------------')
word_T = pd.DataFrame(word.values.T,columns=word.iloc[:,0])
net = pd.DataFrame(np.mat(np.zeros((num,num))),columns=word.iloc[:,0])
k = 0
# build the semantic association (co-occurrence) matrix, paragraph by paragraph
for i in range(len(string_data)):
    if string_data[i] == '\n': # a newline marks the end of a paragraph
        seg_list_exact = jieba.cut(string_data[k:i], cut_all = False) # precise-mode segmentation
        object_list2 = []
        for words in seg_list_exact: # iterate over every token
            if words not in stop_words: # keep it unless it is a stop word
                object_list2.append(words) # append the token to the list
        if len(object_list2)==0:## skip empty paragraphs
            continue
        word_counts2 = collections.Counter(object_list2)
        word_counts_top2 = word_counts2.most_common(num) # this paragraph's most frequent words
        word2 = pd.DataFrame(word_counts_top2)
        word2_T = pd.DataFrame(word2.values.T,columns=word2.iloc[:,0])
        relation = list(0 for x in range(num))
        # mark which of the global top words also appear in this paragraph
        for j in range(num):
            for p in range(len(word2)):
                if word.iloc[j,0] == word2.iloc[p,0]:
                    relation[j] = 1
                    break
        # for top words co-occurring in this paragraph, add their counts to the matrix
        for j in range(num):
            if relation[j] == 1:
                for q in range(num):
                    if relation[q] == 1:
                        net.iloc[j, q] = net.iloc[j, q] + word2_T.loc[1, word_T.iloc[0, q]]
        k = i + 1
# process the last paragraph to complete the matrix
# NOTE(review): text after the final newline is never actually processed here — confirm intent
print('关联矩阵构造完成--------------')
n = len(word)
# edges: (start word, end word, weight)
for i in range(n):
    for j in range(i, n):
        G.add_weighted_edges_from([(word.iloc[i, 0], word.iloc[j, 0], net.iloc[i, j])])
print(G.edges())
print('开始进行绘制--------------')
my_width = [float(v['weight'] / 3) for (r, c, v) in G.edges(data=True)]
my_node_size=[float(net.iloc[i, i] * 1) for i in np.arange(num)]
print(my_node_size)
print(len(my_width))
print(len(my_node_size))
print(len(G.nodes()))
nx.draw_networkx(G,
                 pos=nx.spring_layout(G),
                 # edge width scales with the co-occurrence weight
                 width=my_width,
                 edge_color='orange',
                 # node size scales with the word's own frequency
                 node_size=my_node_size,
                 node_color='black'
                 )
#plt.axis('off')
#plt.title('助攻表现(常规赛)',fontstyle='oblique')
plt.savefig('test3.png')
#plt.show()
| [
"pandas.DataFrame",
"jieba.cut",
"numpy.zeros",
"matplotlib.pyplot.figure",
"networkx.spring_layout",
"networkx.Graph",
"numpy.arange",
"collections.Counter",
"re.sub",
"matplotlib.pyplot.savefig",
"re.compile"
] | [((606, 616), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (614, 616), True, 'import networkx as nx\n'), ((618, 647), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(100, 70)'}), '(figsize=(100, 70))\n', (628, 647), True, 'import matplotlib.pyplot as plt\n'), ((900, 941), 're.compile', 're.compile', (['u"""\t|\\\\.|-|:|;|\\\\)|\\\\(|\\\\?|\\""""'], {}), '(u\'\\t|\\\\.|-|:|;|\\\\)|\\\\(|\\\\?|"\')\n', (910, 941), False, 'import re\n'), ((966, 998), 're.sub', 're.sub', (['pattern', '""""""', 'string_data'], {}), "(pattern, '', string_data)\n", (972, 998), False, 'import re\n'), ((1037, 1074), 'jieba.cut', 'jieba.cut', (['string_data'], {'cut_all': '(False)'}), '(string_data, cut_all=False)\n', (1046, 1074), False, 'import jieba\n'), ((1657, 1689), 'collections.Counter', 'collections.Counter', (['object_list'], {}), '(object_list)\n', (1676, 1689), False, 'import collections\n'), ((1765, 1817), 'pandas.DataFrame', 'pd.DataFrame', (['word_counts_top'], {'columns': "['关键词', '次数']"}), "(word_counts_top, columns=['关键词', '次数'])\n", (1777, 1817), True, 'import pandas as pd\n'), ((1870, 1922), 'pandas.DataFrame', 'pd.DataFrame', (['word.values.T'], {'columns': 'word.iloc[:, 0]'}), '(word.values.T, columns=word.iloc[:, 0])\n', (1882, 1922), True, 'import pandas as pd\n'), ((4111, 4135), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test3.png"""'], {}), "('test3.png')\n", (4122, 4135), True, 'import matplotlib.pyplot as plt\n'), ((1947, 1967), 'numpy.zeros', 'np.zeros', (['(num, num)'], {}), '((num, num))\n', (1955, 1967), True, 'import numpy as np\n'), ((2113, 2155), 'jieba.cut', 'jieba.cut', (['string_data[k:i]'], {'cut_all': '(False)'}), '(string_data[k:i], cut_all=False)\n', (2122, 2155), False, 'import jieba\n'), ((2435, 2468), 'collections.Counter', 'collections.Counter', (['object_list2'], {}), '(object_list2)\n', (2454, 2468), False, 'import collections\n'), ((2555, 2585), 'pandas.DataFrame', 'pd.DataFrame', (['word_counts_top2'], {}), 
'(word_counts_top2)\n', (2567, 2585), True, 'import pandas as pd\n'), ((2604, 2658), 'pandas.DataFrame', 'pd.DataFrame', (['word2.values.T'], {'columns': 'word2.iloc[:, 0]'}), '(word2.values.T, columns=word2.iloc[:, 0])\n', (2616, 2658), True, 'import pandas as pd\n'), ((3622, 3636), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (3631, 3636), True, 'import numpy as np\n'), ((3772, 3791), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (3788, 3791), True, 'import networkx as nx\n')] |
"""
Test file for analysis of pNEUMA data
"""
from pneumapackage.settings import *
import pneumapackage.compute as cp
from pneumapackage.__init__ import read_pickle, write_pickle, path_data, path_results
import pneumapackage.iodata as rd
import test_network as tn
import test_data as td
import numpy as np
import pandas as pd
import leuvenmapmatching.util.dist_euclidean as distxy
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from tqdm.contrib import tenumerate
from tqdm import tqdm
import os
"""
Up until now we have a list of dataframes (trajectories), each with a column holding the matched edge in the extracted OSM network.
The line trajectories can be used to count vehicles crossing specific locations in the network, while keeping the individual
information for more specific aggregations afterwards (augmented loop-detector data).
Place detectors on the edges in the used network and select specific edges
--> using Qgis to select manually the needed edge ids
Input parameters:
- 20 m = width of detector edges, make sure they span the whole road
- 10 m = distance from intersection
- True = place double virtual loops
- 1 m = loop distance
- 2 = number of detectors on every link
"""
def test_crossings(line_traj, df_det, **kwargs):
    """Return the crossings of the trajectories in *line_traj* over the detectors in *df_det*.

    Thin wrapper around ``pneumapackage.compute.vehicle_crossings``; any extra
    keyword arguments are forwarded unchanged.
    """
    return cp.vehicle_crossings(line_traj, df_det, **kwargs)
def get_traj_crossings(track, df_crossings):
    """Return the non-missing crossing records of a single trajectory.

    Parameters
    ----------
    track :
        Column label of the trajectory in *df_crossings* (one column per
        trajectory, as produced by ``cp.vehicle_crossings``).
    df_crossings : pandas.DataFrame
        Crossings table; NaN marks "this trajectory did not cross this row's
        detector".

    Returns
    -------
    pandas.Series
        The column for *track* with NaN entries dropped (index preserved).
    """
    # Idiomatic single-pass version of df[~df[track].isna()][track]:
    # one column lookup + dropna instead of building a boolean mask and
    # indexing the whole frame twice.
    return df_crossings[track].dropna()
def get_vehicle_types(group_id):
    """Return a DataFrame with the ``track_id`` and vehicle ``type`` of every
    trajectory in the dataset group *group_id*.
    """
    pid, _, _ = td.get_hdf_names(group_id)
    ids = rd.get_from_hdf(rd.get_hdf_path(), key_id=pid, result='ids')
    return ids.loc[:, ['track_id', 'type']]
def case_configuration(group_id, det_obj, edges):
    """Gather the detector information for the chosen edges, together with the
    trajectories that map onto one of those edges.

    Parameters
    ----------
    group_id :
        Identifier of the dataset group.
    det_obj :
        Detector object holding ``detector_selection`` and a ``features`` frame
        indexed by edge id on its first level.
    edges :
        Edge ids to restrict detectors, features and trajectories to.

    Returns
    -------
    (detectors, features, trajectories)
        Selected detectors, the feature rows for edges that carry features
        (with the original ``attrs`` preserved), and the matched line
        trajectories as a GeoDataFrame.
    """
    detectors = det_obj.detector_selection(edges)
    # Edges that also carry network features, in deterministic sorted order.
    feature_edges = sorted(set(det_obj.features.index.get_level_values(0)) & set(edges))
    features = det_obj.features.loc[(feature_edges,)]
    features.attrs = det_obj.features.attrs
    trajectories = td.get_lt(group_id=group_id, edges=edges, gdf=True)
    return detectors, features, trajectories
def edges_crossings(group_id, crossing_edges, case_number=1, det_obj=None, bearing_difference=90, strict_match=True,
                    folder=path_results):
    """Load (or compute and cache) feature and detector crossings for a set of edges.

    Results are cached as two pickles in ``<folder>/crossings``; the cache file
    names encode the dataset name, the bearing difference and the case number.

    Parameters
    ----------
    group_id : dataset group identifier (key into ``rd.get_path_dict()['groups']``).
    crossing_edges : edges for which crossings are determined.
    case_number : int, only distinguishes cached result files.
    det_obj : detector object; when None and the cache is missing, one is built
        from the test network.
    bearing_difference, strict_match : forwarded to ``cp.vehicle_crossings``.
    folder : root folder that contains the ``crossings`` cache directory.

    Returns
    -------
    (df_ft, df_det, dataset_name)
        Crossings over network features, crossings over detectors, and the
        sanitized dataset name used in the cache file names.
    """
    dataset_name = rd.get_path_dict()['groups'][group_id].replace('/', '_')
    try:
        # Fast path: reuse previously computed crossings from the pickle cache.
        df_ft = read_pickle(f'features_crossing_{dataset_name}_bd{bearing_difference}_case{case_number}',
                            os.path.join(folder, 'crossings'))
        df_det = read_pickle(f'detectors_crossing_{dataset_name}_bd{bearing_difference}_case{case_number}',
                             os.path.join(folder, 'crossings'))
    except FileNotFoundError:
        # Cache miss: compute crossings from scratch and store them.
        if det_obj is None:
            det_obj = tn.test_detectors(tn.test_network(), path_data)
        ds, ds_ft, lt = case_configuration(group_id, det_obj, edges=crossing_edges)
        # Determine crossings
        df_ft = cp.vehicle_crossings(lt, ds_ft, bearing_difference=bearing_difference, strict_match=strict_match)
        df_det = cp.vehicle_crossings(lt, ds, bearing_difference=bearing_difference, strict_match=strict_match)
        write_pickle(df_ft, f'features_crossing_{dataset_name}_bd{bearing_difference}_case{case_number}',
                     os.path.join(folder, 'crossings'))
        write_pickle(df_det, f'detectors_crossing_{dataset_name}_bd{bearing_difference}_case{case_number}',
                     os.path.join(folder, 'crossings'))
    return df_ft, df_det, dataset_name
def signal_timings(df_crossings, time_rows=('t1', 't2'), time_step=1000):
    """Aggregate detector crossing times into per-interval passing counts.

    Keeps only the crossing-time rows named in *time_rows*, then counts, for
    every interval of *time_step* (milliseconds, presumably — confirm with the
    data source), how many vehicles crossed each (edge, detector) pair.

    Returns a DataFrame indexed by ``time_step`` with one column per
    (edge, detector) combination.
    """
    crossings = df_crossings.sort_index().reset_index()
    selection = crossings.loc[crossings['detector'].isin(list(time_rows))]
    selection.set_index(['edge', 'detector'], inplace=True)
    selection = selection.transpose()
    horizon = int(max(selection.max()) + time_step)
    steps, counts = [], []
    for t in tqdm(range(time_step, horizon, time_step)):
        # Crossings that fall inside the half-open window [t - time_step, t).
        window = selection[(selection >= (t - time_step)) & (selection < t)]
        steps.append(t)
        counts.append(window.count().values)
    result = pd.DataFrame(counts, index=steps, columns=selection.columns)
    result.index.name = 'time_step'
    return result
def cycle_times(df_cycle, edge, column=None, thresh_filter=10000, filter_step=3, thresh=5000):
    """Heuristically reconstruct green/red signal phases for one edge from
    per-interval passing counts.

    Intervals with passings are candidate green periods; large gaps between
    passings (longer than *thresh*) are interpreted as red periods.
    Isolated passings surrounded by gaps larger than *thresh_filter* on both
    sides (looking *filter_step* rows away) are filtered out as noise.
    Thresholds are in the same units as the ``time_step`` index of *df_cycle*
    (milliseconds, presumably — confirm against ``signal_timings``).

    Parameters
    ----------
    df_cycle : output of ``signal_timings`` (indexed by time_step).
    edge : edge id whose columns are analysed.
    column : detector row used for phase detection; defaults to 't2'.

    Returns
    -------
    (tmp2, tmp)
        ``tmp2``: per-phase table with green start/end flags and green/red
        durations; ``tmp``: the full time series with a 0/1 ``green`` column.

    Raises
    ------
    ValueError
        If a row is flagged as both green start and green end (filters need
        adjusting).
    """
    if column is None:
        column = 't2'
    tmp = df_cycle.loc[:, edge].copy()
    tmp.loc[:, 'green'] = 0
    tmp.loc[:, 'edge'] = edge
    # Time-step size of the index; assumes the index is uniformly spaced.
    step = list(set(np.diff(tmp.index)))[0]
    # Keep only intervals with passings, plus the first and last time step.
    tmp2 = tmp.loc[list(set(np.r_[tmp[tmp[column] > 0].index, tmp.index[0], tmp.index[-1]]))].copy()
    tmp2.sort_index(inplace=True)
    tmp2.reset_index(inplace=True)
    # Gaps to the rows filter_step positions before/after — used to spot
    # isolated (noise) passings.
    tmp2.loc[:, 'filter_b'] = tmp2.loc[:, 'time_step'].diff(filter_step)
    tmp2.loc[:, 'filter_a'] = abs(tmp2.loc[:, 'time_step'].diff(-filter_step))
    filter_index = tmp2.loc[(tmp2.filter_b > thresh_filter) & (tmp2.filter_a > thresh_filter)].index
    tmp2.loc[filter_index, 'filter_b'] = 0
    tmp2.loc[filter_index, 'filter_a'] = 0
    tmp2 = tmp2[~tmp2.index.isin(tmp2[(tmp2.filter_a == 0) & (tmp2.filter_b == 0)].index)]
    # Gaps to the immediately neighbouring rows; gaps <= thresh are treated as
    # continuous green and zeroed out.
    tmp2.loc[:, 'before'] = tmp2.loc[:, 'time_step'].diff(1)
    tmp2.loc[:, 'after'] = abs(tmp2.loc[:, 'time_step'].diff(-1))
    tmp2.loc[tmp2.before <= thresh, 'before'] = 0
    tmp2.loc[tmp2.after <= thresh, 'after'] = 0
    tmp2 = tmp2.loc[tmp2[column] > 0]
    # A large gap before a passing marks a green start; a large gap after
    # marks a green end.
    tmp2.loc[:, 'green_start'] = 0
    tmp2.loc[:, 'green_end'] = 0
    tmp2.loc[tmp2.before > 0, 'green_start'] = 1
    tmp2.loc[tmp2.after > 0, 'green_end'] = 1
    tmp2 = tmp2[tmp2.index.isin(tmp2[(tmp2.green_start > 0) | (tmp2.green_end > 0)].index)]
    if len(tmp2.loc[(tmp2.green_start > 0) & (tmp2.green_end > 0)]):
        print('Adjust filters')
        raise ValueError('Invalid instances detected')
    tmp2 = tmp2[~tmp2.index.isin(tmp2[(tmp2.green_start > 0) & (tmp2.green_end > 0)].index)]
    tmp2.set_index('time_step', inplace=True)
    tmp2.loc[:, 'green_time'] = 0
    tmp2.loc[:, 'red_time'] = tmp2.before
    # Pair up green starts and ends, marking all covered time steps as green.
    index_greens = []
    ls_tmp = []
    row = 0
    for i, j in tmp2.iterrows():
        if row == 0:
            # First event: a green end implies green since the series start.
            if j['green_end'] > 0:
                index_greens.extend(np.arange(tmp.index[0], i + step, step).tolist())
                tmp2.loc[i, 'green_time'] = i - tmp.index[0] - step
            else:
                ls_tmp.append(i)
            row += 1
        elif row == len(tmp2) - 1:
            # Last event: a green start implies green until the series end.
            if j['green_start'] > 0:
                index_greens.extend(np.arange(i, tmp.index[-1] + step, step).tolist())
                tmp2.loc[i, 'green_time'] = tmp.index[-1] - i
            else:
                ls_tmp.append(i)
                index_greens.extend(np.arange(ls_tmp[0], ls_tmp[1] + step, step).tolist())
                tmp2.loc[i, 'green_time'] = ls_tmp[1] - ls_tmp[0]
                ls_tmp = []
        else:
            # Interior event: close the open green phase on a green end.
            if j['green_end'] > 0:
                ls_tmp.append(i)
                index_greens.extend(np.arange(ls_tmp[0], ls_tmp[1] + step, step).tolist())
                tmp2.loc[i, 'green_time'] = ls_tmp[1] - ls_tmp[0]
                ls_tmp = []
            else:
                ls_tmp.append(i)
            row += 1
    tmp.loc[index_greens, 'green'] = 1
    return tmp2, tmp
def create_cumulative(tuple_crossings, edge_selection, turn='other', time_step=1000, plot=False, statistics=False):
    """Build cumulative vehicle counts between an upstream and a downstream
    detector, plus per-vehicle travel times.

    Parameters
    ----------
    tuple_crossings : the (df_ft, df_det, dataset_name) tuple returned by
        ``edges_crossings``; only the detector crossings and the name are used.
    edge_selection : ordered list of (edge, detector) index pairs; the first is
        the upstream and the last the downstream measurement location.
    turn : label used in plot titles/file names only.
    time_step : aggregation interval (same units as the crossing times —
        milliseconds, presumably).
    plot : if True, save passing-count, cumulative-count and accumulation plots.
    statistics : if True, print descriptive travel-time statistics.

    Returns
    -------
    (df_st, df_tt)
        ``df_st``: per-interval counts, cumulative counts and the vehicle
        accumulation between the two locations; ``df_tt``: per-vehicle
        crossing times and travel times (ms and s).
    """
    assert turn in ['incoming', 'outgoing', 'turn_right', 'turn_left', 'straight', 'other']
    df_det = tuple_crossings[1]
    data_title = tuple_crossings[2]
    df_det = df_det.sort_index()
    df_sel = df_det.loc[edge_selection, :]
    # Keep only vehicles observed at every selected location.
    df = df_sel.dropna(axis=1)
    df = df.transpose()
    max_time = int(max(df.max()) + time_step)
    df_st = {'time_step': [], 'count': []}
    df_tt = df.astype('float64')
    # Travel time = downstream crossing time minus upstream crossing time.
    df_tt = df_tt.assign(travel_time=df_tt[edge_selection[-1]] - df_tt[edge_selection[0]])
    for t in range(time_step, max_time, time_step):
        # Crossings inside the half-open window [t - time_step, t).
        tmp = df[(df >= (t - time_step)) & (df < t)]
        df_st['time_step'].append(t), df_st['count'].append(tmp.count().values)
    df_st = pd.DataFrame(df_st['count'], index=df_st['time_step'],
                         columns=[f'count_{i[0]}_{i[1]}' for i in edge_selection])
    # Vehicles between the two locations: entered upstream minus left downstream.
    df_st = df_st.assign(veh_diff=df_st[f'count_{edge_selection[0][0]}_{edge_selection[0][1]}'] -
                                  df_st[f'count_{edge_selection[-1][0]}_{edge_selection[-1][1]}'])
    for i in edge_selection:
        df_st.loc[:, f'cumulative_{i[0]}_{i[1]}'] = df_st[f'count_{i[0]}_{i[1]}'].cumsum()
    df_tt = df_tt.assign(travel_time_sec=df_tt.travel_time / 1000)
    if statistics:
        print(f'Basic statistics of travel time from {edge_selection[0]} to {edge_selection[-1]}: '
              f'{df_tt.travel_time_sec.describe()}')
    if plot:
        # Per-interval passing counts (figure is closed, not saved).
        fig, ax = plt.subplots(figsize=(10, 8))
        ind_link = 0
        for i in edge_selection:
            ax.plot(df_st.index / 1000, df_st[f'count_{i[0]}_{i[1]}'],
                    color=qual_colorlist[ind_link], label=f'{i[0]}_{i[1]}')
            ind_link += 1
        ax.grid(True)
        ax.legend()
        ax.set_xlabel('Time [s]')
        ax.set_ylabel('Vehicles passing [veh]')
        plt.close()
        # Cumulative count curves per location.
        fig, ax = plt.subplots()
        ind_link = 0
        for i in edge_selection:
            ax.plot(df_st.index / 1000, df_st[f'count_{i[0]}_{i[1]}'].cumsum(), color=qual_colorlist[ind_link],
                    label=f'{i[0]}_{i[1]}')
            ind_link += 1
        ax.grid(True)
        ax.legend()
        ax.set_xlabel('Time [s]')
        ax.set_ylabel('Cumulative count [veh]')
        ax.set_title(f'{data_title}_{turn}')
        fig.savefig(f'{data_title}_{edge_selection[0][0]}_{edge_selection[-1][0]}_{turn}')
        # Vehicle accumulation between the two locations over time.
        fig, ax = plt.subplots()
        ax.plot(df_st.index / 1000, df_st['veh_diff'].cumsum(), label='vehicle accumulation',
                color=qual_colorlist[0])
        ax.plot(df_st.index / 1000, df_st[f'count_{edge_selection[-1][0]}_{edge_selection[-1][1]}'],
                label='vehicles passing downstream',
                color=qual_colorlist[5])
        ax.grid(True)
        ax.legend()
        ax.set_xlabel('Time [s]')
        ax.set_ylabel('Vehicles [veh]')
        ax.set_title(f'{data_title} {turn} accumulation')
        fig.savefig(f'{data_title}_{edge_selection[0][0]}_{edge_selection[-1][0]}_accumulation')
    return df_st, df_tt
def test_cycle_times(group_id, crossing_edges, **kwargs):
    """Compute the per-interval passing counts for *crossing_edges* by chaining
    ``edges_crossings`` and ``signal_timings``.
    """
    _, detector_crossings, _ = edges_crossings(group_id, crossing_edges, **kwargs)
    return signal_timings(detector_crossings)
def test_cumulative(group_id, crossing_edges, edge_selection, **kwargs):
    """Compute cumulative counts and travel times for *edge_selection* by
    chaining ``edges_crossings`` and ``create_cumulative``.

    Returns the ``(df_st, df_tt)`` pair from ``create_cumulative``.
    """
    crossings = edges_crossings(group_id, crossing_edges, **kwargs)
    return create_cumulative(crossings, edge_selection)
def create_output_table(df_crossing_sel, group_id, save_csv=False, filename=None):
    """Build a tidy table of detector-to-detector passages per trajectory.

    Each trajectory column in *df_crossing_sel* is reduced to its non-missing
    crossing times; only trajectories with an even number of crossings (i.e.
    complete entry/exit pairs) are kept. Vehicle types are merged in from the
    HDF store of *group_id*.

    Parameters
    ----------
    df_crossing_sel : crossings DataFrame, one column per trajectory.
    group_id : dataset group identifier used to look up vehicle types.
    save_csv : if True, write the table to ``path_data + filename``
        (default file name ``'traj_data.csv'``).

    Returns
    -------
    pandas.DataFrame with columns track_id, from, to, t_from, t_to, delta_t, type.
    """
    hdf_path = rd.get_hdf_path()
    group_path = rd.get_group(group_id)
    records = {'track_id': [], 'from': [], 'to': [], 't_from': [], 't_to': [], 'delta_t': []}
    for track in df_crossing_sel:
        crossings = df_crossing_sel[track][df_crossing_sel[track].notna()]
        # Guard: skip trajectories without complete (entry, exit) pairs.
        if len(crossings) % 2 != 0:
            continue
        pairs = int(len(crossings) / 2)
        crossings = crossings.sort_values()
        det_ids = crossings.index.get_level_values(0).to_list()
        times = crossings.values
        records['track_id'].extend([track] * pairs)
        records['from'].extend(det_ids[::2])
        records['t_from'].extend(times[::2])
        records['to'].extend(det_ids[1::2])
        records['t_to'].extend(times[1::2])
        records['delta_t'].extend(times[1::2] - times[::2])
    table = pd.DataFrame(records)
    veh_ids = rd.get_from_hdf(hdf_path, key_id=group_path + '/all_id', result='ids')
    table = table.merge(veh_ids[['track_id', 'type']], how='left', on='track_id')
    if save_csv:
        out_name = filename if filename is not None else 'traj_data.csv'
        table.to_csv(path_data + out_name)
    return table
def create_xt(edge, group_id, network_df, crossing_edges, show_det=None, plot=False, colormap='gist_rainbow',
              lines=False, veh_type=None, psize=1, bearing_difference=90,
              strict_match=True, folder=path_results, **kwargs):
    """Build (and optionally plot) an x-t (space-time) diagram for one edge.

    Every trajectory that crosses both virtual loops of *edge* is projected
    onto the edge's straight line; the projected position (scaled by the edge
    length) against time gives the x-t diagram.

    Parameters
    ----------
    edge : edge id present in *network_df* (column ``_id``).
    group_id : dataset group identifier.
    network_df : network table with columns _id, length, x1, y1, x2, y2.
    crossing_edges : edges passed through to ``edges_crossings``.
    show_det : detector object; when given, detector and feature locations are
        drawn as horizontal lines on the plot.
    plot / lines : draw the diagram; *lines* uses speed-colored line segments
        instead of a scatter plot.
    veh_type : optional vehicle type (or list of types) to filter on.
    psize : point/line size. bearing_difference, strict_match: see
        ``edges_crossings``.
    folder : output root for the saved figure (``<folder>/plots``).

    Returns
    -------
    (df_xt, s2)
        ``df_xt``: the trajectory points with projected position (``proj``,
        ``proj_m``), lateral distance and average speed; ``s2``: per-trajectory
        entry/exit projection positions.
    """
    edge_length = network_df.loc[network_df['_id'] == edge, 'length'].values[0]
    vt_str = "all"
    _, df_det, data_title = edges_crossings(group_id, crossing_edges, bearing_difference=bearing_difference,
                                            strict_match=strict_match)
    df_sel = df_det.loc[edge]
    # Keep only trajectories that crossed both detectors of this edge.
    df_sel = df_sel.dropna(axis=1, how='any')
    lt = td.get_lt_from_id(df_sel.columns.to_list(), group_id=group_id, gdf=False, **kwargs)
    df_xt = pd.DataFrame()
    s = {'pos_start': [], 'pos_end': [], 'track_id': []}
    # Edge end points: the straight line the trajectories are projected onto.
    e1 = (network_df.loc[network_df['_id'] == edge, ['x1', 'y1']].values[0])
    e2 = (network_df.loc[network_df['_id'] == edge, ['x2', 'y2']].values[0])
    df_transpose = df_sel.transpose()
    # Crossing points at the first and second virtual loop, per trajectory.
    c1 = [(xy) for xy in zip(df_transpose.loc[:, 'cross_x1'], df_transpose.loc[:, 'cross_y1'])]
    c2 = [(xy) for xy in zip(df_transpose.loc[:, 'cross_x2'], df_transpose.loc[:, 'cross_y2'])]
    for ind, el in enumerate(lt.index.get_level_values(0).unique()):
        # Rows of this trajectory between its first and second loop crossing.
        tmp = lt.loc[(el, slice(df_sel.loc['rid1', el], int(df_sel.loc['rid2', el] + 1))), :].copy()
        tmp2 = tmp.apply(help_proj, e1=e1, e2=e2, axis=1)
        _, t1 = distxy.project(e1, e2, c1[ind])
        _, t2 = distxy.project(e1, e2, c2[ind])
        s['pos_start'].append(t1), s['pos_end'].append(t2), s['track_id'].append(el)
        # tmp['proj'] = tmp2
        tmp_dist, _, tmp_proj = zip(*tmp2)
        tmp['lateral_dist'] = tmp_dist
        tmp['proj'] = tmp_proj
        # line_length_yx * 3.6: m-per-interval to km/h, presumably — confirm units.
        tmp['avg_speed'] = tmp['line_length_yx'] * 3.6
        df_xt = pd.concat([df_xt, tmp], axis=0)
    # Relative projection (0..1) scaled to metres along the edge.
    df_xt.loc[:, 'proj_m'] = df_xt.loc[:, 'proj'] * edge_length
    s2 = pd.DataFrame(s, index=s['track_id'])
    if veh_type is not None:
        assert isinstance(veh_type, (str, list))
        if isinstance(veh_type, str):
            veh_type = [veh_type]
        vt = get_vehicle_types(group_id)
        if set(veh_type).issubset(set(vt.type.values.unique())):
            vt_sel = vt.loc[vt.type.isin(veh_type)]
            df_xt = df_xt.loc[df_xt.index.get_level_values(0).isin(vt_sel.track_id.values)]
        else:
            raise Exception(f"Vehicle type not recognized, should be subset of: {set(vt.type.values.unique())}")
        # Abbreviation of the selected types (their uppercase letters) for the file name.
        vt_str = ""
        tmp_str = [w for i in veh_type for w in i if w.isupper()]
        vt_str = vt_str.join(tmp_str)
    if plot:
        fig, ax = plt.subplots(figsize=(12, 8))
        if lines:
            norm = plt.Normalize(0, 50)
            # c_map = cm.ScalarMappable(cmap=colormap, norm=norm)
            for i in df_xt.index.get_level_values(0).unique():
                points = np.array([df_xt.loc[i, 'time'], df_xt.loc[i, 'proj_m']]).T.reshape(-1, 1, 2)
                segments = np.concatenate([points[:-1], points[1:]], axis=1)
                lc = LineCollection(segments, linewidths=psize, cmap=colormap, norm=norm)
                lc.set_array(df_xt.loc[i, 'avg_speed'])
                im = ax.add_collection(lc)
            ax.autoscale()
        else:
            im = ax.scatter(df_xt.time, df_xt.proj_m, s=psize, c=df_xt.speed_1, vmin=0, vmax=50, cmap=colormap)
        if show_det is not None:
            # Mark network features (crossings, traffic signals) and detectors.
            ft = show_det.features.loc[show_det.features.index.get_level_values(0) == edge]
            if len(ft) > 0:
                c_dict = {'crossing': 'dimgrey', 'traffic_signals': 'darkorange'}
                for r, v in ft.iterrows():
                    ax.hlines(v.proj_feature * edge_length, xmin=0, xmax=df_xt.time.max(), linestyles='dashed',
                              colors=c_dict[v.feature], label=v.feature)
            det_loc = show_det.det_loc.loc[show_det.det_loc._id == edge]
            for d in range(1, show_det.n_det + 1):
                ax.hlines(det_loc[f'proj_det{d}'] * edge_length, xmin=0, xmax=df_xt.time.max(), linestyles='dashed',
                          colors='k', label=f'detector {d}')
        ax.grid(True)
        ax.set_title(f'X-T for link {edge}')
        ax.set_xlabel('Time [ms]')
        ax.set_ylabel('Distance (m)')
        if show_det is not None:
            ax.legend()
        fig.suptitle(f"{rd.get_path_dict()['groups'][group_id]}")
        fig.colorbar(im, ax=ax)
        fig.savefig(os.path.join(folder, "plots", f"xt_{rd.get_path_dict()['groups'][group_id].replace('/', '_')}_"
                                       f"{edge}_{vt_str}_bd{bearing_difference}"))
    return df_xt, s2
def create_xt_arterial(specified_detectors, group_id, network_df, crossing_edges, show_det=None, plot=False,
                       colormap='gist_rainbow', lines=False, veh_type=None, psize=1, bearing_difference=90,
                       strict_match=True, folder=path_results, **kwargs):
    """Build (and optionally plot) an x-t diagram along an arterial of several
    consecutive edges.

    Like ``create_xt`` but the space axis spans multiple edges: each edge
    contributes one unit of relative projection, scaled by the average edge
    length (``length_factor``) to approximate metres.

    Parameters
    ----------
    specified_detectors : tuple (edge_ids, detector_numbers); the first edge's
        detector marks the upstream start and the last edge's detector the
        downstream end of the section.
    group_id, network_df, crossing_edges, show_det, plot, lines, veh_type,
    psize, bearing_difference, strict_match, folder : as in ``create_xt``.

    Returns
    -------
    (df_xt, error_traj, s)
        ``df_xt``: trajectory points with projected positions (``proj_m``);
        ``error_traj``: trajectory ids whose projection failed (TypeError);
        ``s``: per-trajectory start/end positions, times, travel time (ms)
        and distance (m).
    """
    edge_lengths = network_df.loc[network_df['_id'].isin(specified_detectors[0]), 'length'].sum()
    # Average edge length: one unit of relative projection ~= this many metres.
    length_factor = edge_lengths / len(specified_detectors[0])
    vt_str = "all"
    _, df_det, data_title = edges_crossings(group_id, crossing_edges=crossing_edges, bearing_difference=bearing_difference,
                                            strict_match=strict_match)
    df_sel = df_det.loc[specified_detectors[0]]
    if specified_detectors[1][0] > 1:
        # First detector is not the edge entry: allow partial observations on
        # the first edge but require full observations downstream.
        id1 = df_sel.loc[specified_detectors[0][0]].dropna(axis=1, how='all').columns.to_list()
        id2 = df_sel.loc[specified_detectors[0][1:]].dropna(axis=1, how='any').columns.to_list()
        id3 = list(set(id1).intersection(id2))
        id3.sort()
        df_sel = df_sel.loc[:, id3]
    else:
        df_sel = df_sel.dropna(axis=1, how='any')
    lt = td.get_lt_from_id(df_sel.columns.to_list(), group_id=group_id, gdf=False, **kwargs)
    df_xt = pd.DataFrame()
    s = {'pos_start': [], 'pos_end': [], 't_start': [], 't_end': [], 'track_id': []}
    df_transpose = df_sel.transpose()
    # Crossing points and times at the section's start and end detector.
    c1 = [(xy) for xy in zip(df_transpose.loc[:, (specified_detectors[0][0], f'cross_x{specified_detectors[1][0]}')],
                             df_transpose.loc[:, (specified_detectors[0][0], f'cross_y{specified_detectors[1][0]}')])]
    c2 = [(xy) for xy in zip(df_transpose.loc[:, (specified_detectors[0][-1], f'cross_x{specified_detectors[1][-1]}')],
                             df_transpose.loc[:, (specified_detectors[0][-1], f'cross_y{specified_detectors[1][-1]}')])]
    ct1 = df_transpose.loc[:, (specified_detectors[0][0], f't{specified_detectors[1][0]}')].values
    ct2 = df_transpose.loc[:, (specified_detectors[0][-1], f't{specified_detectors[1][-1]}')].values
    # End points of every edge in the arterial.
    # NOTE(review): ed1.loc[j] assumes network_df is indexed by edge id — confirm.
    ed1 = network_df.loc[network_df['_id'].isin(specified_detectors[0]), ['x1', 'y1']]
    ed2 = network_df.loc[network_df['_id'].isin(specified_detectors[0]), ['x2', 'y2']]
    e1 = []
    e2 = []
    for i, j in enumerate(specified_detectors[0]):
        e1.append(ed1.loc[j].values)
        e2.append(ed2.loc[j].values)
    error_traj = []
    for ind, el in enumerate(lt.index.get_level_values(0).unique()):
        # Rows of this trajectory between the start and end detector crossings.
        tmp = lt.loc[(el, slice(df_sel.loc[(specified_detectors[0][0], f'rid{specified_detectors[1][0]}'), el],
                                int(df_sel.loc[(specified_detectors[0][-1], f'rid{specified_detectors[1][-1]}'), el]
                                    + 1))), :].copy()
        tmp['proj'] = 0
        # line_length_yx * 3.6: m-per-interval to km/h, presumably — confirm units.
        tmp['avg_speed'] = tmp['line_length_yx'] * 3.6
        _, t1 = distxy.project(e1[0], e2[0], c1[ind])
        _, t2 = distxy.project(e1[-1], e2[-1], c2[ind])
        s['pos_start'].append(t1), s['pos_end'].append(t2 + len(specified_detectors[0]) - 1), s['track_id'].append(el)
        s['t_start'].append(ct1[ind]), s['t_end'].append(ct2[ind])
        # The while/try construct only guards the projection against TypeError
        # (failed tracks are recorded in error_traj); it never loops more than once.
        while True:
            try:
                for i, j in enumerate(specified_detectors[0]):
                    tmp2 = tmp.apply(help_proj, e1=e1[i], e2=e2[i], axis=1)
                    _, _, tmp_proj = zip(*tmp2)
                    # Sum of per-edge projections gives the position along the arterial.
                    tmp['proj'] += tmp_proj
                break
            except TypeError:
                print(el)
                error_traj.append(el)
                break
        if len(tmp) < 1:
            print(el)
            continue
        df_xt = pd.concat([df_xt, tmp], axis=0)
    df_xt.loc[:, 'proj_m'] = df_xt.loc[:, 'proj'] * length_factor
    s = pd.DataFrame(s, index=s['track_id'])
    s.loc[:, 'tt_ms'] = s.t_end - s.t_start
    s.loc[:, 'dist_m'] = (s.pos_end - s.pos_start) * length_factor
    if veh_type is not None:
        assert isinstance(veh_type, (str, list))
        if isinstance(veh_type, str):
            veh_type = [veh_type]
        vt = get_vehicle_types(group_id)
        if set(veh_type).issubset(set(vt.type.values.unique())):
            vt_sel = vt.loc[vt.type.isin(veh_type)]
            df_xt = df_xt.loc[df_xt.index.get_level_values(0).isin(vt_sel.track_id.values)]
        else:
            raise Exception(f"Vehicle type not recognized, should be subset of: {set(vt.type.values.unique())}")
        # Abbreviation of the selected types (their uppercase letters) for the file name.
        vt_str = ""
        tmp_str = [w for i in veh_type for w in i if w.isupper()]
        vt_str = vt_str.join(tmp_str)
    if plot:
        fig, ax = plt.subplots(figsize=(12, 8))
        if lines:
            norm = plt.Normalize(0, 50)
            # c_map = cm.ScalarMappable(cmap=colormap, norm=norm)
            for i in df_xt.index.get_level_values(0).unique():
                points = np.array([df_xt.loc[i, 'time'], df_xt.loc[i, 'proj_m']]).T.reshape(-1, 1, 2)
                segments = np.concatenate([points[:-1], points[1:]], axis=1)
                lc = LineCollection(segments, linewidths=psize, cmap=colormap, norm=norm)
                lc.set_array(df_xt.loc[i, 'avg_speed'])
                im = ax.add_collection(lc)
            ax.autoscale()
        else:
            im = ax.scatter(df_xt.time, df_xt.proj_m, s=psize, c=df_xt.speed_1, vmin=0, vmax=50, cmap=colormap)
        if show_det is not None:
            # Mark network features along the arterial, offset by the edges
            # preceding the feature's edge.
            ft = show_det.features.loc[show_det.features.index.get_level_values(0).isin(specified_detectors[0])]
            if len(ft) > 0:
                c_dict = {'crossing': 'dimgrey', 'traffic_signals': 'darkorange'}
                for r, v in enumerate(ft.iterrows()):
                    v_add = specified_detectors[0].index(v[0][0])
                    ax.hlines((v[1].proj_feature + len(specified_detectors[0][:v_add])) * length_factor, xmin=0,
                              xmax=df_xt.time.max(),
                              linestyles='dashed', colors=c_dict[v[1].feature], label=v[1].feature)
            det_loc = show_det.det_loc.loc[show_det.det_loc._id.isin([specified_detectors[0][0],
                                                                      specified_detectors[0][-1]])]
            for r, d in enumerate(specified_detectors[1]):
                if r == 0:
                    ax.hlines(det_loc[det_loc._id == specified_detectors[0][0]].loc[:, f'proj_det{d}'].values[0]
                              * length_factor,
                              xmin=0, xmax=df_xt.time.max(), linestyles='dashed', colors='darkgreen',
                              label=f'start')
                else:
                    ax.hlines((det_loc[det_loc._id == specified_detectors[0][-1]].loc[:, f'proj_det{d}'].values[0]
                               + len(specified_detectors[0][:-1])) * length_factor, xmin=0, xmax=df_xt.time.max(),
                              linestyles='dashed',
                              colors='darkred', label=f'end')
        ax.grid(True)
        ax.set_title(f'X-T for link {specified_detectors[0]}')
        ax.set_xlabel('Time [ms]')
        ax.set_ylabel('Distance (m)')
        if show_det is not None:
            ax.legend()
        fig.suptitle(f"{rd.get_path_dict()['groups'][group_id]}")
        fig.colorbar(im, ax=ax)
        fig.savefig(os.path.join(folder, "plots", f"xt_{rd.get_path_dict()['groups'][group_id].replace('/', '_')}_"
                                       f"{specified_detectors[0][0]}_{specified_detectors[0][-1]}_"
                                       f"arterial_{vt_str}_bd{bearing_difference}"))
    return df_xt, error_traj, s
def get_tt_from_xt(df_xt, bins=20):
    """Derive per-trajectory travel times from an x-t DataFrame and plot a
    histogram of them.

    The travel time of a trajectory is the span between its first and last
    timestamp in *df_xt* (index level 0 = track id). A histogram of the
    travel times in seconds is drawn on the current matplotlib figure.

    Returns a DataFrame with columns ``track_id``, ``tt`` (ms) and ``tts`` (s).
    """
    track_ids = df_xt.index.get_level_values(0).unique()
    travel_times = []
    for tid in track_ids:
        timestamps = df_xt.loc[tid, 'time']
        travel_times.append(timestamps.iloc[-1] - timestamps.iloc[0])
    result = pd.DataFrame({'track_id': track_ids, 'tt': travel_times})
    result['tts'] = result['tt'] / 1000
    plt.hist(result.tts, bins=bins, edgecolor='k')
    plt.title('Travel time')
    plt.xlabel('Seconds')
    return result
def help_proj(row, e1, e2, delta=0.0):
    """Project the point ``(row['x_1'], row['y_1'])`` onto the segment *e1*-*e2*.

    Returns the ``(distance, intersection_point, t)`` tuple from
    ``leuvenmapmatching``'s ``distance_point_to_segment``, where ``t`` is the
    relative position along the segment.
    """
    point = (row['x_1'], row['y_1'])
    return distxy.distance_point_to_segment(s1=e1, s2=e2, p=point, delta=delta)
| [
"matplotlib.pyplot.title",
"numpy.arange",
"os.path.join",
"matplotlib.pyplot.Normalize",
"pandas.DataFrame",
"pneumapackage.iodata.get_group",
"pneumapackage.compute.vehicle_crossings",
"matplotlib.pyplot.close",
"test_data.get_hdf_names",
"matplotlib.pyplot.subplots",
"pandas.concat",
"matpl... | [((1290, 1339), 'pneumapackage.compute.vehicle_crossings', 'cp.vehicle_crossings', (['line_traj', 'df_det'], {}), '(line_traj, df_det, **kwargs)\n', (1310, 1339), True, 'import pneumapackage.compute as cp\n'), ((1554, 1580), 'test_data.get_hdf_names', 'td.get_hdf_names', (['group_id'], {}), '(group_id)\n', (1570, 1580), True, 'import test_data as td\n'), ((1596, 1613), 'pneumapackage.iodata.get_hdf_path', 'rd.get_hdf_path', ([], {}), '()\n', (1611, 1613), True, 'import pneumapackage.iodata as rd\n'), ((1634, 1685), 'pneumapackage.iodata.get_from_hdf', 'rd.get_from_hdf', (['hdf_path'], {'key_id': 'pid', 'result': '"""ids"""'}), "(hdf_path, key_id=pid, result='ids')\n", (1649, 1685), True, 'import pneumapackage.iodata as rd\n'), ((2322, 2373), 'test_data.get_lt', 'td.get_lt', ([], {'group_id': 'group_id', 'edges': 'edges', 'gdf': '(True)'}), '(group_id=group_id, edges=edges, gdf=True)\n', (2331, 2373), True, 'import test_data as td\n'), ((4445, 4536), 'pandas.DataFrame', 'pd.DataFrame', (["df_cycle['passing']"], {'index': "df_cycle['time_step']", 'columns': 'df_sel.columns'}), "(df_cycle['passing'], index=df_cycle['time_step'], columns=\n df_sel.columns)\n", (4457, 4536), True, 'import pandas as pd\n'), ((8474, 8591), 'pandas.DataFrame', 'pd.DataFrame', (["df_st['count']"], {'index': "df_st['time_step']", 'columns': "[f'count_{i[0]}_{i[1]}' for i in edge_selection]"}), "(df_st['count'], index=df_st['time_step'], columns=[\n f'count_{i[0]}_{i[1]}' for i in edge_selection])\n", (8486, 8591), True, 'import pandas as pd\n'), ((11331, 11348), 'pneumapackage.iodata.get_hdf_path', 'rd.get_hdf_path', ([], {}), '()\n', (11346, 11348), True, 'import pneumapackage.iodata as rd\n'), ((11366, 11388), 'pneumapackage.iodata.get_group', 'rd.get_group', (['group_id'], {}), '(group_id)\n', (11378, 11388), True, 'import pneumapackage.iodata as rd\n'), ((12114, 12135), 'pandas.DataFrame', 'pd.DataFrame', (['df_dict'], {}), '(df_dict)\n', (12126, 12135), True, 'import pandas 
as pd\n'), ((12148, 12218), 'pneumapackage.iodata.get_from_hdf', 'rd.get_from_hdf', (['hdf_path'], {'key_id': "(group_path + '/all_id')", 'result': '"""ids"""'}), "(hdf_path, key_id=group_path + '/all_id', result='ids')\n", (12163, 12218), True, 'import pneumapackage.iodata as rd\n'), ((13153, 13167), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13165, 13167), True, 'import pandas as pd\n'), ((14336, 14372), 'pandas.DataFrame', 'pd.DataFrame', (['s'], {'index': "s['track_id']"}), "(s, index=s['track_id'])\n", (14348, 14372), True, 'import pandas as pd\n'), ((18302, 18316), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (18314, 18316), True, 'import pandas as pd\n'), ((20784, 20820), 'pandas.DataFrame', 'pd.DataFrame', (['s'], {'index': "s['track_id']"}), "(s, index=s['track_id'])\n", (20796, 20820), True, 'import pandas as pd\n'), ((24905, 24947), 'matplotlib.pyplot.hist', 'plt.hist', (['df.tts'], {'bins': 'bins', 'edgecolor': '"""k"""'}), "(df.tts, bins=bins, edgecolor='k')\n", (24913, 24947), True, 'import matplotlib.pyplot as plt\n'), ((24952, 24976), 'matplotlib.pyplot.title', 'plt.title', (['"""Travel time"""'], {}), "('Travel time')\n", (24961, 24976), True, 'import matplotlib.pyplot as plt\n'), ((24981, 25002), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Seconds"""'], {}), "('Seconds')\n", (24991, 25002), True, 'import matplotlib.pyplot as plt\n'), ((25112, 25176), 'leuvenmapmatching.util.dist_euclidean.distance_point_to_segment', 'distxy.distance_point_to_segment', ([], {'s1': 'e1', 's2': 'e2', 'p': 'p', 'delta': 'delta'}), '(s1=e1, s2=e2, p=p, delta=delta)\n', (25144, 25176), True, 'import leuvenmapmatching.util.dist_euclidean as distxy\n'), ((9199, 9228), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (9211, 9228), True, 'import matplotlib.pyplot as plt\n'), ((9588, 9599), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9597, 9599), True, 'import matplotlib.pyplot as 
plt\n'), ((9618, 9632), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9630, 9632), True, 'import matplotlib.pyplot as plt\n'), ((10147, 10161), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10159, 10161), True, 'import matplotlib.pyplot as plt\n'), ((13853, 13884), 'leuvenmapmatching.util.dist_euclidean.project', 'distxy.project', (['e1', 'e2', 'c1[ind]'], {}), '(e1, e2, c1[ind])\n', (13867, 13884), True, 'import leuvenmapmatching.util.dist_euclidean as distxy\n'), ((13901, 13932), 'leuvenmapmatching.util.dist_euclidean.project', 'distxy.project', (['e1', 'e2', 'c2[ind]'], {}), '(e1, e2, c2[ind])\n', (13915, 13932), True, 'import leuvenmapmatching.util.dist_euclidean as distxy\n'), ((14231, 14262), 'pandas.concat', 'pd.concat', (['[df_xt, tmp]'], {'axis': '(0)'}), '([df_xt, tmp], axis=0)\n', (14240, 14262), True, 'import pandas as pd\n'), ((15055, 15084), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (15067, 15084), True, 'import matplotlib.pyplot as plt\n'), ((19908, 19945), 'leuvenmapmatching.util.dist_euclidean.project', 'distxy.project', (['e1[0]', 'e2[0]', 'c1[ind]'], {}), '(e1[0], e2[0], c1[ind])\n', (19922, 19945), True, 'import leuvenmapmatching.util.dist_euclidean as distxy\n'), ((19962, 20001), 'leuvenmapmatching.util.dist_euclidean.project', 'distxy.project', (['e1[-1]', 'e2[-1]', 'c2[ind]'], {}), '(e1[-1], e2[-1], c2[ind])\n', (19976, 20001), True, 'import leuvenmapmatching.util.dist_euclidean as distxy\n'), ((20678, 20709), 'pandas.concat', 'pd.concat', (['[df_xt, tmp]'], {'axis': '(0)'}), '([df_xt, tmp], axis=0)\n', (20687, 20709), True, 'import pandas as pd\n'), ((21614, 21643), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (21626, 21643), True, 'import matplotlib.pyplot as plt\n'), ((2779, 2812), 'os.path.join', 'os.path.join', (['folder', '"""crossings"""'], {}), "(folder, 'crossings')\n", (2791, 2812), 
False, 'import os\n'), ((2951, 2984), 'os.path.join', 'os.path.join', (['folder', '"""crossings"""'], {}), "(folder, 'crossings')\n", (2963, 2984), False, 'import os\n'), ((3244, 3345), 'pneumapackage.compute.vehicle_crossings', 'cp.vehicle_crossings', (['lt', 'ds_ft'], {'bearing_difference': 'bearing_difference', 'strict_match': 'strict_match'}), '(lt, ds_ft, bearing_difference=bearing_difference,\n strict_match=strict_match)\n', (3264, 3345), True, 'import pneumapackage.compute as cp\n'), ((3359, 3457), 'pneumapackage.compute.vehicle_crossings', 'cp.vehicle_crossings', (['lt', 'ds'], {'bearing_difference': 'bearing_difference', 'strict_match': 'strict_match'}), '(lt, ds, bearing_difference=bearing_difference,\n strict_match=strict_match)\n', (3379, 3457), True, 'import pneumapackage.compute as cp\n'), ((15122, 15142), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['(0)', '(50)'], {}), '(0, 50)\n', (15135, 15142), True, 'import matplotlib.pyplot as plt\n'), ((21681, 21701), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['(0)', '(50)'], {}), '(0, 50)\n', (21694, 21701), True, 'import matplotlib.pyplot as plt\n'), ((3581, 3614), 'os.path.join', 'os.path.join', (['folder', '"""crossings"""'], {}), "(folder, 'crossings')\n", (3593, 3614), False, 'import os\n'), ((3745, 3778), 'os.path.join', 'os.path.join', (['folder', '"""crossings"""'], {}), "(folder, 'crossings')\n", (3757, 3778), False, 'import os\n'), ((4921, 4939), 'numpy.diff', 'np.diff', (['tmp.index'], {}), '(tmp.index)\n', (4928, 4939), True, 'import numpy as np\n'), ((15401, 15450), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (15415, 15450), True, 'import numpy as np\n'), ((15472, 15540), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {'linewidths': 'psize', 'cmap': 'colormap', 'norm': 'norm'}), '(segments, linewidths=psize, cmap=colormap, norm=norm)\n', (15486, 15540), False, 'from 
matplotlib.collections import LineCollection\n'), ((21960, 22009), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (21974, 22009), True, 'import numpy as np\n'), ((22031, 22099), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {'linewidths': 'psize', 'cmap': 'colormap', 'norm': 'norm'}), '(segments, linewidths=psize, cmap=colormap, norm=norm)\n', (22045, 22099), False, 'from matplotlib.collections import LineCollection\n'), ((2579, 2597), 'pneumapackage.iodata.get_path_dict', 'rd.get_path_dict', ([], {}), '()\n', (2595, 2597), True, 'import pneumapackage.iodata as rd\n'), ((3084, 3101), 'test_network.test_network', 'tn.test_network', ([], {}), '()\n', (3099, 3101), True, 'import test_network as tn\n'), ((6609, 6648), 'numpy.arange', 'np.arange', (['tmp.index[0]', '(i + step)', 'step'], {}), '(tmp.index[0], i + step, step)\n', (6618, 6648), True, 'import numpy as np\n'), ((15297, 15353), 'numpy.array', 'np.array', (["[df_xt.loc[i, 'time'], df_xt.loc[i, 'proj_m']]"], {}), "([df_xt.loc[i, 'time'], df_xt.loc[i, 'proj_m']])\n", (15305, 15353), True, 'import numpy as np\n'), ((16783, 16801), 'pneumapackage.iodata.get_path_dict', 'rd.get_path_dict', ([], {}), '()\n', (16799, 16801), True, 'import pneumapackage.iodata as rd\n'), ((21856, 21912), 'numpy.array', 'np.array', (["[df_xt.loc[i, 'time'], df_xt.loc[i, 'proj_m']]"], {}), "([df_xt.loc[i, 'time'], df_xt.loc[i, 'proj_m']])\n", (21864, 21912), True, 'import numpy as np\n'), ((24193, 24211), 'pneumapackage.iodata.get_path_dict', 'rd.get_path_dict', ([], {}), '()\n', (24209, 24211), True, 'import pneumapackage.iodata as rd\n'), ((6907, 6947), 'numpy.arange', 'np.arange', (['i', '(tmp.index[-1] + step)', 'step'], {}), '(i, tmp.index[-1] + step, step)\n', (6916, 6947), True, 'import numpy as np\n'), ((7107, 7151), 'numpy.arange', 'np.arange', (['ls_tmp[0]', '(ls_tmp[1] + step)', 'step'], {}), '(ls_tmp[0], ls_tmp[1] + 
step, step)\n', (7116, 7151), True, 'import numpy as np\n'), ((7374, 7418), 'numpy.arange', 'np.arange', (['ls_tmp[0]', '(ls_tmp[1] + step)', 'step'], {}), '(ls_tmp[0], ls_tmp[1] + step, step)\n', (7383, 7418), True, 'import numpy as np\n'), ((16913, 16931), 'pneumapackage.iodata.get_path_dict', 'rd.get_path_dict', ([], {}), '()\n', (16929, 16931), True, 'import pneumapackage.iodata as rd\n'), ((24323, 24341), 'pneumapackage.iodata.get_path_dict', 'rd.get_path_dict', ([], {}), '()\n', (24339, 24341), True, 'import pneumapackage.iodata as rd\n')] |
import pandas as pd
import csv
import numpy as np
import glob
def gain_pools(directory, predmonth, stocknum):
    """Merge the stock pools of all models selected for one prediction month.

    Reads ``SelectedModels<predmonth>.csv`` under ``directory``; every cell
    from the third column onward is a path to a model folder.  The first data
    row of each folder's ``SP_<predmonth>_<predmonth>_<stocknum>.txt`` file is
    collected, de-duplicated (first-seen order, blanks dropped) and written to
    ``StockPool_<predmonth>.txt`` under ``for<predmonth>``.

    :param directory: report folder containing the ``for<predmonth>`` subpath
    :param predmonth: prediction month string, e.g. ``'201502'``
    :param stocknum: pool size embedded in the pool file names
    """
    selected_model = directory + 'for' + predmonth + '\\SelectedModels' + predmonth + '.csv'
    df = pd.read_csv(selected_model)
    # `.ix` was removed in pandas 1.0; the indices here are positional -> .iloc
    df = df.iloc[:, 2:]
    stockpool = []
    for j in range(len(df.columns)):
        for i in range(len(df.index)):
            try:
                stockpool_name = '{0}\\SP_{1}_{1}_{2}.txt'.format(df.iloc[i, j], predmonth, str(stocknum))
                with open(stockpool_name) as f:
                    next(f, None)  # skip the header line
                    for row in csv.reader(f, delimiter='\t'):
                        # only the first data row holds the pool; column 0 is a label
                        stockpool.extend(row[1:])
                        break
            # OSError also covers NaN cells that format into non-existent paths,
            # which the original TypeError-only handler let crash
            except (TypeError, OSError):
                pass
    # de-duplicate while keeping first-seen order; drop blank entries
    # (the original unconditionally kept the very first entry, even if blank)
    stockpool_final = []
    for stock in stockpool:
        if stock not in stockpool_final and stock not in ('', ' '):
            stockpool_final.append(stock)
    if not stockpool_final:
        return  # nothing collected for this month; write no output file
    writing_path = directory + 'for' + predmonth + '\\' + 'StockPool_' + predmonth + '.txt'
    with open(writing_path, 'w', newline='') as fw:
        # one ticker per row, mirroring the original np.array([...]).T layout
        csv.writer(fw, delimiter='\t').writerows([[stock] for stock in stockpool_final])
# Prediction months: every month from 2015-02 through 2018-02 inclusive.
pred_months = ['%d%02d' % (yr, mo)
               for yr in range(2015, 2019)
               for mo in range(1, 13)
               if (2015, 2) <= (yr, mo) <= (2018, 2)]
stockNum = 500
path0 = 'D:/rongshidata/experiment_data_1'
# reportPaths = glob.glob(path0 + '/monthly/ReportXgb_V*_V*_Index*_MP*/')
reportPaths = glob.glob(path0 + '/monthly/ReportDF_V*_V*_Index*_MP*/')
# Merge the selected models' stock pools for every report folder and month.
for reportPath in reportPaths:
    for pred_month in pred_months:
        gain_pools(reportPath, pred_month, stocknum=stockNum)
        print(pred_month, 'has been done.')
| [
"csv.reader",
"csv.writer",
"pandas.read_csv",
"numpy.array",
"glob.glob"
] | [((2037, 2093), 'glob.glob', 'glob.glob', (["(path0 + '/monthly/ReportDF_V*_V*_Index*_MP*/')"], {}), "(path0 + '/monthly/ReportDF_V*_V*_Index*_MP*/')\n", (2046, 2093), False, 'import glob\n'), ((215, 242), 'pandas.read_csv', 'pd.read_csv', (['selected_model'], {}), '(selected_model)\n', (226, 242), True, 'import pandas as pd\n'), ((1101, 1128), 'numpy.array', 'np.array', (['[stockpool_final]'], {}), '([stockpool_final])\n', (1109, 1128), True, 'import numpy as np\n'), ((1289, 1319), 'csv.writer', 'csv.writer', (['fw'], {'delimiter': '"""\t"""'}), "(fw, delimiter='\\t')\n", (1299, 1319), False, 'import csv\n'), ((590, 619), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (600, 619), False, 'import csv\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import dataset as data
import utils
# Load the dataset, normalise inputs, and keep a 15000-sample training
# subset plus a 1000-sample test subset.
X, y, tX, ty = data.get_data()
X = data.normalize(X)
tX = data.normalize(tX)
X = X[:15000]
tX = tX[:1000]
y = y[:15000]
ty = ty[:1000]
#region init w and b
# Small random initialisation for a 3-layer MLP: input -> 400 -> 100 -> 10.
k = 1e-2  # scale factor keeping the initial weights close to zero
w1 = np.random.uniform(-10, 10, (X.shape[1], 400)) * k
b1 = np.random.uniform(-10, 10, (1, 400)) * k
w2 = np.random.uniform(-10, 10, (400, 100)) * k
b2 = np.random.uniform(-10, 10, (1, 100)) * k
w3 = np.random.uniform(-10, 10, (100, 10)) * k
b3 = np.random.uniform(-10, 10, (1, 10)) * k
#endregion
#region hyperparams
epoch = int(1e2)  # number of full-batch training iterations
lr = 8e-1  # learning rate
L = []  # cross-entropy loss history, one entry per epoch
#endregion
#region learning
for i in range (1, epoch + 1):
    # progress report every 10% of the run
    if i % (epoch / 10) == 0:
        print("iteracija ", i)
    X, y = utils.Randomize(X, y)  # shuffle samples and labels together
    #region feed forward
    z1 = X @ w1 + b1
    a1 = utils.ReLU(z1)
    z2 = a1 @ w2 + b2
    a2 = utils.ReLU(z2)
    z3 = a2 @ w3 + b3
    yh = utils.Softmax(z3)  # predicted class probabilities
    # cross-entropy loss; y appears to be one-hot (cf. argmax(..., axis=1) below)
    L.append(utils.CELoss(y, yh))
    #endregion
    #region back propagation
    # softmax + cross-entropy gradient, averaged over the batch
    dz3 = (yh - y) / y.shape[0]
    dw3 = a2.transpose() @ dz3
    db3 = dz3.sum(axis=0)
    da2 = dz3 @ w3.transpose()
    dz2 = utils.dReLU(z2) * da2
    dw2 = a1.transpose() @ dz2
    db2 = dz2.sum(axis=0)
    da1 = dz2 @ w2.transpose()
    dz1 = utils.dReLU(z1) * da1
    dw1 = X.transpose() @ dz1
    db1 = dz1.sum(axis=0)
    #endregion
    #region learning
    # plain gradient-descent parameter update
    w3 -= lr * dw3
    b3 -= lr * db3
    w2 -= lr * dw2
    b2 -= lr * db2
    w1 -= lr * dw1
    b1 -= lr * db1
    #endregion
#endregion
#region testing
# Forward pass on the held-out test subset.
tz1 = tX @ w1 + b1
ta1 = utils.ReLU(tz1)
tz2 = ta1 @ w2 + b2
ta2 = utils.ReLU(tz2)
tz3 = ta2 @ w3 + b3
tyh = utils.Softmax(tz3)
#endregion
#region plot
# Number of correctly classified test samples (one-hot labels vs predictions).
print( np.sum( np.argmax( ty, axis=1 )==np.argmax( tyh, axis=1 ) ) )
plt.plot(L)
_, ax = plt.subplots(1, 2)
ax[0].matshow(ty)
ax[1].matshow(tyh)
plt.show()
#endregion
# Persist each layer's weights and biases as CSV files.
parameters = [w1, b1, w2, b2, w3, b3]
file_names = []
for i in range (0, len(parameters) // 2):
    file_names.append("parameters/parameters_w" + str(i+1) + ".csv")
    pd.DataFrame(parameters[2*i]).to_csv(file_names[-1], header=None)
    file_names.append("parameters/parameters_b" + str(i+1) + ".csv")
    pd.DataFrame(parameters[2*i+1]).to_csv(file_names[-1], header=None)
| [
"pandas.DataFrame",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"utils.dReLU",
"matplotlib.pyplot.plot",
"utils.Randomize",
"numpy.argmax",
"utils.Softmax",
"dataset.normalize",
"dataset.get_data",
"utils.ReLU",
"utils.CELoss",
"matplotlib.pyplot.subplots"
] | [((124, 139), 'dataset.get_data', 'data.get_data', ([], {}), '()\n', (137, 139), True, 'import dataset as data\n'), ((145, 162), 'dataset.normalize', 'data.normalize', (['X'], {}), '(X)\n', (159, 162), True, 'import dataset as data\n'), ((168, 186), 'dataset.normalize', 'data.normalize', (['tX'], {}), '(tX)\n', (182, 186), True, 'import dataset as data\n'), ((1596, 1611), 'utils.ReLU', 'utils.ReLU', (['tz1'], {}), '(tz1)\n', (1606, 1611), False, 'import utils\n'), ((1639, 1654), 'utils.ReLU', 'utils.ReLU', (['tz2'], {}), '(tz2)\n', (1649, 1654), False, 'import utils\n'), ((1682, 1700), 'utils.Softmax', 'utils.Softmax', (['tz3'], {}), '(tz3)\n', (1695, 1700), False, 'import utils\n'), ((1796, 1807), 'matplotlib.pyplot.plot', 'plt.plot', (['L'], {}), '(L)\n', (1804, 1807), True, 'import matplotlib.pyplot as plt\n'), ((1816, 1834), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (1828, 1834), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1882), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1880, 1882), True, 'import matplotlib.pyplot as plt\n'), ((283, 328), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', '(X.shape[1], 400)'], {}), '(-10, 10, (X.shape[1], 400))\n', (300, 328), True, 'import numpy as np\n'), ((338, 374), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', '(1, 400)'], {}), '(-10, 10, (1, 400))\n', (355, 374), True, 'import numpy as np\n'), ((385, 423), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', '(400, 100)'], {}), '(-10, 10, (400, 100))\n', (402, 423), True, 'import numpy as np\n'), ((433, 469), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', '(1, 100)'], {}), '(-10, 10, (1, 100))\n', (450, 469), True, 'import numpy as np\n'), ((480, 517), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', '(100, 10)'], {}), '(-10, 10, (100, 10))\n', (497, 517), True, 'import numpy as np\n'), ((527, 562), 'numpy.random.uniform', 
'np.random.uniform', (['(-10)', '(10)', '(1, 10)'], {}), '(-10, 10, (1, 10))\n', (544, 562), True, 'import numpy as np\n'), ((771, 792), 'utils.Randomize', 'utils.Randomize', (['X', 'y'], {}), '(X, y)\n', (786, 792), False, 'import utils\n'), ((849, 863), 'utils.ReLU', 'utils.ReLU', (['z1'], {}), '(z1)\n', (859, 863), False, 'import utils\n'), ((896, 910), 'utils.ReLU', 'utils.ReLU', (['z2'], {}), '(z2)\n', (906, 910), False, 'import utils\n'), ((943, 960), 'utils.Softmax', 'utils.Softmax', (['z3'], {}), '(z3)\n', (956, 960), False, 'import utils\n'), ((975, 994), 'utils.CELoss', 'utils.CELoss', (['y', 'yh'], {}), '(y, yh)\n', (987, 994), False, 'import utils\n'), ((1176, 1191), 'utils.dReLU', 'utils.dReLU', (['z2'], {}), '(z2)\n', (1187, 1191), False, 'import utils\n'), ((1297, 1312), 'utils.dReLU', 'utils.dReLU', (['z1'], {}), '(z1)\n', (1308, 1312), False, 'import utils\n'), ((1741, 1762), 'numpy.argmax', 'np.argmax', (['ty'], {'axis': '(1)'}), '(ty, axis=1)\n', (1750, 1762), True, 'import numpy as np\n'), ((1766, 1788), 'numpy.argmax', 'np.argmax', (['tyh'], {'axis': '(1)'}), '(tyh, axis=1)\n', (1775, 1788), True, 'import numpy as np\n'), ((2065, 2096), 'pandas.DataFrame', 'pd.DataFrame', (['parameters[2 * i]'], {}), '(parameters[2 * i])\n', (2077, 2096), True, 'import pandas as pd\n'), ((2204, 2239), 'pandas.DataFrame', 'pd.DataFrame', (['parameters[2 * i + 1]'], {}), '(parameters[2 * i + 1])\n', (2216, 2239), True, 'import pandas as pd\n')] |
import argparse
import os
import random
import shutil
import warnings
import sys
warnings.filterwarnings("ignore")
from keras import backend as K
import numpy as np
from PIL import Image, ImageFilter
from skimage.measure import compare_ssim as SSIM
import keras
import tensorflow as tf
import os
from helper import load_data
from helper import Coverage
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin execution to GPU 0
# Locations of datasets, trained models and coverage output, relative to this script.
DATA_DIR = "../data/"
MODEL_DIR = "../models/"
RESULT_DIR = "../coverage/"
# Supported dataset identifiers.
MNIST = "mnist"
CIFAR = "cifar"
SVHN = "svhn"
DATASET_NAMES = [MNIST, CIFAR, SVHN]
# Adversarial attack identifiers.
BIM = "bim"
CW = "cw"
FGSM = "fgsm"
JSMA = "jsma"
PGD = "pgd"
APGD = "apgd"
DF = "deepfool"
NF = "newtonfool"
SA = "squareattack"
ST = "spatialtransformation"
ATTACK_NAMES = [APGD, BIM, CW, DF, FGSM, JSMA, NF, PGD, SA, ST]
# helper function
if __name__ == '__main__':
    # Measure TKNP coverage on growing prefixes of the MNIST test set
    # against a trained LeNet-1 model.
    data_name = 'mnist'
    net_name = 'lenet1'
    l = [0, 8]  # layer bounds handed to Coverage.TKNP
    x_train, y_train, x_test, y_test = load_data(data_name)
    # load the previously trained model
    from keras.models import load_model
    model_path = "{}{}/{}.h5".format(MODEL_DIR, data_name, net_name)
    model = load_model(model_path)
    model.summary()
    tknp_all = np.array([])
    for step in range(50):
        cov = Coverage(model, x_train, y_train, x_test, y_test, x_test[:200 * step])
        tknp = cov.TKNP(l)
        tknp_all = np.append(tknp_all, tknp)
        # append this prefix's result to the running report
        with open("testing_coverage_result.txt", "a") as f:
            f.write("\n------------------------------------------------------------------------------\n")
            f.write('x: {} \n'.format(step * 200 + 1))
            f.write('TKNP: {} \n'.format(tknp))
    np.save('Q2_original/tknp_all.npy', tknp_all)
| [
"keras.models.load_model",
"numpy.save",
"helper.load_data",
"warnings.filterwarnings",
"numpy.append",
"helper.Coverage",
"numpy.array"
] | [((82, 115), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (105, 115), False, 'import warnings\n'), ((935, 953), 'helper.load_data', 'load_data', (['dataset'], {}), '(dataset)\n', (944, 953), False, 'from helper import load_data\n'), ((1145, 1167), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (1155, 1167), False, 'from keras.models import load_model\n'), ((1205, 1217), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1213, 1217), True, 'import numpy as np\n'), ((1686, 1731), 'numpy.save', 'np.save', (['"""Q2_original/tknp_all.npy"""', 'tknp_all'], {}), "('Q2_original/tknp_all.npy', tknp_all)\n", (1693, 1731), True, 'import numpy as np\n'), ((1267, 1337), 'helper.Coverage', 'Coverage', (['model', 'x_train', 'y_train', 'x_test', 'y_test', 'x_test[0:200 * num]'], {}), '(model, x_train, y_train, x_test, y_test, x_test[0:200 * num])\n', (1275, 1337), False, 'from helper import Coverage\n'), ((1388, 1413), 'numpy.append', 'np.append', (['tknp_all', 'tknp'], {}), '(tknp_all, tknp)\n', (1397, 1413), True, 'import numpy as np\n')] |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from packaging import version
import tsfresh
from tsfresh import extract_features
def _is_awake(hour):
return (((hour >= 6) & (hour <= 23)) | (hour == 0)).astype(int).values
def _is_busy_hours(hour):
return (((hour >= 7) & (hour <= 9)) | (hour >= 16) & (hour <= 19)).astype(int).values
def _is_weekend(weekday):
return (weekday >= 5).values
# Series.dt attribute names used to derive the built-in time feature columns.
TIME_FEATURE = ("MINUTE", "DAY", "DAYOFYEAR", "HOUR", "WEEKDAY", "WEEKOFYEAR", "MONTH")
# Extra features computed from the hour of day (feature name -> generator).
ADDITIONAL_TIME_FEATURE_HOUR = {"IS_AWAKE": _is_awake,
                                "IS_BUSY_HOURS": _is_busy_hours}
# Extra features computed from the weekday (feature name -> generator).
ADDITIONAL_TIME_FEATURE_WEEKDAY = {"IS_WEEKEND": _is_weekend}
def generate_dt_features(input_df, dt_col):
    '''
    Generate datetime-derived feature columns.

    :param input_df: pandas dataframe
    :param dt_col: col name of the datetime in `input_df`
    :return: a copy of `input_df` with one extra column per feature,
             each named "<FEATURE>(<dt_col>)"
    '''
    df = input_df.copy()
    field = df[dt_col]
    # built-in time features
    for attr in TIME_FEATURE:
        if attr == "WEEKOFYEAR" and \
                version.parse(pd.__version__) >= version.parse("1.1.0"):
            # Series.dt.weekofyear is gone in pandas >= 1.1; use isocalendar().
            # pd.Int64Index was removed in pandas 2.0, so convert the ISO week
            # to a plain int64 ndarray for positional assignment instead.
            field_datetime = pd.to_datetime(field.values.astype(np.int64))
            df[attr + "({})".format(dt_col)] = \
                field_datetime.isocalendar().week.astype(np.int64).values
        else:
            df[attr + "({})".format(dt_col)] = getattr(field.dt, attr.lower())
    # additional hour-based features
    hour = field.dt.hour
    for attr in ADDITIONAL_TIME_FEATURE_HOUR:
        df[attr + "({})".format(dt_col)] = ADDITIONAL_TIME_FEATURE_HOUR[attr](hour)
    # additional weekday-based features
    weekday = field.dt.weekday
    for attr in ADDITIONAL_TIME_FEATURE_WEEKDAY:
        df[attr + "({})".format(dt_col)] = ADDITIONAL_TIME_FEATURE_WEEKDAY[attr](weekday)
    return df
def generate_global_features(input_df,
                             column_id,
                             column_sort,
                             default_fc_parameters=None,
                             kind_to_fc_parameters=None):
    '''
    generate global features by tsfresh.
    :param input_df: input dataframe.
    :param column_id: id column name
    :param column_sort: time column name
    :param default_fc_parameters: same as tsfresh.
    :param kind_to_fc_parameters: same as tsfresh; takes precedence when given.
    :return : (a new input_df that contains all generated features,
               the list of names of the added feature columns)
    '''
    if kind_to_fc_parameters is not None:
        global_feature = extract_features(input_df,
                                          column_id=column_id,
                                          column_sort=column_sort,
                                          kind_to_fc_parameters=kind_to_fc_parameters)
    else:
        global_feature = extract_features(input_df,
                                          column_id=column_id,
                                          column_sort=column_sort,
                                          default_fc_parameters=default_fc_parameters)
    res_df = input_df.copy()
    id_list = list(np.unique(input_df[column_id]))
    addtional_feature = []
    for col_name in global_feature.columns:
        # any feature that can not be extracted will be dropped
        if global_feature[col_name].isna().sum() > 0:
            continue
        # broadcast each id's constant feature value to all rows of that id;
        # fixed: select rows by the configured `column_id`, not a hard-coded "id"
        for id_name in id_list:
            res_df.loc[input_df[column_id] == id_name, col_name] = \
                global_feature.loc[id_name][col_name]
        # record the column once (the original appended it once per id)
        addtional_feature.append(col_name)
    return res_df, addtional_feature
| [
"packaging.version.parse",
"tsfresh.extract_features",
"numpy.unique"
] | [((2982, 3103), 'tsfresh.extract_features', 'extract_features', (['input_df'], {'column_id': 'column_id', 'column_sort': 'column_sort', 'kind_to_fc_parameters': 'kind_to_fc_parameters'}), '(input_df, column_id=column_id, column_sort=column_sort,\n kind_to_fc_parameters=kind_to_fc_parameters)\n', (2998, 3103), False, 'from tsfresh import extract_features\n'), ((3261, 3382), 'tsfresh.extract_features', 'extract_features', (['input_df'], {'column_id': 'column_id', 'column_sort': 'column_sort', 'default_fc_parameters': 'default_fc_parameters'}), '(input_df, column_id=column_id, column_sort=column_sort,\n default_fc_parameters=default_fc_parameters)\n', (3277, 3382), False, 'from tsfresh import extract_features\n'), ((3553, 3583), 'numpy.unique', 'np.unique', (['input_df[column_id]'], {}), '(input_df[column_id])\n', (3562, 3583), True, 'import numpy as np\n'), ((1624, 1653), 'packaging.version.parse', 'version.parse', (['pd.__version__'], {}), '(pd.__version__)\n', (1637, 1653), False, 'from packaging import version\n'), ((1657, 1679), 'packaging.version.parse', 'version.parse', (['"""1.1.0"""'], {}), "('1.1.0')\n", (1670, 1679), False, 'from packaging import version\n')] |
''''| Author: <NAME>
| Date: 29/09/2018
| https://github.com/Jeanvit/PySkinDetection
'''
import cv2
import numpy as np
import os
import sys
import time
class SkinDetector(object):
    """Segments skin regions in an image using HSV/YCbCr thresholding
    followed by a watershed-based region refinement."""

    def __init__(self, imageName, saveName):
        """Load the input image and pre-compute its HSV and YCbCr versions."""
        self.image = cv2.imread(imageName)
        self.save_name = saveName
        if self.image is None:
            print("IMAGE NOT FOUND")
            exit(1)
        # print('res = ', cv2.resize(self.image, dsize=(100, 100), interpolation=cv2.INTER_CUBIC)) #self.image = cv2.resize(self.image,(600,600),cv2.INTER_AREA)
        self.HSV_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
        self.YCbCr_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2YCR_CB)
        self.binary_mask_image = self.HSV_image

    # ================================================================================================================================
    def find_skin(self):
        """Run the full pipeline: color segmentation, then watershed refinement,
        and write the result to `save_name`."""
        self.__color_segmentation()
        self.__region_based_segmentation()

    # ================================================================================================================================
    def __color_segmentation(self):
        """Threshold the HSV and YCbCr images and combine both binary masks.
        Range values follow published skin-detection papers plus empirical tuning."""
        hsv_low = np.array([0, 40, 0], dtype="uint8")
        hsv_high = np.array([25, 255, 255], dtype="uint8")
        ycbcr_low = np.array((0, 138, 67), dtype="uint8")
        ycbcr_high = np.array((255, 173, 133), dtype="uint8")
        # white (255) pixels mark values that fall inside the ranges
        ycbcr_mask = cv2.inRange(self.YCbCr_image, ycbcr_low, ycbcr_high)
        hsv_mask = cv2.inRange(self.HSV_image, hsv_low, hsv_high)
        self.binary_mask_image = cv2.add(hsv_mask, ycbcr_mask)

    # ================================================================================================================================
    def __region_based_segmentation(self):
        """Refine the binary mask with morphology and the watershed transform."""
        foreground = cv2.erode(self.binary_mask_image, None, iterations=3)  # remove noise
        # dilation shrinks the certain-background region a little
        dilated = cv2.dilate(self.binary_mask_image, None, iterations=3)
        ret, background = cv2.threshold(dilated, 1, 128,
                                          cv2.THRESH_BINARY)  # background regions -> 128
        # foreground + background together form the watershed seed markers
        marker = cv2.add(foreground, background)
        marker32 = np.int32(marker)  # watershed needs 32SC1 markers
        cv2.watershed(self.image, marker32)
        m = cv2.convertScaleAbs(marker32)  # back to uint8
        # mask the input image with the watershed result
        ret, image_mask = cv2.threshold(m, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        output = cv2.bitwise_and(self.image, self.image, mask=image_mask)
        self.save_image(output)

    # ================================================================================================================================
    def save_image(self, image):
        """Write `image` to the configured output path."""
        cv2.imwrite(self.save_name, image)
def run():
    """Run skin detection over every class folder of the training set,
    mirroring the input layout under the output directory."""
    base_dir = r'E:\Pycharm Projects\GridProject\data\dataset\train'
    output_dir = r'E:\Pycharm Projects\GridProject\data\skin_detect'
    for dir_ in os.listdir(base_dir):
        print(f'running for {dir_}')
        for image in os.listdir(fr'{base_dir}\{dir_}'):
            detector = SkinDetector(imageName=fr'{base_dir}\{dir_}\{image}',
                                    saveName=fr'{output_dir}\{dir_}\{image}')
            detector.find_skin()
            name = fr'{dir_}\{image}'
            sys.stdout.write(f"\rDone {name}")
            sys.stdout.flush()
run() | [
"sys.stdout.write",
"cv2.bitwise_and",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.threshold",
"cv2.add",
"cv2.watershed",
"cv2.imread",
"numpy.array",
"cv2.convertScaleAbs",
"numpy.int32",
"sys.stdout.flush",
"cv2.erode",
"cv2.inRange",
"os.listdir"
] | [((3936, 3956), 'os.listdir', 'os.listdir', (['base_dir'], {}), '(base_dir)\n', (3946, 3956), False, 'import os\n'), ((276, 297), 'cv2.imread', 'cv2.imread', (['imageName'], {}), '(imageName)\n', (286, 297), False, 'import cv2\n'), ((607, 650), 'cv2.cvtColor', 'cv2.cvtColor', (['self.image', 'cv2.COLOR_BGR2HSV'], {}), '(self.image, cv2.COLOR_BGR2HSV)\n', (619, 650), False, 'import cv2\n'), ((678, 724), 'cv2.cvtColor', 'cv2.cvtColor', (['self.image', 'cv2.COLOR_BGR2YCR_CB'], {}), '(self.image, cv2.COLOR_BGR2YCR_CB)\n', (690, 724), False, 'import cv2\n'), ((1508, 1543), 'numpy.array', 'np.array', (['[0, 40, 0]'], {'dtype': '"""uint8"""'}), "([0, 40, 0], dtype='uint8')\n", (1516, 1543), True, 'import numpy as np\n'), ((1571, 1610), 'numpy.array', 'np.array', (['[25, 255, 255]'], {'dtype': '"""uint8"""'}), "([25, 255, 255], dtype='uint8')\n", (1579, 1610), True, 'import numpy as np\n'), ((1641, 1678), 'numpy.array', 'np.array', (['(0, 138, 67)'], {'dtype': '"""uint8"""'}), "((0, 138, 67), dtype='uint8')\n", (1649, 1678), True, 'import numpy as np\n'), ((1708, 1748), 'numpy.array', 'np.array', (['(255, 173, 133)'], {'dtype': '"""uint8"""'}), "((255, 173, 133), dtype='uint8')\n", (1716, 1748), True, 'import numpy as np\n'), ((1876, 1945), 'cv2.inRange', 'cv2.inRange', (['self.YCbCr_image', 'lower_YCbCr_values', 'upper_YCbCr_values'], {}), '(self.YCbCr_image, lower_YCbCr_values, upper_YCbCr_values)\n', (1887, 1945), False, 'import cv2\n'), ((1965, 2028), 'cv2.inRange', 'cv2.inRange', (['self.HSV_image', 'lower_HSV_values', 'upper_HSV_values'], {}), '(self.HSV_image, lower_HSV_values, upper_HSV_values)\n', (1976, 2028), False, 'import cv2\n'), ((2063, 2092), 'cv2.add', 'cv2.add', (['mask_HSV', 'mask_YCbCr'], {}), '(mask_HSV, mask_YCbCr)\n', (2070, 2092), False, 'import cv2\n'), ((2426, 2479), 'cv2.erode', 'cv2.erode', (['self.binary_mask_image', 'None'], {'iterations': '(3)'}), '(self.binary_mask_image, None, iterations=3)\n', (2435, 2479), False, 'import cv2\n'), 
((2527, 2581), 'cv2.dilate', 'cv2.dilate', (['self.binary_mask_image', 'None'], {'iterations': '(3)'}), '(self.binary_mask_image, None, iterations=3)\n', (2537, 2581), False, 'import cv2\n'), ((2733, 2795), 'cv2.threshold', 'cv2.threshold', (['dilated_binary_image', '(1)', '(128)', 'cv2.THRESH_BINARY'], {}), '(dilated_binary_image, 1, 128, cv2.THRESH_BINARY)\n', (2746, 2795), False, 'import cv2\n'), ((2903, 2946), 'cv2.add', 'cv2.add', (['image_foreground', 'image_background'], {}), '(image_foreground, image_background)\n', (2910, 2946), False, 'import cv2\n'), ((3111, 3133), 'numpy.int32', 'np.int32', (['image_marker'], {}), '(image_marker)\n', (3119, 3133), True, 'import numpy as np\n'), ((3170, 3211), 'cv2.watershed', 'cv2.watershed', (['self.image', 'image_marker32'], {}), '(self.image, image_marker32)\n', (3183, 3211), False, 'import cv2\n'), ((3224, 3259), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['image_marker32'], {}), '(image_marker32)\n', (3243, 3259), False, 'import cv2\n'), ((3363, 3424), 'cv2.threshold', 'cv2.threshold', (['m', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(m, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3376, 3424), False, 'import cv2\n'), ((3442, 3498), 'cv2.bitwise_and', 'cv2.bitwise_and', (['self.image', 'self.image'], {'mask': 'image_mask'}), '(self.image, self.image, mask=image_mask)\n', (3457, 3498), False, 'import cv2\n'), ((3734, 3768), 'cv2.imwrite', 'cv2.imwrite', (['self.save_name', 'image'], {}), '(self.save_name, image)\n', (3745, 3768), False, 'import cv2\n'), ((4016, 4049), 'os.listdir', 'os.listdir', (['f"""{base_dir}\\\\{dir_}"""'], {}), "(f'{base_dir}\\\\{dir_}')\n", (4026, 4049), False, 'import os\n'), ((4221, 4255), 'sys.stdout.write', 'sys.stdout.write', (["f'\\rDone {name}'"], {}), "(f'\\rDone {name}')\n", (4237, 4255), False, 'import sys\n'), ((4268, 4286), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4284, 4286), False, 'import sys\n')] |
import os, re
import json
from PIL import Image, ImageDraw
import PIL.ImageFile
import numpy as np
import scipy.ndimage
# Root of the sorted source images.
src = '/Volumes/CKXZ 1/@City/363, FP/Dataset(s)/sorted img_dataset/'
# Parse the facial-landmarks JSON inside a context manager so the file
# handle is closed after loading (the original left it open forever).
with open('/Volumes/CKXZ 1/@City/363, FP/Dataset(s)/improved_facial_landmarks img_dataset_v1/facial_landmarks.json') as _landmarks_file:
    data = json.load(_landmarks_file)
# Destination folder for the aligned output images.
dst = '/Volumes/CKXZ 1/@City/363, FP/Dataset(s)/aligned img_dataset'
def recreate_aligned_images(json_data=data, dst_dir=dst,
output_size=1024, transform_size=4096, enable_padding=True):
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for idx, item in enumerate(json_data['ontheradar'].values()):
#print(item['filename'])
# Parse landmarks
lm = np.array(item['face_1_landmarks'])
lm_chin = lm[0 : 17] # left-right
lm_eyebrow_left = lm[17 : 22] # left-right
lm_eyebrow_right = lm[22 : 27] # left-right
lm_nose = lm[27 : 31] # top-down
lm_nostrils = lm[31 : 36] # top-down
lm_eye_left = lm[36 : 42] # left-clockwise
lm_eye_right = lm[42 : 48] # left-clockwise
lm_mouth_outer = lm[48 : 60] # left-clockwise
lm_mouth_inner = lm[60 : 68] #left-clockwise
# Calculate auxiliary vectors
eye_left = np.mean(lm_eye_left, axis=0)
eye_right = np.mean(lm_eye_right, axis=0)
eye_avg = (eye_left + eye_right) * 0.5
eye_to_eye = eye_right - eye_left
mouth_left = lm_mouth_outer[0]
mouth_right = lm_mouth_outer[6]
mouth_avg = (mouth_left + mouth_right) * 0.5
eye_to_mouth = mouth_avg - eye_avg
# Choose oriented crop rectangle
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] # ??
x /= np.hypot(*x) # normalize
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) # ??
y = np.flipud(x) * [-1, 1]
c = eye_avg + eye_to_mouth * 0.1 # (slightly) displaces eye_avg down the img frame: kind of defines the center of the face
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) # quadrilateral: pre-defines crop area
q_wdth = quad[3, 0] - quad[0, 0]
q_hth = quad[1, 1] - quad[0, 1]
#quad[0, 0] -= q_wdth
#quad[0, 3] += q_wdth
#quad[]
qsize = np.hypot(*x) * 2 # ??
# Load in-the-wild image
src_file = re.sub('/content/drive/My Drive/PIFuHD/SCULPTURES/', src, item['file_path'])
if not os.path.isfile(src_file):
print(f'Cannot find source image: {src_file}')
return
img = Image.open(src_file)
draw = ImageDraw.Draw(img)
draw.ellipse((eye_right[0] - 2, eye_right[1] - 2, eye_right[0] + 2, eye_right[1] + 2), fill=(255, 0, 0, 0))
draw.ellipse((eye_left[0] - 2, eye_left[1] - 2, eye_left[0] + 2, eye_left[1] + 2), fill=(255, 0, 0, 0))
draw.ellipse((eye_avg[0] - 2, eye_avg[1] - 2 , eye_avg[0] + 2, eye_avg[1] + 2), fill=(255, 0, 0, 0))
#img.show()
# Shrink
#shrink = int(np.floor(qsize / output_size * 0.5))
#if shrink > 1:
# print('/'.join(x for x in src_file.split('/')[-4:-2]) + '/' + src_file.split('/')[-1])
# rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
# img = img.resize(size=rsize, resample=Image.ANTIALIAS)
# quad /= shrink
# qsize /= shrink
#img.show()
# Crop
border = max(int(np.rint(qsize * 0.1)), 3)
crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
#crop = (int(np.floor(min(quad[:, 0]) - q_wdth)), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]) + q_wdth)), int(np.ceil(max(quad[:, 1]) + (6 * q_hth))))
#crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
if crop[2] - crop [0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2] # redefines quad within new (cropped) img's coordinates
#img.show()
# Pad
pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), int(np.ceil(max(quad[:, 1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
if enable_padding and max(pad) > border - 4:
img.show()
print('/'.join(x for x in src_file.split('/')[-4:-2]) + '/' + src_file.split('/')[-1])
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.ogrid[:h, :w, :1]
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
blur = qsize * 0.02
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
img = Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
quad += pad[:2]
img.show()
# Transform
img = img.transform(size=(transform_size, transform_size), method=Image.QUAD, data=(quad + 0.5).flatten(), resample=Image.BILINEAR) # ??
img = img.transform(size=(transform_size, transform_size), method=Image.QUAD, data=(quad + 0.5).flatten(),
resample=Image.BILINEAR)
if output_size < transform_size:
img = img.resize((output_size, output_size), Image.ANTIALIAS)
img.show()
"""# Save aligned image
folder = item['folder']
rep = item['rep']
filename = item['filename']
dst_subdir = f'{dst_dir}/{folder}/{rep}'
if not os.path.exists(dst_subdir):
try:
os.mkdir(dst_subdir)
except FileNotFoundError:
os.mkdir('/'.join(dst_subdir.split('/')[0:-1]))
os.mkdir(dst_subdir)
img.save(os.path.join(dst_subdir, filename))"""
recreate_aligned_images() | [
"numpy.stack",
"os.mkdir",
"json.load",
"numpy.median",
"numpy.float32",
"os.path.exists",
"numpy.flipud",
"numpy.clip",
"PIL.Image.open",
"numpy.hypot",
"os.path.isfile",
"numpy.mean",
"numpy.array",
"numpy.rint",
"PIL.ImageDraw.Draw",
"re.sub"
] | [((316, 331), 'json.load', 'json.load', (['data'], {}), '(data)\n', (325, 331), False, 'import json\n'), ((535, 558), 'os.path.exists', 'os.path.exists', (['dst_dir'], {}), '(dst_dir)\n', (549, 558), False, 'import os, re\n'), ((562, 579), 'os.mkdir', 'os.mkdir', (['dst_dir'], {}), '(dst_dir)\n', (570, 579), False, 'import os, re\n'), ((700, 734), 'numpy.array', 'np.array', (["item['face_1_landmarks']"], {}), "(item['face_1_landmarks'])\n", (708, 734), True, 'import numpy as np\n'), ((1168, 1196), 'numpy.mean', 'np.mean', (['lm_eye_left'], {'axis': '(0)'}), '(lm_eye_left, axis=0)\n', (1175, 1196), True, 'import numpy as np\n'), ((1211, 1240), 'numpy.mean', 'np.mean', (['lm_eye_right'], {'axis': '(0)'}), '(lm_eye_right, axis=0)\n', (1218, 1240), True, 'import numpy as np\n'), ((1570, 1582), 'numpy.hypot', 'np.hypot', (['*x'], {}), '(*x)\n', (1578, 1582), True, 'import numpy as np\n'), ((1835, 1889), 'numpy.stack', 'np.stack', (['[c - x - y, c - x + y, c + x + y, c + x - y]'], {}), '([c - x - y, c - x + y, c + x + y, c + x - y])\n', (1843, 1889), True, 'import numpy as np\n'), ((2131, 2207), 're.sub', 're.sub', (['"""/content/drive/My Drive/PIFuHD/SCULPTURES/"""', 'src', "item['file_path']"], {}), "('/content/drive/My Drive/PIFuHD/SCULPTURES/', src, item['file_path'])\n", (2137, 2207), False, 'import os, re\n'), ((2312, 2332), 'PIL.Image.open', 'Image.open', (['src_file'], {}), '(src_file)\n', (2322, 2332), False, 'from PIL import Image, ImageDraw\n'), ((2343, 2362), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (2357, 2362), False, 'from PIL import Image, ImageDraw\n'), ((1677, 1689), 'numpy.flipud', 'np.flipud', (['x'], {}), '(x)\n', (1686, 1689), True, 'import numpy as np\n'), ((2067, 2079), 'numpy.hypot', 'np.hypot', (['*x'], {}), '(*x)\n', (2075, 2079), True, 'import numpy as np\n'), ((2218, 2242), 'os.path.isfile', 'os.path.isfile', (['src_file'], {}), '(src_file)\n', (2232, 2242), False, 'import os, re\n'), ((1524, 1547), 
'numpy.flipud', 'np.flipud', (['eye_to_mouth'], {}), '(eye_to_mouth)\n', (1533, 1547), True, 'import numpy as np\n'), ((1606, 1627), 'numpy.hypot', 'np.hypot', (['*eye_to_eye'], {}), '(*eye_to_eye)\n', (1614, 1627), True, 'import numpy as np\n'), ((1635, 1658), 'numpy.hypot', 'np.hypot', (['*eye_to_mouth'], {}), '(*eye_to_mouth)\n', (1643, 1658), True, 'import numpy as np\n'), ((3112, 3132), 'numpy.rint', 'np.rint', (['(qsize * 0.1)'], {}), '(qsize * 0.1)\n', (3119, 3132), True, 'import numpy as np\n'), ((4406, 4421), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (4416, 4421), True, 'import numpy as np\n'), ((4793, 4828), 'numpy.clip', 'np.clip', (['(mask * 3.0 + 1.0)', '(0.0)', '(1.0)'], {}), '(mask * 3.0 + 1.0, 0.0, 1.0)\n', (4800, 4828), True, 'import numpy as np\n'), ((4876, 4899), 'numpy.clip', 'np.clip', (['mask', '(0.0)', '(1.0)'], {}), '(mask, 0.0, 1.0)\n', (4883, 4899), True, 'import numpy as np\n'), ((4367, 4387), 'numpy.rint', 'np.rint', (['(qsize * 0.3)'], {}), '(qsize * 0.3)\n', (4374, 4387), True, 'import numpy as np\n'), ((4840, 4867), 'numpy.median', 'np.median', (['img'], {'axis': '(0, 1)'}), '(img, axis=(0, 1))\n', (4849, 4867), True, 'import numpy as np\n'), ((4942, 4954), 'numpy.rint', 'np.rint', (['img'], {}), '(img)\n', (4949, 4954), True, 'import numpy as np\n'), ((4575, 4588), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (4585, 4588), True, 'import numpy as np\n'), ((4599, 4620), 'numpy.float32', 'np.float32', (['(w - 1 - x)'], {}), '(w - 1 - x)\n', (4609, 4620), True, 'import numpy as np\n'), ((4645, 4658), 'numpy.float32', 'np.float32', (['y'], {}), '(y)\n', (4655, 4658), True, 'import numpy as np\n'), ((4669, 4690), 'numpy.float32', 'np.float32', (['(h - 1 - y)'], {}), '(h - 1 - y)\n', (4679, 4690), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 18 15:31:36 2017
@author: mirla
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#%%
# AND
entradas = np.array([[0,0],[0,1], [1,0], [1,1]])
saidas = np.array([0,0,0,1])
# OR
#entradas = np.array([[0,0],[0,1], [1,0], [1,1]])
#saidas = np.array([0,1,1,1])
d = np.zeros([4])
pesos = np.array([0.0, 0.0])
taxaAprendizagem = 0.1
b=0
iteracoes = 10
erros = np.zeros([4])
erros_iter = np.zeros([iteracoes,4]) #10x4
#%%
# função de ativação: step function
def stepFunction(soma):
if (soma >= 1):
return 1
return 0
#Função para gerar a decision boundary
def gerar_espaco(pesos,b):
pixels = 100
eixo_x = np.arange(-0.5, 1.8, (1.8 + 0.5)/pixels)
eixo_y = np.arange(-0.5, 1.8, (1.8 + 0.5)/pixels)
xx, yy = np.meshgrid(eixo_x, eixo_y)
pontos = np.c_[xx.ravel(), yy.ravel()]
Z = np.zeros([pontos.shape[0]])
for i in range(Z.shape[0]):
U = pontos[i].dot(pesos)
Z[i] = stepFunction(U+b)
Z = Z.reshape(xx.shape)
return xx,yy,Z
# função de subplots
fig = plt.figure(figsize=(15, 5))
def sub_plots(d,pesos,n,b):
xx, yy, Z = gerar_espaco(pesos,b)
plt.contourf(xx, yy, Z, alpha=0.3)
sns.scatterplot(x=[0,0,1,1], y=[0,1,0,1], hue=d, s=90)
plt.xlim(-0.15,1.2)
plt.ylim(-0.15,1.2)
plt.title(str(n+1) + '° Iteração ')
#%%
# treinamento
for it in range(iteracoes):
for i in range(entradas.shape[0]):
U = entradas[i].dot(pesos)
d[i] = stepFunction(U+b)
erros[i] = saidas[i] - d[i]
for j in range(pesos.shape[0]):
pesos[j] = pesos[j] + (taxaAprendizagem * entradas[i][j] * erros[i])
b += taxaAprendizagem*erros[i]
fig.add_subplot(2,5,it+1)
sub_plots(d,pesos,it,b)
plt.tight_layout()
##salvar os erros por iteração
erros_iter[it] = erros
plt.show()
#%%
# Plot de erros por iteração
fig = plt.figure(figsize=(15, 3))
for i in range(iteracoes):
fig.add_subplot(2,5,i+1)
plt.plot(['[0,0]','[0,1]','[1,0]','[1,1]'],erros_iter[i])
plt.title(str(i+1) + '° Iteração ')
plt.tight_layout()
plt.show()
#%%
aaa = entradas[3].dot(pesos) | [
"matplotlib.pyplot.xlim",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"seaborn.scatterplot",
"matplotlib.pyplot.ylim",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.tight_layout"
] | [((191, 233), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (199, 233), True, 'import numpy as np\n'), ((239, 261), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (247, 261), True, 'import numpy as np\n'), ((354, 367), 'numpy.zeros', 'np.zeros', (['[4]'], {}), '([4])\n', (362, 367), True, 'import numpy as np\n'), ((377, 397), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (385, 397), True, 'import numpy as np\n'), ((456, 469), 'numpy.zeros', 'np.zeros', (['[4]'], {}), '([4])\n', (464, 469), True, 'import numpy as np\n'), ((484, 508), 'numpy.zeros', 'np.zeros', (['[iteracoes, 4]'], {}), '([iteracoes, 4])\n', (492, 508), True, 'import numpy as np\n'), ((1144, 1171), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (1154, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1951, 1961), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1959, 1961), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 3)'}), '(figsize=(15, 3))\n', (2016, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2231), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2229, 2231), True, 'import matplotlib.pyplot as plt\n'), ((738, 780), 'numpy.arange', 'np.arange', (['(-0.5)', '(1.8)', '((1.8 + 0.5) / pixels)'], {}), '(-0.5, 1.8, (1.8 + 0.5) / pixels)\n', (747, 780), True, 'import numpy as np\n'), ((793, 835), 'numpy.arange', 'np.arange', (['(-0.5)', '(1.8)', '((1.8 + 0.5) / pixels)'], {}), '(-0.5, 1.8, (1.8 + 0.5) / pixels)\n', (802, 835), True, 'import numpy as np\n'), ((848, 875), 'numpy.meshgrid', 'np.meshgrid', (['eixo_x', 'eixo_y'], {}), '(eixo_x, eixo_y)\n', (859, 875), True, 'import numpy as np\n'), ((935, 962), 'numpy.zeros', 'np.zeros', (['[pontos.shape[0]]'], {}), '([pontos.shape[0]])\n', (943, 962), True, 'import numpy as np\n'), 
((1247, 1281), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'alpha': '(0.3)'}), '(xx, yy, Z, alpha=0.3)\n', (1259, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1347), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '[0, 0, 1, 1]', 'y': '[0, 1, 0, 1]', 'hue': 'd', 's': '(90)'}), '(x=[0, 0, 1, 1], y=[0, 1, 0, 1], hue=d, s=90)\n', (1302, 1347), True, 'import seaborn as sns\n'), ((1351, 1371), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.15)', '(1.2)'], {}), '(-0.15, 1.2)\n', (1359, 1371), True, 'import matplotlib.pyplot as plt\n'), ((1376, 1396), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.15)', '(1.2)'], {}), '(-0.15, 1.2)\n', (1384, 1396), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1885), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1883, 1885), True, 'import matplotlib.pyplot as plt\n'), ((2097, 2158), 'matplotlib.pyplot.plot', 'plt.plot', (["['[0,0]', '[0,1]', '[1,0]', '[1,1]']", 'erros_iter[i]'], {}), "(['[0,0]', '[0,1]', '[1,0]', '[1,1]'], erros_iter[i])\n", (2105, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2219), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2217, 2219), True, 'import matplotlib.pyplot as plt\n')] |
"""
1. Used skills not mentioned in zero shot paper:
1. random crop, fill_mode='reflect'
2. horizontal_flip
3. nesterov momentum
"""
import os
import sys
import math
import numpy as np
import tensorflow as tf
from utils.preprocess import get_cifar10_data
from utils.preprocess import get_fashion_mnist_data
from utils.preprocess import to_categorical
from utils.preprocess import balance_sampling
from utils.seed import set_seed
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras.optimizers.schedules import PiecewiseConstantDecay
from net.wide_resnet import WideResidualNetwork
import argparse
from tensorflow.keras.models import load_model
class Config:
"""
Static config
"""
batch_size = 128
# We need to have 80k iterations for cifar 10
epochs = 205 # math.ceil(80000 * batch_size / 50000)
momentum = 0.9
weight_decay = 5e-4
init_lr = 0.1
n_data_per_class = 5000
def lr_schedule(epoch):
"""
Although we do not change parameters, hard coding is a very bad habbit.
# of operations < 20 is negligible when we have 30s per epoch.
"""
init_lr = Config.init_lr
fraction = epoch / Config.epochs
if fraction >= 0.8:
return init_lr * (0.2**3)
elif fraction >= 0.6:
return init_lr * (0.2**2)
elif fraction >= 0.3:
return init_lr * 0.2
return 0.1
def mkdir(dirname):
save_dir = os.path.join(os.getcwd(), dirname)
os.makedirs(save_dir, exist_ok=True)
def train(depth, width, seed=42, data_per_class=-1, dataset='cifar10', savedir='saved_models', is_continue=False):
set_seed(seed)
# Load data
if dataset == 'cifar10':
# TODO: sampling for Fig2 green line
(x_train, y_train_lbl), (x_test, y_test_lbl) = get_cifar10_data()
# x_train, y_train_lbl = balance_sampling(x_train, y_train_lbl, data_per_class=200)
shape = (32, 32, 3)
classes = 10
elif dataset == 'fashion_mnist':
(x_train, y_train_lbl), (x_test, y_test_lbl) = get_fashion_mnist_data()
shape = (32, 32, 1)
classes = 10
else:
raise NotImplementedError("TODO: SVHN")
# ====================================================================
# make sampling
if data_per_class > 0:
# sample
x_train_sample, y_train_lbl_sample = \
balance_sampling(x_train, y_train_lbl, data_per_class=data_per_class)
# repeat the sampled data to be as large as the full data set for convienient
x_train = np.repeat(x_train_sample, Config.n_data_per_class/data_per_class, axis=0)
y_train_lbl = np.repeat(y_train_lbl_sample, Config.n_data_per_class/data_per_class, axis=0)
# ====================================================================
# To one-hot
y_train = to_categorical(y_train_lbl)
y_test = to_categorical(y_test_lbl)
# Setup model
model_type = 'WRN-%d-%d-seed%d' % (depth, width, seed)
wrn_model = WideResidualNetwork(
depth, width, classes=classes, input_shape=shape,
weight_decay=Config.weight_decay)
# Prepare model model saving directory.
save_dir = os.path.join(os.getcwd(), savedir)
mkdir(save_dir)
# Set up model name and path
model_name = '%s_%s_model.{epoch:03d}.h5' % (dataset, model_type)
model_filepath = os.path.join(save_dir, model_name)
# set up log file
log_fname = '{}-wrn-{}-{}-seed{}_log.csv'.format(dataset, depth, width, seed)
log_filepath = os.path.join(save_dir, log_fname)
# =================================================================
if is_continue:
for i in range(Config.epochs, 0, -1):
fname = model_filepath.format(epoch=i)
if os.path.isfile(fname):
print("Using ", fname, " as the save point.")
break
if i <= 1:
raise RuntimeError("Cannot continue the training")
# ======================================================
initial_epoch = i
wrn_model = load_model(fname)
is_log_append = True
else:
initial_epoch = 0
# compile model
optim = SGD(learning_rate=lr_schedule(initial_epoch),
momentum=Config.momentum,
decay=0.0,
nesterov=True
)
wrn_model.compile(loss='categorical_crossentropy',
optimizer=optim,
metrics=['accuracy'])
is_log_append = False
logger = CSVLogger(filename=log_filepath, separator=',', append=is_log_append)
# Prepare callbacks for model saving and for learning rate adjustment.
lr_scheduler = LearningRateScheduler(lr_schedule)
checkpointer = ModelCheckpoint(filepath=model_filepath,
monitor='val_acc',
verbose=1,
save_best_only=True
)
callbacks = [lr_scheduler, checkpointer, logger]
datagen = ImageDataGenerator(
width_shift_range=4,
height_shift_range=4,
horizontal_flip=True,
vertical_flip=False,
rescale=None,
fill_mode='reflect',
)
datagen.fit(x_train)
wrn_model.fit_generator(
datagen.flow(x_train, y_train, batch_size=Config.batch_size, shuffle=True),
validation_data=(x_test, y_test),
epochs=Config.epochs,
initial_epoch=initial_epoch,
verbose=1,
callbacks=callbacks)
scores = wrn_model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# =================================================
# use the final one as teachers
wrn_model.save(model_filepath.format(epoch=Config.epochs-1))
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--width', type=int, required=True)
parser.add_argument('-d', '--depth', type=int, required=True)
parser.add_argument('-m', '--sample_per_class', type=int, default=-1)
parser.add_argument('--savedir', type=str, default='savedir')
parser.add_argument('--dataset', type=str, default='cifar10')
parser.add_argument('--seed', type=int, default=10)
parser.add_argument('--continue', dest='cont', action='store_true')
return parser
if __name__ == '__main__':
parser = get_arg_parser()
args = parser.parse_args()
train(args.depth, args.width, seed=args.seed,
data_per_class=args.sample_per_class,
savedir=args.savedir, is_continue=args.cont)
| [
"net.wide_resnet.WideResidualNetwork",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.models.load_model",
"os.makedirs",
"argparse.ArgumentParser",
"os.getcwd",
"utils.preprocess.to_categorical",
"utils.preprocess.get_cifar10_data",
"tensorflow.keras.callbacks.ModelChec... | [((1684, 1720), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (1695, 1720), False, 'import os\n'), ((1843, 1857), 'utils.seed.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (1851, 1857), False, 'from utils.seed import set_seed\n'), ((3041, 3068), 'utils.preprocess.to_categorical', 'to_categorical', (['y_train_lbl'], {}), '(y_train_lbl)\n', (3055, 3068), False, 'from utils.preprocess import to_categorical\n'), ((3082, 3108), 'utils.preprocess.to_categorical', 'to_categorical', (['y_test_lbl'], {}), '(y_test_lbl)\n', (3096, 3108), False, 'from utils.preprocess import to_categorical\n'), ((3203, 3310), 'net.wide_resnet.WideResidualNetwork', 'WideResidualNetwork', (['depth', 'width'], {'classes': 'classes', 'input_shape': 'shape', 'weight_decay': 'Config.weight_decay'}), '(depth, width, classes=classes, input_shape=shape,\n weight_decay=Config.weight_decay)\n', (3222, 3310), False, 'from net.wide_resnet import WideResidualNetwork\n'), ((3572, 3606), 'os.path.join', 'os.path.join', (['save_dir', 'model_name'], {}), '(save_dir, model_name)\n', (3584, 3606), False, 'import os\n'), ((3731, 3764), 'os.path.join', 'os.path.join', (['save_dir', 'log_fname'], {}), '(save_dir, log_fname)\n', (3743, 3764), False, 'import os\n'), ((4762, 4831), 'tensorflow.keras.callbacks.CSVLogger', 'CSVLogger', ([], {'filename': 'log_filepath', 'separator': '""","""', 'append': 'is_log_append'}), "(filename=log_filepath, separator=',', append=is_log_append)\n", (4771, 4831), False, 'from tensorflow.keras.callbacks import CSVLogger\n'), ((4927, 4961), 'tensorflow.keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['lr_schedule'], {}), '(lr_schedule)\n', (4948, 4961), False, 'from tensorflow.keras.callbacks import LearningRateScheduler\n'), ((4981, 5076), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'model_filepath', 'monitor': '"""val_acc"""', 
'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath=model_filepath, monitor='val_acc', verbose=1,\n save_best_only=True)\n", (4996, 5076), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((5283, 5431), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'width_shift_range': '(4)', 'height_shift_range': '(4)', 'horizontal_flip': '(True)', 'vertical_flip': '(False)', 'rescale': 'None', 'fill_mode': '"""reflect"""'}), "(width_shift_range=4, height_shift_range=4,\n horizontal_flip=True, vertical_flip=False, rescale=None, fill_mode=\n 'reflect')\n", (5301, 5431), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((6135, 6160), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6158, 6160), False, 'import argparse\n'), ((1658, 1669), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1667, 1669), False, 'import os\n'), ((2004, 2022), 'utils.preprocess.get_cifar10_data', 'get_cifar10_data', ([], {}), '()\n', (2020, 2022), False, 'from utils.preprocess import get_cifar10_data\n'), ((2586, 2655), 'utils.preprocess.balance_sampling', 'balance_sampling', (['x_train', 'y_train_lbl'], {'data_per_class': 'data_per_class'}), '(x_train, y_train_lbl, data_per_class=data_per_class)\n', (2602, 2655), False, 'from utils.preprocess import balance_sampling\n'), ((2761, 2836), 'numpy.repeat', 'np.repeat', (['x_train_sample', '(Config.n_data_per_class / data_per_class)'], {'axis': '(0)'}), '(x_train_sample, Config.n_data_per_class / data_per_class, axis=0)\n', (2770, 2836), True, 'import numpy as np\n'), ((2857, 2936), 'numpy.repeat', 'np.repeat', (['y_train_lbl_sample', '(Config.n_data_per_class / data_per_class)'], {'axis': '(0)'}), '(y_train_lbl_sample, Config.n_data_per_class / data_per_class, axis=0)\n', (2866, 2936), True, 'import numpy as np\n'), ((3405, 3416), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3414, 3416), False, 'import os\n'), ((4269, 4286), 
'tensorflow.keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (4279, 4286), False, 'from tensorflow.keras.models import load_model\n'), ((2256, 2280), 'utils.preprocess.get_fashion_mnist_data', 'get_fashion_mnist_data', ([], {}), '()\n', (2278, 2280), False, 'from utils.preprocess import get_fashion_mnist_data\n'), ((3969, 3990), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (3983, 3990), False, 'import os\n')] |
import random
import numpy as np
from codit.outbreak import Outbreak
from codit.outbreak_recorder import VariantComponent
from codit.society import TestingTracingSociety, ContactTestingSociety
from codit.society.strategic import TwoTrackTester
from codit.disease import Covid
ALL_TIME_DAYS = 15
def test_covid_model():
s = TestingTracingSociety(episodes_per_day=2, config=dict(PROB_TEST_IF_REQUESTED=0.4))
random.seed(42)
np.random.seed(42)
o = Outbreak(s, Covid(), pop_size=8, seed_size=1, n_days=ALL_TIME_DAYS)
o.simulate()
# for k, v in o.pop.contacts.items():
# print(k, len(v))
# t cov risks tests isol
np.testing.assert_allclose(o.recorder.main_component.story[:15], [[0.5, 0.25, 0.125, 0.0, 0.125, 0.0],
[1.0, 0.25, 0.125, 0.25, 0.0, 0.0],
[1.5, 0.25, 0.125, 0.0, 0.0, 0.0],
[2.0, 0.375, 0.125, 0.0, 0.0, 0.0],
[2.5, 0.375, 0.125, 0.0, 0.0, 0.0],
[3.0, 0.375, 0.125, 0.0, 0.0, 0.0],
[3.5, 0.375, 0.125, 0.0, 0.0, 0.0],
[4.0, 0.375, 0.125, 0.0, 0.0, 0.125],
[4.5, 0.375, 0.25, 0.0, 0.0, 0.125],
[5.0, 0.5, 0.25, 0.0, 0.0, 0.125],
[5.5, 0.5, 0.125, 0.0, 0.0, 0.125],
[6.0, 0.5, 0.25, 0.0, 0.0, 0.125],
[6.5, 0.5, 0.25, 0.0, 0.0, 0.125],
[7.0, 0.5, 0.25, 0.0, 0.0, 0.125],
[7.5, 0.5, 0.25, 0.0, 0.0, 0.125]])
def test_contact_testing():
s = ContactTestingSociety(episodes_per_day=2)
random.seed(42)
np.random.seed(42)
o = Outbreak(s, Covid(), pop_size=8, seed_size=1, n_days=ALL_TIME_DAYS)
o.simulate()
# for k, v in o.pop.contacts.items():
# print(k, len(v))
# t cov risks tests isol
np.testing.assert_allclose(o.recorder.main_component.story[:15], [[0.5, 0.25, 0.125, 0.0, 0.125, 0.0],
[1.0, 0.25, 0.125, 0.0, 0.125, 0.0],
[1.5, 0.25, 0.125, 0.0, 0.125, 0.0],
[2.0, 0.25, 0.125, 0.0, 0.125, 0.0],
[2.5, 0.25, 0.125, 0.0, 0.125, 0.0],
[3.0, 0.25, 0.125, 0.0, 0.125, 0.0],
[3.5, 0.25, 0.125, 0.0, 0.125, 0.0],
[4.0, 0.25, 0.125, 0.0, 0.125, 0.125],
[4.5, 0.25, 0.25, 0.0, 0.125, 0.125],
[5.0, 0.375, 0.25, 0.0, 0.125, 0.125],
[5.5, 0.375, 0.125, 0.0, 0.125, 0.125],
[6.0, 0.375, 0.125, 0.0, 0.125, 0.125],
[6.5, 0.375, 0.125, 0.0, 0.125, 0.125],
[7.0, 0.375, 0.125, 0.0, 0.125, 0.125],
[7.5, 0.375, 0.125, 0.0, 0.125, 0.125]])
def test_two_track_model():
s = TwoTrackTester(episodes_per_day=2)
random.seed(42)
np.random.seed(42)
o = Outbreak(s, Covid(), pop_size=8, seed_size=1, n_days=ALL_TIME_DAYS)
o.simulate()
# for k, v in o.pop.contacts.items():
# print(k, len(v))
# t cov risks tests tests_back isol
np.testing.assert_allclose(o.recorder.main_component.story[:15], [[0.5, 0.25, 0.125, 0.0, 0.125, 0.0],
[1.0, 0.25, 0.125, 0.0, 0.125, 0.0],
[1.5, 0.25, 0.125, 0.0, 0.125, 0.0],
[2.0, 0.25, 0.125, 0.0, 0.125, 0.0],
[2.5, 0.25, 0.125, 0.0, 0.125, 0.0],
[3.0, 0.25, 0.125, 0.0, 0.125, 0.0],
[3.5, 0.25, 0.125, 0.0, 0.125, 0.0],
[4.0, 0.375, 0.125, 0.0, 0.125, 0.0],
[4.5, 0.375, 0.25, 0.0, 0.125, 0.0],
[5.0, 0.5, 0.25, 0.0, 0.125, 0.0],
[5.5, 0.5, 0.125, 0.0, 0.125, 0.0],
[6.0, 0.5, 0.125, 0.0, 0.125, 0.0],
[6.5, 0.5, 0.125, 0.0, 0.125, 0.0],
[7.0, 0.5, 0.125, 0.0, 0.125, 0.0],
[7.5, 0.5, 0.125, 0.0, 0.125, 0.0]])
def test_recorder_components():
s = TwoTrackTester(episodes_per_day=2)
o = Outbreak(s, Covid(), pop_size=8, seed_size=1, n_days=ALL_TIME_DAYS, show_heatmap=True)
o.recorder.add_component(VariantComponent())
o.simulate()
| [
"codit.outbreak_recorder.VariantComponent",
"codit.society.ContactTestingSociety",
"numpy.random.seed",
"codit.society.strategic.TwoTrackTester",
"codit.disease.Covid",
"random.seed",
"numpy.testing.assert_allclose"
] | [((418, 433), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (429, 433), False, 'import random\n'), ((438, 456), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (452, 456), True, 'import numpy as np\n'), ((692, 1332), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['o.recorder.main_component.story[:15]', '[[0.5, 0.25, 0.125, 0.0, 0.125, 0.0], [1.0, 0.25, 0.125, 0.25, 0.0, 0.0], [\n 1.5, 0.25, 0.125, 0.0, 0.0, 0.0], [2.0, 0.375, 0.125, 0.0, 0.0, 0.0], [\n 2.5, 0.375, 0.125, 0.0, 0.0, 0.0], [3.0, 0.375, 0.125, 0.0, 0.0, 0.0],\n [3.5, 0.375, 0.125, 0.0, 0.0, 0.0], [4.0, 0.375, 0.125, 0.0, 0.0, 0.125\n ], [4.5, 0.375, 0.25, 0.0, 0.0, 0.125], [5.0, 0.5, 0.25, 0.0, 0.0, \n 0.125], [5.5, 0.5, 0.125, 0.0, 0.0, 0.125], [6.0, 0.5, 0.25, 0.0, 0.0, \n 0.125], [6.5, 0.5, 0.25, 0.0, 0.0, 0.125], [7.0, 0.5, 0.25, 0.0, 0.0, \n 0.125], [7.5, 0.5, 0.25, 0.0, 0.0, 0.125]]'], {}), '(o.recorder.main_component.story[:15], [[0.5, \n 0.25, 0.125, 0.0, 0.125, 0.0], [1.0, 0.25, 0.125, 0.25, 0.0, 0.0], [1.5,\n 0.25, 0.125, 0.0, 0.0, 0.0], [2.0, 0.375, 0.125, 0.0, 0.0, 0.0], [2.5, \n 0.375, 0.125, 0.0, 0.0, 0.0], [3.0, 0.375, 0.125, 0.0, 0.0, 0.0], [3.5,\n 0.375, 0.125, 0.0, 0.0, 0.0], [4.0, 0.375, 0.125, 0.0, 0.0, 0.125], [\n 4.5, 0.375, 0.25, 0.0, 0.0, 0.125], [5.0, 0.5, 0.25, 0.0, 0.0, 0.125],\n [5.5, 0.5, 0.125, 0.0, 0.0, 0.125], [6.0, 0.5, 0.25, 0.0, 0.0, 0.125],\n [6.5, 0.5, 0.25, 0.0, 0.0, 0.125], [7.0, 0.5, 0.25, 0.0, 0.0, 0.125], [\n 7.5, 0.5, 0.25, 0.0, 0.0, 0.125]])\n', (718, 1332), True, 'import numpy as np\n'), ((2105, 2146), 'codit.society.ContactTestingSociety', 'ContactTestingSociety', ([], {'episodes_per_day': '(2)'}), '(episodes_per_day=2)\n', (2126, 2146), False, 'from codit.society import TestingTracingSociety, ContactTestingSociety\n'), ((2151, 2166), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (2162, 2166), False, 'import random\n'), ((2171, 2189), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', 
(2185, 2189), True, 'import numpy as np\n'), ((2425, 3104), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['o.recorder.main_component.story[:15]', '[[0.5, 0.25, 0.125, 0.0, 0.125, 0.0], [1.0, 0.25, 0.125, 0.0, 0.125, 0.0],\n [1.5, 0.25, 0.125, 0.0, 0.125, 0.0], [2.0, 0.25, 0.125, 0.0, 0.125, 0.0\n ], [2.5, 0.25, 0.125, 0.0, 0.125, 0.0], [3.0, 0.25, 0.125, 0.0, 0.125, \n 0.0], [3.5, 0.25, 0.125, 0.0, 0.125, 0.0], [4.0, 0.25, 0.125, 0.0, \n 0.125, 0.125], [4.5, 0.25, 0.25, 0.0, 0.125, 0.125], [5.0, 0.375, 0.25,\n 0.0, 0.125, 0.125], [5.5, 0.375, 0.125, 0.0, 0.125, 0.125], [6.0, 0.375,\n 0.125, 0.0, 0.125, 0.125], [6.5, 0.375, 0.125, 0.0, 0.125, 0.125], [7.0,\n 0.375, 0.125, 0.0, 0.125, 0.125], [7.5, 0.375, 0.125, 0.0, 0.125, 0.125]]'], {}), '(o.recorder.main_component.story[:15], [[0.5, \n 0.25, 0.125, 0.0, 0.125, 0.0], [1.0, 0.25, 0.125, 0.0, 0.125, 0.0], [\n 1.5, 0.25, 0.125, 0.0, 0.125, 0.0], [2.0, 0.25, 0.125, 0.0, 0.125, 0.0],\n [2.5, 0.25, 0.125, 0.0, 0.125, 0.0], [3.0, 0.25, 0.125, 0.0, 0.125, 0.0\n ], [3.5, 0.25, 0.125, 0.0, 0.125, 0.0], [4.0, 0.25, 0.125, 0.0, 0.125, \n 0.125], [4.5, 0.25, 0.25, 0.0, 0.125, 0.125], [5.0, 0.375, 0.25, 0.0, \n 0.125, 0.125], [5.5, 0.375, 0.125, 0.0, 0.125, 0.125], [6.0, 0.375, \n 0.125, 0.0, 0.125, 0.125], [6.5, 0.375, 0.125, 0.0, 0.125, 0.125], [7.0,\n 0.375, 0.125, 0.0, 0.125, 0.125], [7.5, 0.375, 0.125, 0.0, 0.125, 0.125]])\n', (2451, 3104), True, 'import numpy as np\n'), ((3875, 3909), 'codit.society.strategic.TwoTrackTester', 'TwoTrackTester', ([], {'episodes_per_day': '(2)'}), '(episodes_per_day=2)\n', (3889, 3909), False, 'from codit.society.strategic import TwoTrackTester\n'), ((3914, 3929), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (3925, 3929), False, 'import random\n'), ((3934, 3952), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3948, 3952), True, 'import numpy as np\n'), ((4199, 4849), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', 
(['o.recorder.main_component.story[:15]', '[[0.5, 0.25, 0.125, 0.0, 0.125, 0.0], [1.0, 0.25, 0.125, 0.0, 0.125, 0.0],\n [1.5, 0.25, 0.125, 0.0, 0.125, 0.0], [2.0, 0.25, 0.125, 0.0, 0.125, 0.0\n ], [2.5, 0.25, 0.125, 0.0, 0.125, 0.0], [3.0, 0.25, 0.125, 0.0, 0.125, \n 0.0], [3.5, 0.25, 0.125, 0.0, 0.125, 0.0], [4.0, 0.375, 0.125, 0.0, \n 0.125, 0.0], [4.5, 0.375, 0.25, 0.0, 0.125, 0.0], [5.0, 0.5, 0.25, 0.0,\n 0.125, 0.0], [5.5, 0.5, 0.125, 0.0, 0.125, 0.0], [6.0, 0.5, 0.125, 0.0,\n 0.125, 0.0], [6.5, 0.5, 0.125, 0.0, 0.125, 0.0], [7.0, 0.5, 0.125, 0.0,\n 0.125, 0.0], [7.5, 0.5, 0.125, 0.0, 0.125, 0.0]]'], {}), '(o.recorder.main_component.story[:15], [[0.5, \n 0.25, 0.125, 0.0, 0.125, 0.0], [1.0, 0.25, 0.125, 0.0, 0.125, 0.0], [\n 1.5, 0.25, 0.125, 0.0, 0.125, 0.0], [2.0, 0.25, 0.125, 0.0, 0.125, 0.0],\n [2.5, 0.25, 0.125, 0.0, 0.125, 0.0], [3.0, 0.25, 0.125, 0.0, 0.125, 0.0\n ], [3.5, 0.25, 0.125, 0.0, 0.125, 0.0], [4.0, 0.375, 0.125, 0.0, 0.125,\n 0.0], [4.5, 0.375, 0.25, 0.0, 0.125, 0.0], [5.0, 0.5, 0.25, 0.0, 0.125,\n 0.0], [5.5, 0.5, 0.125, 0.0, 0.125, 0.0], [6.0, 0.5, 0.125, 0.0, 0.125,\n 0.0], [6.5, 0.5, 0.125, 0.0, 0.125, 0.0], [7.0, 0.5, 0.125, 0.0, 0.125,\n 0.0], [7.5, 0.5, 0.125, 0.0, 0.125, 0.0]])\n', (4225, 4849), True, 'import numpy as np\n'), ((5627, 5661), 'codit.society.strategic.TwoTrackTester', 'TwoTrackTester', ([], {'episodes_per_day': '(2)'}), '(episodes_per_day=2)\n', (5641, 5661), False, 'from codit.society.strategic import TwoTrackTester\n'), ((477, 484), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (482, 484), False, 'from codit.disease import Covid\n'), ((2210, 2217), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (2215, 2217), False, 'from codit.disease import Covid\n'), ((3973, 3980), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (3978, 3980), False, 'from codit.disease import Covid\n'), ((5682, 5689), 'codit.disease.Covid', 'Covid', ([], {}), '()\n', (5687, 5689), False, 'from codit.disease import Covid\n'), ((5786, 5804), 
'codit.outbreak_recorder.VariantComponent', 'VariantComponent', ([], {}), '()\n', (5802, 5804), False, 'from codit.outbreak_recorder import VariantComponent\n')] |
"""
Copyright (c) 2022-2022 Blue Brain Project/EPFL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import numpy as np
from vascpy.utils.adjacency import AdjacencyMatrix
# Edges longer than DISTANCE_FACTOR times the median edge length are treated
# as spurious by curate_point_graph (see _edges_shorter_than below).
DISTANCE_FACTOR = 10.0
L = logging.getLogger(__name__)
def curate_point_graph(
    point_graph,
    remove_self_loops=False,
    remove_very_long_edges=False,
    remove_high_degree_vertices=False,
    remove_isolated_vertices=False,
):
    """Curate a point graph in place by dropping unwanted vertices and edges.

    Depending on the flags, edges that are unusually long or self-referencing
    are removed, and vertices whose degree is too high (> 4) or zero are
    removed together with their incident edges.
    """
    points = point_graph.points
    edges = point_graph.edges

    keep_edges = np.full(len(edges), True)
    keep_vertices = np.full(len(points), True)

    if remove_very_long_edges:
        keep_edges &= _edges_shorter_than(points, edges, DISTANCE_FACTOR)

    if remove_self_loops:
        keep_edges &= _edges_no_self_loops(edges)

    # Both vertex filters share the same shape: recompute degrees on the
    # surviving edges, mask vertices, then drop edges touching dropped vertices.
    for enabled, keep_degree in (
        (remove_high_degree_vertices, lambda degrees: degrees <= 4),
        (remove_isolated_vertices, lambda degrees: degrees > 0),
    ):
        if enabled:
            adjacency = AdjacencyMatrix(edges[keep_edges], n_vertices=len(points))
            keep_vertices &= keep_degree(adjacency.degrees)
            # an edge survives only if both of its endpoints survive
            keep_edges &= np.all(keep_vertices[edges], axis=1)

    point_graph.remove(
        node_indices=np.flatnonzero(~keep_vertices),
        edge_indices=np.flatnonzero(~keep_edges),
    )
def _edges_shorter_than(points, edges, distance_factor):
distances = np.linalg.norm(points[edges[:, 1]] - points[edges[:, 0]], axis=1)
return distances < distance_factor * np.median(distances)
def _edges_no_self_loops(edges):
return edges[:, 0] != edges[:, 1]
| [
"numpy.median",
"logging.getLogger",
"numpy.where",
"numpy.linalg.norm",
"numpy.all"
] | [((697, 724), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (714, 724), False, 'import logging\n'), ((1977, 2042), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[edges[:, 1]] - points[edges[:, 0]])'], {'axis': '(1)'}), '(points[edges[:, 1]] - points[edges[:, 0]], axis=1)\n', (1991, 2042), True, 'import numpy as np\n'), ((1506, 1545), 'numpy.all', 'np.all', (['vertices_to_keep[edges]'], {'axis': '(1)'}), '(vertices_to_keep[edges], axis=1)\n', (1512, 1545), True, 'import numpy as np\n'), ((1737, 1776), 'numpy.all', 'np.all', (['vertices_to_keep[edges]'], {'axis': '(1)'}), '(vertices_to_keep[edges], axis=1)\n', (1743, 1776), True, 'import numpy as np\n'), ((2084, 2104), 'numpy.median', 'np.median', (['distances'], {}), '(distances)\n', (2093, 2104), True, 'import numpy as np\n'), ((1823, 1850), 'numpy.where', 'np.where', (['(~vertices_to_keep)'], {}), '(~vertices_to_keep)\n', (1831, 1850), True, 'import numpy as np\n'), ((1868, 1892), 'numpy.where', 'np.where', (['(~edges_to_keep)'], {}), '(~edges_to_keep)\n', (1876, 1892), True, 'import numpy as np\n')] |
class Metrics(object):
    """Point-set distance metrics (Chamfer / Hausdorff variants) built on
    backend-agnostic array primitives.

    A subclass supplies ``sum``, ``max``, ``min``, ``expand_dims``, ``sqrt``
    and ``top_k`` for a concrete backend (e.g. numpy), and this base class
    assembles the metrics from them.  Point sets ``s1``/``s2`` are arrays of
    shape (..., n_points, n_dims); leading batch dimensions must match or be
    broadcastable (size 1) and the trailing coordinate dimension must match.
    """
    def sum(self, array, axis=None):
        raise NotImplementedError('Abstract method')

    def max(self, array, axis=None):
        raise NotImplementedError('Abstract method')

    def min(self, array, axis=None):
        raise NotImplementedError('Abstract method')

    def expand_dims(self, array, axis):
        raise NotImplementedError('Abstract method')

    def sqrt(self, x):
        raise NotImplementedError('Abstract method')

    def top_k(self, x, k, axis=-1):
        # Signature fixed for consistency: every call site passes
        # (values, k, axis=...) and subclasses implement (x, k, axis=-1);
        # the old (x, axis) form could never be called successfully.
        raise NotImplementedError('Abstract method')

    def _size_check(self, s1, s2):
        """Raise ValueError unless batch dims broadcast and last dims match."""
        for s1s, s2s in zip(s1.shape[:-2], s2.shape[:-2]):
            if s1s != s2s and not (s1s == 1 or s2s == 1):
                raise ValueError(
                    'Invalid shape for s1, s2: %s, %s'
                    % (str(s1.shape), str(s2.shape)))
        if s1.shape[-1] != s2.shape[-1]:
            raise ValueError(
                'last dim of s1 and s2 must be same, but got %d, %d'
                % (s1.shape[-1], s2.shape[-1]))

    def _dist2(self, s1, s2):
        """All-pairs squared euclidean distances, shape (..., n1, n2)."""
        s1 = self.expand_dims(s1, axis=-2)
        s2 = self.expand_dims(s2, axis=-3)
        return self.sum((s1 - s2)**2, axis=-1)

    def _unidirectional_chamfer(self, dist2, reverse=False):
        # For each point, take the distance to its nearest neighbour in the
        # other set, then sum; `reverse` selects the direction.
        return self.sum(self.min(dist2, axis=-2 if reverse else -1), axis=-1)

    def unidirectional_chamfer(self, s1, s2, reverse=False):
        """One-directional (squared) Chamfer distance from s1 to s2."""
        self._size_check(s1, s2)
        dist2 = self._dist2(s1, s2)
        return self._unidirectional_chamfer(dist2, reverse=reverse)

    def _bidirectional_chamfer(self, s1, s2):
        dist2 = self._dist2(s1, s2)
        return self._unidirectional_chamfer(dist2, reverse=False) + \
            self._unidirectional_chamfer(dist2, reverse=True)

    def bidirectional_chamfer(self, s1, s2):
        """Symmetric Chamfer distance (sum of both directions)."""
        self._size_check(s1, s2)
        return self._bidirectional_chamfer(s1, s2)

    def chamfer(self, s1, s2):
        """Alias for `bidirectional_chamfer`."""
        return self.bidirectional_chamfer(s1, s2)

    def _unidirectional_n_chamfer(self, n, neg_dist2, reverse=False):
        # top_k on negated distances selects the n *smallest* distances.
        values, _ = self.top_k(neg_dist2, n, axis=-2 if reverse else -1)
        return -self.sum(values, axis=(-2, -1))

    def unidirectional_n_chamfer(self, n, s1, s2, reverse=False):
        """Chamfer variant accumulating the n nearest neighbours, not only 1."""
        # Bug fix: `self._check_sizes` and `self.dist2` did not exist; the
        # intended helpers are `_size_check` and `_dist2` (AttributeError before).
        self._size_check(s1, s2)
        neg_dist2 = -self._dist2(s1, s2)
        return self._unidirectional_n_chamfer(n, neg_dist2, reverse=reverse)

    def _bidirectional_n_chamfer(self, n, s1, s2):
        neg_dist2 = -self._dist2(s1, s2)
        return self._unidirectional_n_chamfer(n, neg_dist2, reverse=False) + \
            self._unidirectional_n_chamfer(n, neg_dist2, reverse=True)

    def bidirectional_n_chamfer(self, n, s1, s2):
        self._size_check(s1, s2)
        return self._bidirectional_n_chamfer(n, s1, s2)

    def n_chamfer(self, n, s1, s2):
        """Alias for `bidirectional_n_chamfer`."""
        return self.bidirectional_n_chamfer(n, s1, s2)

    def _unidirectional_hausdorff(self, dist2, reverse=False):
        return self.max(self.min(dist2, axis=-2 if reverse else -1), axis=-1)

    def unidirectional_hausdorff(self, s1, s2, reverse=False):
        """One-directional (squared) Hausdorff distance from s1 to s2."""
        self._size_check(s1, s2)
        # Bug fix: the helper expects a pairwise distance matrix, but the raw
        # point sets were previously passed straight through.
        dist2 = self._dist2(s1, s2)
        return self._unidirectional_hausdorff(dist2, reverse=reverse)

    def _bidirectional_hausdorff(self, s1, s2):
        dist2 = self._dist2(s1, s2)
        return max(
            self._unidirectional_hausdorff(dist2, reverse=False),
            self._unidirectional_hausdorff(dist2, reverse=True))

    def bidirectional_hausdorff(self, s1, s2):
        """Symmetric Hausdorff distance (max of both directions)."""
        self._size_check(s1, s2)
        return self._bidirectional_hausdorff(s1, s2)

    def hausdorff(self, s1, s2):
        """Alias for `bidirectional_hausdorff`."""
        return self.bidirectional_hausdorff(s1, s2)

    def unidirectional_modified_chamfer(self, s1, s2, reverse=False):
        """Chamfer variant accumulating euclidean (not squared) distances."""
        self._size_check(s1, s2)
        dist2 = self._dist2(s1, s2)
        dist = self.sqrt(dist2)
        return self._unidirectional_chamfer(dist, reverse=reverse)

    def _bidirectional_modified_chamfer(self, s1, s2):
        dist2 = self._dist2(s1, s2)
        dist = self.sqrt(dist2)
        return self._unidirectional_chamfer(dist, reverse=False) + \
            self._unidirectional_chamfer(dist, reverse=True)

    def bidirectional_modified_chamfer(self, s1, s2):
        self._size_check(s1, s2)
        return self._bidirectional_modified_chamfer(s1, s2)

    def modified_chamfer(self, s1, s2):
        """Alias for `bidirectional_modified_chamfer`."""
        return self.bidirectional_modified_chamfer(s1, s2)
import numpy as np
class NumpyMetrics(Metrics):
    """Numpy-backed implementation of the `Metrics` array primitives."""

    def sum(self, array, axis=None):
        """Sum of elements over the given axis."""
        return np.sum(array, axis=axis)

    def max(self, array, axis=None):
        """Maximum of elements over the given axis."""
        return np.max(array, axis=axis)

    def min(self, array, axis=None):
        """Minimum of elements over the given axis."""
        return np.min(array, axis=axis)

    def expand_dims(self, array, axis):
        """Insert a length-1 axis at the given position."""
        return np.expand_dims(array, axis=axis)

    def sqrt(self, x):
        """Element-wise square root."""
        return np.sqrt(x)

    def top_k(self, x, k, axis=-1):
        # Not supported by this backend.
        raise NotImplementedError()

    def emd(self, p0, p1):
        """Earth mover's distance between two equally-shaped 2-D point sets."""
        from emd import emd
        assert p0.shape == p1.shape
        assert len(p0.shape) == 2
        return emd(p0, p1)
| [
"numpy.sum",
"numpy.expand_dims",
"emd.emd",
"numpy.max",
"numpy.min",
"numpy.sqrt"
] | [((4533, 4557), 'numpy.sum', 'np.sum', (['array'], {'axis': 'axis'}), '(array, axis=axis)\n', (4539, 4557), True, 'import numpy as np\n'), ((4611, 4635), 'numpy.max', 'np.max', (['array'], {'axis': 'axis'}), '(array, axis=axis)\n', (4617, 4635), True, 'import numpy as np\n'), ((4689, 4713), 'numpy.min', 'np.min', (['array'], {'axis': 'axis'}), '(array, axis=axis)\n', (4695, 4713), True, 'import numpy as np\n'), ((4770, 4802), 'numpy.expand_dims', 'np.expand_dims', (['array'], {'axis': 'axis'}), '(array, axis=axis)\n', (4784, 4802), True, 'import numpy as np\n'), ((4842, 4852), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (4849, 4852), True, 'import numpy as np\n'), ((5101, 5112), 'emd.emd', 'emd', (['p0', 'p1'], {}), '(p0, p1)\n', (5104, 5112), False, 'from emd import emd\n')] |
#!/usr/bin/env python
# coding: utf8
""" Unit testing for Separator class. """
__email__ = '<EMAIL>'
__author__ = '<NAME>'
__license__ = 'MIT License'
from os import makedirs
from os.path import join
from tempfile import TemporaryDirectory
import pytest
import numpy as np
from spleeter.__main__ import evaluate
from spleeter.audio.adapter import AudioAdapter
# Separation STFT back-ends the evaluation test is parameterized over.
BACKENDS = ['tensorflow', 'librosa']
# pytest parametrize mapping: each backend id maps to itself.
TEST_CONFIGURATIONS = {el: el for el in BACKENDS}
# Expected median separation metrics (SDR/SAR/SIR/ISR) per instrument for the
# 4-stem model on the deterministic fake dataset; compared with atol=1e-3 in
# test_evaluate.  Do not edit without regenerating the reference run.
res_4stems = {
    'vocals': {
        'SDR': 3.25e-05,
        'SAR': -11.153575,
        'SIR': -1.3849,
        'ISR': 2.75e-05
    },
    'drums': {
        'SDR': -0.079505,
        'SAR': -15.7073575,
        'SIR': -4.972755,
        'ISR': 0.0013575
    },
    'bass': {
        'SDR': 2.5e-06,
        'SAR': -10.3520575,
        'SIR': -4.272325,
        'ISR': 2.5e-06
    },
    'other': {
        'SDR': -1.359175,
        'SAR': -14.7076775,
        'SIR': -4.761505,
        'ISR': -0.01528
    }
}
def generate_fake_eval_dataset(path):
    """Write a tiny musdb-style evaluation dataset of random audio under ``path``.

    Creates 2 songs ('test/song0', 'test/song1'), each containing a mixture
    plus four stem wav files of 3 seconds of seeded (deterministic) noise.
    """
    adapter = AudioAdapter.default()
    sample_rate = 44100
    rng = np.random.RandomState(seed=0)
    stems = ('mixture', 'vocals', 'bass', 'drums', 'other')
    for song_index in range(2):
        song_dir = join(path, 'test', f'song{song_index}')
        makedirs(song_dir, exist_ok=True)
        for stem in stems:
            # 3 seconds, stereo, centred around zero
            samples = rng.rand(3 * sample_rate, 2) - 0.5
            adapter.save(join(song_dir, f'{stem}.wav'), samples, sample_rate)
@pytest.mark.parametrize('backend', TEST_CONFIGURATIONS)
def test_evaluate(backend):
    """End-to-end evaluation smoke test, run once per STFT backend.

    Builds a deterministic fake musdb dataset, runs spleeter's 4-stem
    evaluation on it, and checks the median of every metric against the
    pre-recorded reference values in ``res_4stems``.
    """
    with TemporaryDirectory() as dataset:
        with TemporaryDirectory() as evaluation:
            generate_fake_eval_dataset(dataset)
            metrics = evaluate(
                adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter',
                output_path=evaluation,
                stft_backend=backend,
                params_filename='spleeter:4stems',
                mus_dir=dataset,
                mwf=False,
                verbose=False)
            # metrics: {instrument: {metric_name: per-frame values}}
            for instrument, metric in metrics.items():
                for m, value in metric.items():
                    assert np.allclose(
                        np.median(value),
                        res_4stems[instrument][m],
                        atol=1e-3)
| [
"tempfile.TemporaryDirectory",
"os.makedirs",
"spleeter.__main__.evaluate",
"numpy.median",
"spleeter.audio.adapter.AudioAdapter.default",
"numpy.random.RandomState",
"pytest.mark.parametrize",
"os.path.join"
] | [((1558, 1613), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""backend"""', 'TEST_CONFIGURATIONS'], {}), "('backend', TEST_CONFIGURATIONS)\n", (1581, 1613), False, 'import pytest\n'), ((1074, 1096), 'spleeter.audio.adapter.AudioAdapter.default', 'AudioAdapter.default', ([], {}), '()\n', (1094, 1096), False, 'from spleeter.audio.adapter import AudioAdapter\n'), ((1174, 1203), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(0)'}), '(seed=0)\n', (1195, 1203), True, 'import numpy as np\n'), ((1256, 1289), 'os.path.join', 'join', (['path', '"""test"""', 'f"""song{song}"""'], {}), "(path, 'test', f'song{song}')\n", (1260, 1289), False, 'from os.path import join\n'), ((1298, 1332), 'os.makedirs', 'makedirs', (['song_path'], {'exist_ok': '(True)'}), '(song_path, exist_ok=True)\n', (1306, 1332), False, 'from os import makedirs\n'), ((1651, 1671), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (1669, 1671), False, 'from tempfile import TemporaryDirectory\n'), ((1426, 1457), 'os.path.join', 'join', (['song_path', 'f"""{instr}.wav"""'], {}), "(song_path, f'{instr}.wav')\n", (1430, 1457), False, 'from os.path import join\n'), ((1697, 1717), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (1715, 1717), False, 'from tempfile import TemporaryDirectory\n'), ((1803, 2003), 'spleeter.__main__.evaluate', 'evaluate', ([], {'adapter': '"""spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter"""', 'output_path': 'evaluation', 'stft_backend': 'backend', 'params_filename': '"""spleeter:4stems"""', 'mus_dir': 'dataset', 'mwf': '(False)', 'verbose': '(False)'}), "(adapter='spleeter.audio.ffmpeg.FFMPEGProcessAudioAdapter',\n output_path=evaluation, stft_backend=backend, params_filename=\n 'spleeter:4stems', mus_dir=dataset, mwf=False, verbose=False)\n", (1811, 2003), False, 'from spleeter.__main__ import evaluate\n'), ((2275, 2291), 'numpy.median', 'np.median', (['value'], {}), '(value)\n', (2284, 2291), True, 
'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import os
import re
import sys
import logging
import numpy as np
import matplotlib.pyplot as plt
import labelme
from PyQt5.QtWidgets import *
import json
import traceback
import platform
import subprocess
from libs.shape import *
# NOTE(review): this rebinding shadows the `platform` module imported above;
# later code only needs the platform string, but the module becomes unreachable.
platform = sys.platform
# Module-level state shared with the labeling UI: index of the image currently
# being labeled, and the list of all sample image paths.
labelImageIndex = 0
labelImageList = []
# QMainWindow is a subclass of QWidget
# class UIExploreDialog(QMainWindow):
class UIExploreDialog(QDialog):
    """Dialog driving the 5-step automated UI exploration workflow:
    open a sample folder, auto-label it, re-label manually, train the
    RefineDet detector, and inspect the exploration result.

    Collaborates with an external drawing `canvas` and the main `ui`
    object (tree widget, navigation buttons) passed in by the caller.
    """
    def __init__(self, canvas=None, ui=None):
        """Build the step-by-step dialog layout and show it immediately."""
        super().__init__()
        self.__logger = logging.getLogger('sdktool')
        self.__samplePath = './data/UIExplore/sample'
        self.__canvas = canvas
        self.__ui = ui
        icon = QIcon()
        icon.addPixmap(QPixmap(":/menu/import.jpg"), QIcon.Normal, QIcon.Off)
        # --- Step 1: choose the sample image folder ---
        vBoxOpenFile = QVBoxLayout()
        labelBegin = QLabel('UI自动化探索: step1-->step5, 请依次进行\n')
        label1 = QLabel("Step1: 打开样本文件夹:")
        btnOpenFile = QPushButton()
        btnOpenFile.setIcon(icon)
        btnOpenFile.setToolTip("打开样本图像路径!")
        # btnOpenFIle.setStatusTip("打开样本图像路径StatusTip!")
        btnOpenFile.clicked.connect(self.funOpenFile)
        hBoxOpenFile = QHBoxLayout()
        # single-line text box holding the sample path
        self.lineEdit = QLineEdit(self.__samplePath)
        self.lineEdit.selectAll()
        self.lineEdit.returnPressed.connect(self.funOK)
        hBoxOpenFile.addWidget(self.lineEdit)
        hBoxOpenFile.addWidget(btnOpenFile)
        vBoxOpenFile.addWidget(labelBegin)
        vBoxOpenFile.addWidget(label1)
        vBoxOpenFile.addLayout(hBoxOpenFile)
        # --- Step 2: automatic labeling with progress bar ---
        vBoxAutoLabel = QVBoxLayout()
        label2 = QLabel("Step2: 样本自动标记")
        self.progressBar = QProgressBar(self)
        vBoxAutoLabel.addWidget(label2)
        vBoxAutoLabel.addWidget(self.progressBar)
        # --- Step 3: manual re-labeling ---
        vBoxLabelImg = QVBoxLayout()
        label3 = QLabel("Step3: 样本重标记")
        vBoxLabelImg.addWidget(label3)
        reLabelBtn = QPushButton("开始标记")
        vBoxLabelImg.addWidget(reLabelBtn)
        reLabelBtn.clicked.connect(self.LabelImage)
        # --- Step 4: training ---
        vBoxTrain = QVBoxLayout()
        label4 = QLabel("Step4: 训练")
        vBoxTrain.addWidget(label4)
        trainBtn = QPushButton("开始训练")
        vBoxTrain.addWidget(trainBtn)
        trainBtn.clicked.connect(self.train)
        # --- Step 5: result analysis (button not yet wired to a handler) ---
        vBoxUIGraph = QVBoxLayout()
        labelGraph = QLabel("Step5: UI自动化探索结果")
        vBoxUIGraph.addWidget(labelGraph)
        btn5 = QPushButton("结果分析")
        vBoxUIGraph.addWidget(btn5)
        # overall vertical layout
        vBox = QVBoxLayout()
        vBox.addLayout(vBoxOpenFile)
        vBox.addLayout(vBoxAutoLabel)
        # vBox.addWidget(label2)
        # vBox.addWidget(self.progressBar)
        vBox.addLayout(vBoxLabelImg)
        vBox.addLayout(vBoxTrain)
        vBox.addLayout(vBoxUIGraph)
        # widget = QWidget()
        # self.setCentralWidget(widget) # place the created widget in the centre of the window
        # widget.setLayout(vBox)
        # focus can only be set once layout is complete
        # self.lineEdit.setFocus()
        # window settings
        self.resize(500, 150)
        self.center()
        self.setFont(QFont('宋体', 14))
        self.setWindowTitle('UI Explore')
        buttonBox = bb = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
            Qt.Horizontal,
            self,
        )
        bb.button(bb.Ok).setIcon(labelme.utils.newIcon('done'))
        bb.button(bb.Cancel).setIcon(labelme.utils.newIcon('undo'))
        bb.accepted.connect(self.validate)
        bb.rejected.connect(self.reject)
        vBox.addWidget(buttonBox)
        self.setLayout(vBox)
        # self.setWindowIcon(QIcon('10.png'))
        self.show()
        # self.labelImageIndex = -1
        # self.labelImageList = []
        # NOTE(review): GetFileList() reads dialog.labelImageList /
        # dialog.labelImageIndex, but those instance attributes are commented
        # out above and never set — confirm before using GetFileList.
        self.image = None

    def validate(self):
        """Mark the dialog as confirmed and close it with 'accepted' status."""
        self.confirmFlag = True
        self.accept()

    def center(self):
        """Center the dialog on the available desktop area."""
        # get the frame geometry of the main window
        qr = self.frameGeometry()
        # get the centre of the desktop
        cp = QDesktopWidget().availableGeometry().center()
        # align the centre of the frame with the centre of the desktop
        qr.moveCenter(cp)
        # move the window's top-left corner to the frame's top-left corner
        self.move(qr.topLeft())

    def funOK(self):
        """Evaluate the line-edit text and echo the result (debug helper).

        NOTE(review): `self.textBrowser` is never created in this class, so
        both branches raise AttributeError; also uses `eval` on user input
        and a bare `except` — review before enabling this path.
        """
        try:
            text = self.lineEdit.text()
            self.textBrowser.append("{} = <b>{}</b>".format(text, eval(text)))
        except:
            self.textBrowser.append("输入的表达式<font color=red>“{}”</font>无效!".format(text))

    def funCancel(self):
        """Clear the sample-path line edit."""
        self.lineEdit.clear()

    def GetfilesCount(self, directoryName, formatList=[".png", ".bmp", ".jpg"]):
        """Recursively count files under `directoryName` whose extension is in
        `formatList`.  (Mutable default is read-only here, so it is harmless.)
        """
        file_count = 0
        for dirPath, dirNames, fileNames in os.walk(directoryName):
            for file in fileNames:
                if os.path.splitext(file)[1] in formatList:
                    file_count = file_count + 1
        return file_count

    def funOpenFile(self):
        """Step 1+2: pick the sample folder, then kick off auto-labeling and
        show labeling progress as (#json files) / (#images)."""
        FileDialog = QFileDialog()
        FileDialog.setFileMode(QFileDialog.Directory)
        FileDialog.setFilter(QDir.Dirs | QDir.NoDotAndDotDot)
        if FileDialog.exec():
            # get the name of the imported folder
            directoryName = FileDialog.selectedFiles()[0]
            if directoryName == "":
                return
        else:
            return
        self.__samplePath = directoryName
        self.lineEdit.setText(directoryName)
        self.progressBar.setMinimum(0)
        picNumber = self.GetfilesCount(directoryName)
        self.progressBar.setMaximum(picNumber)
        self.RunAutoLabelImage()
        self.__logger.info("run over label image....")
        # each auto-labeled image produces one .json next to it
        jsonFileNumber = self.GetfilesCount(directoryName, formatList=[".json"])
        self.progressBar.setValue(jsonFileNumber)
        self.__logger.info("picture number is {}, json file number is {}".format(picNumber, jsonFileNumber))

    def RunAutoLabelImage(self):
        """Launch the RefineDet auto-labeling script as a detached subprocess.

        Temporarily chdirs into bin/RefineDet/ so the script finds its files;
        restores the working directory before returning.
        """
        currentPath = os.getcwd()
        os.chdir("bin/RefineDet/")
        args = [
            'python',
            'detect_one_image.py',
        ]
        if platform == 'win32':
            if hasattr(os.sys, 'winver'):
                # new process group so the child survives/terminates independently
                pro = subprocess.Popen(args, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
            else:
                pro = subprocess.Popen(args)
        else:
            # POSIX: new session so the whole process group can be signalled
            pro = subprocess.Popen("python detect_one_image.py", shell=True, preexec_fn=os.setsid)
        os.chdir(currentPath)
        if pro is None:
            self.__logger.error("open action sample failed")
            return
        else:
            self.__logger.info('Run ActionSample Create PID: {} SubProcess'.format(pro.pid))

    def keyPressEvent(self, e):
        """Close the dialog on Escape."""
        if e.key() == Qt.Key_Escape:
            self.close()

    def closeEvent(self, QCloseEvent):
        """Ask for confirmation before allowing the dialog to close."""
        reply = QMessageBox.question(self,
                                     'UI自动化探索配置项',
                                     "是否要退出?",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)
        if reply == QMessageBox.Yes:
            QCloseEvent.accept()
        else:
            QCloseEvent.ignore()

    def LabelImage(self):
        """Step 3: scan the sample folder, fill the module-level image list,
        skip over already-labeled images, and open the first unlabeled one
        for manual labeling on the canvas."""
        self.__logger.info("begin label....")
        # self.labelImageList = list() # list of all images
        labelImageIndex = 0 # index of the image currently shown in the full image list
        # Scan all images; if an image already has a label, skip it and set
        # labelImageIndex to the first unlabeled image so display starts there.
        indexFlag = True
        for root, dirs, files in os.walk(self.__samplePath):
            for file in files:
                if os.path.splitext(file)[1] in [".png", ".bmp", ".jpg"]:
                    # self.labelImageList.append(os.path.join(root, file))
                    labelImageList.append(os.path.join(root, file))
                    # self.ui.fileListWidget.addItem(os.path.join(root, file))
                    if os.path.exists(root + '/' + file[: file.rfind('.')] + ".json") and indexFlag is True:
                        # self.labelImageIndex += 1
                        labelImageIndex += 1
                    else:
                        indexFlag = False
        # if self.labelImageIndex >= len(self.labelImageList):
        #     self.labelImageIndex = 0
        self.__logger.info("image list len is {}, index is {} ".format(len(labelImageList), labelImageIndex))
        if labelImageIndex >= len(labelImageList):
            labelImageIndex = 0
        self.__logger.info("image list len is {}, index is {} ".format(len(labelImageList), labelImageIndex))
        self.__canvas.resetState()
        self.__canvas.addUIGraph(None)
        self.LoadLabelImage()
        self.__canvas.setTreeWidget(self.__ui.treeWidget)
        self.__canvas.setUI(self.__ui)
        self.__ui.pushButton_prev.setEnabled(True)
        self.__ui.pushButton_next.setEnabled(True)

    def train(self):
        """Step 4: tail ./data/log.txt for RefineDet training output, average
        the per-iteration losses of each epoch, and live-plot loss vs epoch.

        NOTE(review): `exit()` on a malformed line kills the whole process,
        and `plt.grid(loss_x)` passes a list where matplotlib expects a
        visibility flag — confirm both are intended.
        """
        loss_x = []
        loss_y = []
        plt.ion()
        plt.figure(figsize=(15, 9))
        font1 = {'family': 'Times New Roman',
                 'weight': 'normal',
                 'size': 23
                 }
        try:
            with open("./data/log.txt") as f: # adjust this path to your own directory
                preEpoch = 0
                lossList = []
                for line in f:
                    line = line.strip(' ')
                    self.__logger.debug("line {}".format(line))
                    if len(line.split("AL:")) == 2:
                        strLossList = re.findall(r"AL:(.+?) AC", line)
                        if len(strLossList) < 1:
                            exit()
                        curLoss = float(strLossList[0].strip())
                        lossList.append(curLoss)
                        strEpoch = re.findall(r"Epoch:(.+?) \|| epochiter:", line)
                        if len(strEpoch) < 1:
                            exit()
                        curEpoch = float(strEpoch[0].strip())
                        if curEpoch > preEpoch:
                            # epoch rolled over: plot the mean loss of the finished epoch
                            loss = np.mean(lossList)
                            loss_y.append(loss)
                            loss_x.append(curEpoch)
                            preEpoch = curEpoch
                            lossList.clear()
                            plt.plot(loss_x, loss_y, '', c='g')
                            plt.title('RefinDet Training', font1)
                            plt.xlabel('epoch', font1)
                            plt.ylabel('loss', font1)
                            plt.grid(loss_x)
                            plt.pause(0.1)
                    else:
                        continue
            plt.ioff()
            plt.show()
        except Exception as e:
            self.__logger.error("train error {}".format(e))

    def popUp(self, move=True):
        """Show the dialog modally; return the confirm flag on accept, else None."""
        if move:
            self.move(QCursor.pos())
        return self.confirmFlag if self.exec() else None

    def getFilePath(self):
        """Return the currently selected sample folder path."""
        return self.__samplePath

    '''
    Display the UI label image
    '''
    def LoadLabelImage(self):
        """Load the first sample image onto the canvas and populate the tree
        widget with fileName / scene / labels top-level items.

        NOTE(review): `len(labelImageList) < 0` can never be true — the empty
        check presumably should be `== 0`; confirm and fix separately.
        """
        self.__logger.info("image list len is {}".format(len(labelImageList)))
        if len(labelImageList) < 0:
            self.__logger.error("folder{} has no image, please check".format(self.__samplePath))
            return
        labelImageIndex = 0
        fileName = labelImageList[labelImageIndex]
        self.PaintImage(fileName)
        sceneTreeItem = self.CreateTreeItem(key='fileName')
        sceneTreeItem.setText(1, os.path.basename(fileName))
        sceneTreeItem.setText(2, fileName)
        self.__ui.treeWidget.addTopLevelItem(sceneTreeItem)
        sceneTreeItem = self.CreateTreeItem(key='scene', edit=True)
        self.__ui.treeWidget.addTopLevelItem(sceneTreeItem)
        sceneTreeItem = self.CreateTreeItem(key='labels')
        self.__ui.treeWidget.addTopLevelItem(sceneTreeItem)
        self.LoadLabelJson(fileName)
        self.__canvas.setLabel()
        self.__canvas.labelFlag = True

    def CreateTreeItem(self, key, value=None, type=None, edit=False):
        """Build a QTreeWidgetItem with columns (key, value, type); `edit`
        makes the item user-editable.  (`type` shadows the builtin.)"""
        child = QTreeWidgetItem()
        child.setText(0, str(key))
        if value is not None:
            child.setText(1, str(value))
        if type is not None:
            child.setText(2, type)
        # child.setIcon(0, self.treeIcon)
        if edit is True:
            child.setFlags(child.flags() | Qt.ItemIsEditable)
        return child

    def PaintImage(self, imgPath):
        """Load `imgPath` (falling back to ./project/<name>/v1.0/<path>),
        scale it by the UI-graph window scale if present, and draw it on the
        canvas.

        NOTE(review): the scale branch reads `self.canvas` while the rest of
        the class uses `self.__canvas`, and `self.projectName` is never set
        in this class — both would raise AttributeError; confirm.
        """
        if imgPath == "" or imgPath is None:
            self.__logger.error('wrong imgPath: {}'.format(imgPath))
            return
        try:
            if not os.path.exists(imgPath):
                imgPath = "./project/" + self.projectName + "/v1.0/" + imgPath
                if not os.path.exists(imgPath):
                    raise Exception("there is no file {}".format(imgPath))
            frame = QImage(imgPath)
            if self.__canvas.uiGraph is not None:
                scaleW, scaleH = self.canvas.uiGraph.GetWindowScale()
                frame = frame.scaled(frame.width() * scaleW, frame.height() * scaleH)
            self.image = frame
            pix = QPixmap.fromImage(frame)
            self.__canvas.loadPixmap(pix)
            self.__canvas.setEnabled(True)
            # self.adjustScale(initial=True)
            self.paintCanvas()
        except Exception as e:
            self.__logger.error('read image failed, imgPath: {}'.format(imgPath))
            self.__logger.error(traceback.format_exc())

    def LoadLabelJson(self, labelImageName):
        """Read the <image>.json label file and recreate its labels as
        rectangle shapes on the canvas plus entries in the label tree."""
        # clear canvas bookkeeping from the previous image
        self.__canvas.shapeItem.clear()
        self.__canvas.itemShape.clear()
        # read the json file next to the image
        labelJsonPath = labelImageName[:labelImageName.rfind('.')] + ".json"
        if os.path.exists(labelJsonPath) is False:
            return
        try:
            with open(labelJsonPath, 'r') as f:
                labelJsonDict = json.load(f)
        except Exception as e:
            self.__logger.error(e)
            self.__logger.error(traceback.format_exc())
            return
        sceneName = labelJsonDict["scene"]
        self.__ui.treeWidget.topLevelItem(1).setText(1, sceneName)
        # for each label, read its content and display it on the canvas
        for labelShape in labelJsonDict["labels"]:
            labelText = labelShape["label"]
            # labelName = labelShape["name"]
            if "clickNum" in labelShape.keys():
                labelClickNum = int(labelShape["clickNum"])
            else:
                labelClickNum = 0
            self.__canvas.labelDialog.addLabelHistory(labelText)
            treeLabelItem = None
            labelFlag = False
            # reuse an existing tree item for this label text if there is one
            labelTreeItem = self.__ui.treeWidget.topLevelItem(2)
            for itemIndex in range(labelTreeItem.childCount()):
                treeItem = labelTreeItem.child(itemIndex)
                if treeItem.text(0) == labelText:
                    labelFlag = True
                    treeLabelItem = treeItem
                    break
            if labelFlag is False:
                treeLabelItem = self.CreateTreeItem(key=labelText)
                labelTreeItem.addChild(treeLabelItem)
            # create a rectangle shape representing the label on the canvas
            shape = Shape(name=Shape.RECT)
            # shape.setLabel(labelName)
            shape.setLabel(labelText)
            if labelClickNum > 0:
                shape.setLabel(str(labelClickNum))
            width = labelShape['w']
            height = labelShape['h']
            if width < 0 or height < 0:
                self.__logger.error("{}file is wrong".format(labelJsonPath))
            # the four corners of the (x, y, w, h) rectangle, clockwise
            point1 = QPoint(int(labelShape['x']), int(labelShape['y']))
            point2 = QPoint(int(labelShape['x']) + int(labelShape['w']),
                            int(labelShape['y']))
            point3 = QPoint(int(labelShape['x']) + int(labelShape['w']),
                            int(labelShape['y']) + int(labelShape['h']))
            point4 = QPoint(int(labelShape['x']),
                            int(labelShape['y']) + int(labelShape['h']))
            shape.addPoint(point1)
            shape.addPoint(point2)
            shape.addPoint(point3)
            shape.addPoint(point4)
            self.__canvas.shapes.append(shape)
            self.__canvas.shapeItem[shape] = treeLabelItem
            if labelText not in self.__canvas.itemShape.keys():
                self.__canvas.itemShape[labelText] = list()
            self.__canvas.itemShape[labelText].append(shape)

    # def adjustScale(self, initial=False):
    #     value = self.scalers[self.FIT_WINDOW]() # equivalent to calling self.scaleFitWindow()
    #     # self.paintCanvas()
    #     if self.__canvas.uiGraph:
    #         (scaleX, scaleY) = self.__canvas.uiGraph.GetWindowScale()
    #         value = value * max(scaleX, scaleY)
    #     self.zoomWidget.setValue(int(100 * value))

    def paintCanvas(self, scale=1.0):
        """Repaint the canvas for the currently loaded image.

        NOTE(review): `scale` is accepted but unused here.
        """
        assert not self.image.isNull(), "cannot paint null image"
        # self.__canvas.scale = 0.01 * self.zoomWidget.value()
        self.__canvas.adjustSize()
        self.__canvas.update()

    @staticmethod
    def GetFileList(parent=None):
        """Open the dialog and return its (image list, image index).

        NOTE(review): `labelImageList` / `labelImageIndex` instance attributes
        are never assigned (commented out in __init__), so this raises
        AttributeError as written — confirm intended source of these values.
        """
        dialog = UIExploreDialog(parent)
        # result = dialog.exec_()
        return dialog.labelImageList, dialog.labelImageIndex
# Script entry point: launch the UI Explore dialog standalone.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    MainWindow = UIExploreDialog()
sys.exit(app.exec_()) | [
"matplotlib.pyplot.title",
"os.walk",
"matplotlib.pyplot.figure",
"numpy.mean",
"os.path.join",
"os.chdir",
"labelme.utils.newIcon",
"os.path.exists",
"re.findall",
"traceback.format_exc",
"matplotlib.pyplot.pause",
"subprocess.Popen",
"matplotlib.pyplot.show",
"os.path.basename",
"matpl... | [((796, 824), 'logging.getLogger', 'logging.getLogger', (['"""sdktool"""'], {}), "('sdktool')\n", (813, 824), False, 'import logging\n'), ((4774, 4796), 'os.walk', 'os.walk', (['directoryName'], {}), '(directoryName)\n', (4781, 4796), False, 'import os\n'), ((5967, 5978), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5976, 5978), False, 'import os\n'), ((5987, 6013), 'os.chdir', 'os.chdir', (['"""bin/RefineDet/"""'], {}), "('bin/RefineDet/')\n", (5995, 6013), False, 'import os\n'), ((6452, 6473), 'os.chdir', 'os.chdir', (['currentPath'], {}), '(currentPath)\n', (6460, 6473), False, 'import os\n'), ((7519, 7545), 'os.walk', 'os.walk', (['self.__samplePath'], {}), '(self.__samplePath)\n', (7526, 7545), False, 'import os\n'), ((8931, 8940), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (8938, 8940), True, 'import matplotlib.pyplot as plt\n'), ((8949, 8976), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 9)'}), '(figsize=(15, 9))\n', (8959, 8976), True, 'import matplotlib.pyplot as plt\n'), ((3555, 3584), 'labelme.utils.newIcon', 'labelme.utils.newIcon', (['"""done"""'], {}), "('done')\n", (3576, 3584), False, 'import labelme\n'), ((3623, 3652), 'labelme.utils.newIcon', 'labelme.utils.newIcon', (['"""undo"""'], {}), "('undo')\n", (3644, 3652), False, 'import labelme\n'), ((6363, 6448), 'subprocess.Popen', 'subprocess.Popen', (['"""python detect_one_image.py"""'], {'shell': '(True)', 'preexec_fn': 'os.setsid'}), "('python detect_one_image.py', shell=True, preexec_fn=os.setsid\n )\n", (6379, 6448), False, 'import subprocess\n'), ((10629, 10639), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (10637, 10639), True, 'import matplotlib.pyplot as plt\n'), ((10652, 10662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10660, 10662), True, 'import matplotlib.pyplot as plt\n'), ((11462, 11488), 'os.path.basename', 'os.path.basename', (['fileName'], {}), '(fileName)\n', (11478, 11488), False, 'import os\n'), ((13699, 
13728), 'os.path.exists', 'os.path.exists', (['labelJsonPath'], {}), '(labelJsonPath)\n', (13713, 13728), False, 'import os\n'), ((6194, 6267), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'creationflags': 'subprocess.CREATE_NEW_PROCESS_GROUP'}), '(args, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)\n', (6210, 6267), False, 'import subprocess\n'), ((6308, 6330), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (6324, 6330), False, 'import subprocess\n'), ((12578, 12601), 'os.path.exists', 'os.path.exists', (['imgPath'], {}), '(imgPath)\n', (12592, 12601), False, 'import os\n'), ((12701, 12724), 'os.path.exists', 'os.path.exists', (['imgPath'], {}), '(imgPath)\n', (12715, 12724), False, 'import os\n'), ((13852, 13864), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13861, 13864), False, 'import json\n'), ((13421, 13443), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (13441, 13443), False, 'import traceback\n'), ((13963, 13985), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (13983, 13985), False, 'import traceback\n'), ((4852, 4874), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (4868, 4874), False, 'import os\n'), ((7597, 7619), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (7613, 7619), False, 'import os\n'), ((7769, 7793), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (7781, 7793), False, 'import os\n'), ((9472, 9503), 're.findall', 're.findall', (['"""AL:(.+?) AC"""', 'line'], {}), "('AL:(.+?) AC', line)\n", (9482, 9503), False, 'import re\n'), ((9738, 9785), 're.findall', 're.findall', (['"""Epoch:(.+?) \\\\|| epochiter:"""', 'line'], {}), "('Epoch:(.+?) 
\\\\|| epochiter:', line)\n", (9748, 9785), False, 'import re\n'), ((10012, 10029), 'numpy.mean', 'np.mean', (['lossList'], {}), '(lossList)\n', (10019, 10029), True, 'import numpy as np\n'), ((10251, 10286), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_x', 'loss_y', '""""""'], {'c': '"""g"""'}), "(loss_x, loss_y, '', c='g')\n", (10259, 10286), True, 'import matplotlib.pyplot as plt\n'), ((10315, 10352), 'matplotlib.pyplot.title', 'plt.title', (['"""RefinDet Training"""', 'font1'], {}), "('RefinDet Training', font1)\n", (10324, 10352), True, 'import matplotlib.pyplot as plt\n'), ((10381, 10407), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""', 'font1'], {}), "('epoch', font1)\n", (10391, 10407), True, 'import matplotlib.pyplot as plt\n'), ((10436, 10461), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""', 'font1'], {}), "('loss', font1)\n", (10446, 10461), True, 'import matplotlib.pyplot as plt\n'), ((10490, 10506), 'matplotlib.pyplot.grid', 'plt.grid', (['loss_x'], {}), '(loss_x)\n', (10498, 10506), True, 'import matplotlib.pyplot as plt\n'), ((10535, 10549), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (10544, 10549), True, 'import matplotlib.pyplot as plt\n')] |
import os
import pdb
import random
import tempfile
import fastestimator as fe
import numpy as np
import tensorflow as tf
from fastestimator.dataset import LabeledDirDataset
from fastestimator.op.numpyop.meta import OneOf
from fastestimator.op.numpyop.numpyop import NumpyOp
from fastestimator.op.numpyop.univariate import ReadImage
from fastestimator.op.tensorop.loss import CrossEntropy
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.schedule import EpochScheduler, cosine_decay
from fastestimator.search import GridSearch
from fastestimator.trace.adapt import LRScheduler
from fastestimator.trace.metric import Accuracy
from PIL import Image, ImageEnhance, ImageOps, ImageTransform
from tensorflow.keras import layers
class Rotate(NumpyOp):
    """Randomly rotate the image by up to +/- (level * 3) degrees."""

    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.degree = level * 3.0

    def forward(self, data, state):
        angle = random.uniform(-self.degree, self.degree)
        rotated = Image.fromarray(data).rotate(angle)
        return np.copy(np.asarray(rotated))
class Identity(NumpyOp):
    # Identity augmentation: accepts `level` for interface parity with the
    # other ops but does not use it; presumably data passes through unchanged
    # via the inherited NumpyOp.forward — confirm against the base class.
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
class AutoContrast(NumpyOp):
    """Maximize image contrast via PIL's histogram-based autocontrast."""

    def __init__(self, level, inputs=None, outputs=None, mode=None):
        # `level` is accepted for interface parity but has no effect here.
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)

    def forward(self, data, state):
        enhanced = ImageOps.autocontrast(Image.fromarray(data))
        return np.copy(np.asarray(enhanced))
class Equalize(NumpyOp):
    """Equalize the image histogram via PIL."""

    def __init__(self, level, inputs=None, outputs=None, mode=None):
        # `level` is accepted for interface parity but has no effect here.
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)

    def forward(self, data, state):
        equalized = ImageOps.equalize(Image.fromarray(data))
        return np.copy(np.asarray(equalized))
class Posterize(NumpyOp):
    """Reduce per-channel bit depth by a random amount up to ``level / 30 * 7`` bits.

    NOTE: the randomized bit loss may be inconsistent with the original
    RandAugment implementation (caveat carried over from the original code).
    """
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.bit_loss_limit = level / 30 * 7
    def forward(self, data, state):
        img = Image.fromarray(data)
        lost_bits = round(random.uniform(0, self.bit_loss_limit))
        posterized = ImageOps.posterize(img, 8 - lost_bits)
        return np.copy(np.asarray(posterized))
class Solarize(NumpyOp):
    """Invert pixel values at or above a random threshold; the threshold shrinks as ``level`` grows.

    NOTE: may be inconsistent with the original RandAugment implementation
    (caveat carried over from the original code).
    """
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.loss_limit = level / 30 * 256
    def forward(self, data, state):
        cutoff = 256 - round(random.uniform(0, self.loss_limit))
        # Keep values below the cutoff, invert the rest.
        return np.where(data < cutoff, data, 255 - data)
class Sharpness(NumpyOp):
    """Randomly scale sharpness by a factor in [1 - d, 1 + d] where d = level / 30 * 0.9."""
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.diff_limit = level / 30 * 0.9
    def forward(self, data, state):
        img = Image.fromarray(data)
        gain = 1.0 + random.uniform(-self.diff_limit, self.diff_limit)
        sharpened = ImageEnhance.Sharpness(img).enhance(gain)
        return np.copy(np.asarray(sharpened))
class Contrast(NumpyOp):
    """Randomly scale contrast by a factor in [1 - d, 1 + d] where d = level / 30 * 0.9."""
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.diff_limit = level / 30 * 0.9
    def forward(self, data, state):
        img = Image.fromarray(data)
        gain = 1.0 + random.uniform(-self.diff_limit, self.diff_limit)
        adjusted = ImageEnhance.Contrast(img).enhance(gain)
        return np.copy(np.asarray(adjusted))
class Color(NumpyOp):
    """Randomly scale color saturation by a factor in [1 - d, 1 + d] where d = level / 30 * 0.9."""
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.diff_limit = level / 30 * 0.9
    def forward(self, data, state):
        img = Image.fromarray(data)
        gain = 1.0 + random.uniform(-self.diff_limit, self.diff_limit)
        adjusted = ImageEnhance.Color(img).enhance(gain)
        return np.copy(np.asarray(adjusted))
class Brightness(NumpyOp):
    """Randomly scale brightness by a factor in [1 - d, 1 + d] where d = level / 30 * 0.9."""
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.diff_limit = level / 30 * 0.9
    def forward(self, data, state):
        img = Image.fromarray(data)
        gain = 1.0 + random.uniform(-self.diff_limit, self.diff_limit)
        adjusted = ImageEnhance.Brightness(img).enhance(gain)
        return np.copy(np.asarray(adjusted))
class ShearX(NumpyOp):
    """Shear the image horizontally by a random coefficient in [-c, c], c = level / 30 * 0.5."""
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.shear_coef = level / 30 * 0.5
    def forward(self, data, state):
        im = Image.fromarray(data)
        shear_coeff = random.uniform(-self.shear_coef, self.shear_coef)
        width, height = im.size
        # Widen the output canvas by the horizontal shift so sheared content is not clipped.
        xshift = round(abs(shear_coeff) * width)
        new_width = width + xshift
        # Affine coefficients (a, b, c, d, e, f) map each output (x, y) to input
        # (a*x + b*y + c, d*x + e*y + f); the -xshift offset is applied only for a
        # positive shear so the content stays inside the widened canvas.
        im = im.transform((new_width, height),
                          ImageTransform.AffineTransform(
                              (1.0, shear_coeff, -xshift if shear_coeff > 0 else 0.0, 0.0, 1.0, 0.0)),
                          resample=Image.BICUBIC)
        # Squash back to the original size, then return a writable ndarray copy.
        im = im.resize((width, height))
        return np.copy(np.asarray(im))
class ShearY(NumpyOp):
    """Shear the image vertically by a random coefficient in [-c, c], c = level / 30 * 0.5."""
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.shear_coef = level / 30 * 0.5
    def forward(self, data, state):
        im = Image.fromarray(data)
        shear_coeff = random.uniform(-self.shear_coef, self.shear_coef)
        width, height = im.size
        # Heighten the output canvas by the vertical shift so sheared content is not clipped.
        yshift = round(abs(shear_coeff) * height)
        newheight = height + yshift
        # Affine coefficients (a, b, c, d, e, f) map each output (x, y) to input
        # (a*x + b*y + c, d*x + e*y + f); the -yshift offset is applied only for a
        # positive shear so the content stays inside the taller canvas.
        im = im.transform((width, newheight),
                          ImageTransform.AffineTransform(
                              (1.0, 0.0, 0.0, shear_coeff, 1.0, -yshift if shear_coeff > 0 else 0.0)),
                          resample=Image.BICUBIC)
        # Squash back to the original size, then return a writable ndarray copy.
        im = im.resize((width, height))
        return np.copy(np.asarray(im))
class TranslateX(NumpyOp):
    """Translate the image horizontally by a random pixel offset scaled by ``level``."""
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.level = level
    def forward(self, data, state):
        im = Image.fromarray(data)
        width, height = im.size
        # NOTE(review): the bound uses `height` even though this is an X translation —
        # harmless for square inputs (e.g. 64x64) but verify the intent for non-square images.
        displacement = random.uniform(-self.level / 30 * height / 3, self.level / 30 * height / 3)
        # The third affine coefficient shifts the sampling origin along x.
        im = im.transform((width, height),
                          ImageTransform.AffineTransform((1.0, 0.0, displacement, 0.0, 1.0, 0.0)),
                          resample=Image.BICUBIC)
        return np.copy(np.asarray(im))
class TranslateY(NumpyOp):
    """Translate the image vertically by a random pixel offset scaled by ``level``."""
    def __init__(self, level, inputs=None, outputs=None, mode=None):
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.level = level
    def forward(self, data, state):
        im = Image.fromarray(data)
        width, height = im.size
        displacement = random.uniform(-self.level / 30 * height / 3, self.level / 30 * height / 3)
        # The sixth affine coefficient shifts the sampling origin along y.
        im = im.transform((width, height),
                          ImageTransform.AffineTransform((1.0, 0.0, 0.0, 0.0, 1.0, displacement)),
                          resample=Image.BICUBIC)
        return np.copy(np.asarray(im))
def scaled_dot_product_attention(q, k, v):
    """Return softmax(q @ k^T / sqrt(d_k)) @ v — standard scaled dot-product attention."""
    logits = tf.matmul(q, k, transpose_b=True)
    # Scale by sqrt of the key dimension to keep the softmax well-conditioned.
    d_k = tf.cast(tf.shape(k)[-1], tf.float32)
    weights = tf.nn.softmax(logits / tf.math.sqrt(d_k), axis=-1)
    return tf.matmul(weights, v)
def point_wise_feed_forward_network(em_dim, dff):
    """Position-wise two-layer MLP: em_dim -> dff (ReLU) -> em_dim."""
    hidden = tf.keras.layers.Dense(dff, activation='relu')  # (batch_size, seq_len, dff)
    projection = tf.keras.layers.Dense(em_dim)  # (batch_size, seq_len, em_dim)
    return tf.keras.Sequential([hidden, projection])
class MultiHeadAttention(layers.Layer):
    """Multi-head attention: project q/k/v, attend per head, then recombine and project."""
    def __init__(self, em_dim, num_heads):
        super().__init__()
        assert em_dim % num_heads == 0, "model dimension must be multiple of number of heads"
        self.num_heads = num_heads
        self.em_dim = em_dim
        # Per-head feature size; heads split em_dim evenly.
        self.depth = em_dim // self.num_heads
        self.wq = layers.Dense(em_dim)
        self.wk = layers.Dense(em_dim)
        self.wv = layers.Dense(em_dim)
        # Output projection applied after heads are concatenated.
        self.dense = layers.Dense(em_dim)
    def split_heads(self, x, batch_size):
        """Reshape (B, seq_len, em_dim) into (B, num_heads, seq_len, depth)."""
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3]) # B, num_heads, seq_len, depth
    def call(self, v, k, q):
        batch_size = tf.shape(q)[0]
        q = self.wq(q) # B, seq_len, em_dim
        k = self.wk(k) # B, seq_len, em_dim
        v = self.wv(v) # B, seq_len, em_dim
        q = self.split_heads(q, batch_size)
        k = self.split_heads(k, batch_size)
        v = self.split_heads(v, batch_size)
        scaled_attention = scaled_dot_product_attention(q, k, v)
        # Undo the head split: move seq_len back next to batch, then merge heads.
        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) #B, seq_len, num_heads, depth
        concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.em_dim)) # B, seq_len, em_dim
        output = self.dense(concat_attention)
        return output
class EncoderLayer(layers.Layer):
    """One transformer encoder block: self-attention and a feed-forward net,
    each wrapped with dropout and a post-residual layer norm."""
    def __init__(self, em_dim, num_heads, dff, rate=0.1):
        super().__init__()
        self.mha = MultiHeadAttention(em_dim, num_heads)
        self.ffn = point_wise_feed_forward_network(em_dim, dff)
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)
    def call(self, x, training):
        # Self-attention sub-layer with residual connection.
        attn_output = self.mha(x, x, x)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(x + attn_output)
        # Feed-forward sub-layer with residual connection.
        ffn_output = self.ffn(out1)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = self.layernorm2(out1 + ffn_output)
        return out2
class Encoder(layers.Layer):
    """Dropout followed by a stack of ``num_layers`` transformer encoder layers."""
    def __init__(self, num_layers, em_dim, num_heads, dff, rate=0.1):
        super().__init__()
        self.num_layers = num_layers
        self.enc_layers = [EncoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]
        self.dropout = layers.Dropout(rate)
    def call(self, x, training=None):
        out = self.dropout(x, training=training)
        for encoder_layer in self.enc_layers:
            out = encoder_layer(out, training)
        return out
class PositionEmbedding(layers.Layer):
    """Learned additive position embedding for every patch token plus the [cls] token."""
    def __init__(self, image_size, patch_size, em_dim):
        super().__init__()
        h, w, _ = image_size
        assert h % patch_size == 0 and w % patch_size == 0, "image size must be an integer multiple of patch size"
        # One embedding per patch (h*w / patch_size^2) plus one for the [cls] token.
        self.position_embedding = tf.Variable(tf.zeros(shape=(1, h * w // patch_size**2 + 1, em_dim)),
                                              trainable=True,
                                              name="position_embedding")
    def call(self, x):
        # Broadcasts over the batch dimension.
        return x + self.position_embedding
class ClsToken(layers.Layer):
    """Prepend a learnable [cls] token to every sequence in the batch."""
    def __init__(self, em_dim):
        super().__init__()
        # Single shared token, broadcast to the batch size at call time.
        self.cls_token = tf.Variable(tf.zeros(shape=(1, 1, em_dim)), trainable=True, name="cls_token")
        self.em_dim = em_dim
    def call(self, x):
        batch_size = tf.shape(x)[0]
        return tf.concat([tf.broadcast_to(self.cls_token, (batch_size, 1, self.em_dim)), x], axis=1)
def transformer_encoder(image_size, patch_size=16, num_layers=12, em_dim=768, num_heads=12, dff=3072, rate=0.1):
    """Build the ViT encoder: patchify -> [cls] token -> position embedding ->
    transformer stack -> layer-normed [cls] embedding.

    Returns a Keras model mapping an image of ``image_size`` to a (B, em_dim) embedding.
    """
    inputs = layers.Input(shape=image_size)
    # Patch Embedding: a strided conv is equivalent to slicing patches and projecting them.
    x = layers.Conv2D(em_dim, kernel_size=patch_size, strides=patch_size, use_bias=False)(inputs) #[B, H, W, em_dim]
    x = layers.Reshape((-1, em_dim))(x) # [B, num_patches, em_dim]
    x = ClsToken(em_dim)(x) # [B, num_patches + 1, em_dim]
    x = PositionEmbedding(image_size, patch_size, em_dim)(x)
    x = Encoder(num_layers=num_layers, em_dim=em_dim, num_heads=num_heads, dff=dff, rate=rate)(x)
    x = layers.LayerNormalization(epsilon=1e-6)(x[:, 0, :]) # only need the embedding w.r.t [cls] token
    return tf.keras.Model(inputs=inputs, outputs=x)
def vision_transformer(num_class,
                       image_size,
                       patch_size=16,
                       num_layers=12,
                       em_dim=768,
                       num_heads=12,
                       dff=3072,
                       rate=0.1):
    """ViT classifier: transformer encoder backbone plus a linear head producing ``num_class`` logits."""
    inputs = layers.Input(shape=image_size)
    backbone = transformer_encoder(image_size, patch_size, num_layers, em_dim, num_heads, dff, rate)
    features = backbone(inputs)
    logits = layers.Dense(num_class)(features)
    return tf.keras.Model(inputs=inputs, outputs=logits)
class Rescale(NumpyOp):
    """Map raw pixel intensities from [0, 255] into float32 values in [0, 1]."""
    def forward(self, data, state):
        return (data / 255).astype(np.float32)
def lr_schedule_warmup(step, train_steps_epoch, init_lr):
    """Linear learning-rate warm-up over the first 5 epochs, then a constant ``init_lr``.

    Args:
        step: global training step (0-based).
        train_steps_epoch: number of steps per epoch.
        init_lr: target learning rate after warm-up.
    Returns:
        The learning rate for this step.
    """
    warmup_steps = train_steps_epoch * 5
    if step >= warmup_steps:
        return init_lr
    # Ramp linearly from 0 at step 0 up to init_lr at warmup_steps.
    return init_lr / warmup_steps * step
def get_estimator(N, M, init_lr=0.1, batch_size=512, epochs=100, data_dir="/data/shared_data/tiny-imagenet-200"):
    """Build a FastEstimator training run for one RUA hyperparameter candidate.

    Args:
        N: number of random augmentations applied per sample (each drawn via OneOf).
        M: augmentation magnitude shared by every op in the pool.
        init_lr: peak learning rate reached after warm-up.
        batch_size: training batch size.
        epochs: total training epochs.
        data_dir: root holding "train" and "val" subdirectories of labeled images.
    Returns:
        A configured fe.Estimator (ViT on 200-class 64x64 images).
    """
    print("trying N: {}, M: {}".format(N, M))
    train_data = LabeledDirDataset(os.path.join(data_dir, "train"))
    test_data = LabeledDirDataset(os.path.join(data_dir, "val"))
    # Pool of candidate augmentations; all share magnitude M and apply only in train mode.
    aug_options = [
        Rotate(level=M, inputs="x", outputs="x", mode="train"),
        Identity(level=M, inputs="x", outputs="x", mode="train"),
        AutoContrast(level=M, inputs="x", outputs="x", mode="train"),
        Equalize(level=M, inputs="x", outputs="x", mode="train"),
        Posterize(level=M, inputs="x", outputs="x", mode="train"),
        Solarize(level=M, inputs="x", outputs="x", mode="train"),
        Sharpness(level=M, inputs="x", outputs="x", mode="train"),
        Contrast(level=M, inputs="x", outputs="x", mode="train"),
        Color(level=M, inputs="x", outputs="x", mode="train"),
        Brightness(level=M, inputs="x", outputs="x", mode="train"),
        ShearX(level=M, inputs="x", outputs="x", mode="train"),
        ShearY(level=M, inputs="x", outputs="x", mode="train"),
        TranslateX(level=M, inputs="x", outputs="x", mode="train"),
        TranslateY(level=M, inputs="x", outputs="x", mode="train")
    ]
    # N independent random choices from the pool per sample.
    rua_ops = [OneOf(*aug_options) for _ in range(N)]
    pipeline = fe.Pipeline(train_data=train_data,
                           test_data=test_data,
                           batch_size=batch_size,
                           ops=[ReadImage(inputs="x", outputs="x")] + rua_ops + [Rescale(inputs="x", outputs="x")])
    model = fe.build(
        model_fn=lambda: vision_transformer(
            num_class=200, image_size=(64, 64, 3), patch_size=4, num_layers=6, em_dim=256, num_heads=8, dff=512),
        optimizer_fn=lambda: tf.optimizers.SGD(init_lr, momentum=0.9))
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce", from_logits=True),
        UpdateOp(model=model, loss_name="ce")
    ])
    # Epochs 1-5: linear warm-up; epoch 6 onward: cosine decay down to init_lr / 100.
    lr_schedule = {
        1:
            LRScheduler(
                model=model,
                lr_fn=lambda step: lr_schedule_warmup(
                    step, train_steps_epoch=np.ceil(len(train_data) / batch_size), init_lr=init_lr)),
        6:
            LRScheduler(
                model=model,
                lr_fn=lambda epoch: cosine_decay(
                    epoch, cycle_length=epochs - 5, init_lr=init_lr, min_lr=init_lr / 100, start=6))
    }
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=[Accuracy(true_key="y", pred_key="y_pred"), EpochScheduler(lr_schedule)])
    return estimator
def score_fn(search_idx, N, M):
    """Train one (N, M) RUA candidate end-to-end and return its best test accuracy as the search score."""
    estimator = get_estimator(N=N, M=M)
    estimator.fit(warmup=False)
    summary = estimator.test(summary="exp")
    best_acc = float(max(summary.history["test"]["accuracy"].values()))
    print("Evaluated N:{} M:{}, results:{}".format(N, M, best_acc))
    return best_acc
def fastestimator_run(restore_dir=None):
    """Run the 10x10 grid search over RUA's (N, M) and print the results.

    Args:
        restore_dir: directory used to save/restore search progress. Defaults to a
            fresh temporary directory per call. (The original signature evaluated
            ``tempfile.mkdtemp()`` as the default, which runs once at import time —
            creating a temp dir even if this function is never called and silently
            sharing that single directory across every call.)
    """
    if restore_dir is None:
        restore_dir = tempfile.mkdtemp()
    restore_dir = os.path.join(restore_dir, "svhn")
    score_fn_in_use = lambda search_idx, N, M: score_fn(search_idx, N, M)
    gss = GridSearch(score_fn=score_fn_in_use,
                     params={
                         "N": [x + 1 for x in range(10)], "M": [3 * (x + 1) for x in range(10)]
                     })
    gss.fit(save_dir=restore_dir)
    print("search history:")
    print(gss.get_search_results())
    print("=======================")
    print("best result:")
    print(gss.get_best_results())
| [
"PIL.ImageEnhance.Brightness",
"fastestimator.schedule.cosine_decay",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.matmul",
"fastestimator.op.numpyop.meta.OneOf",
"os.path.join",
"tensorflow.nn.s... | [((7694, 7727), 'tensorflow.matmul', 'tf.matmul', (['q', 'k'], {'transpose_b': '(True)'}), '(q, k, transpose_b=True)\n', (7703, 7727), True, 'import tensorflow as tf\n'), ((7857, 7904), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scaled_attention_logits'], {'axis': '(-1)'}), '(scaled_attention_logits, axis=-1)\n', (7870, 7904), True, 'import tensorflow as tf\n'), ((7918, 7949), 'tensorflow.matmul', 'tf.matmul', (['attention_weights', 'v'], {}), '(attention_weights, v)\n', (7927, 7949), True, 'import tensorflow as tf\n'), ((11981, 12011), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'image_size'}), '(shape=image_size)\n', (11993, 12011), False, 'from tensorflow.keras import layers\n'), ((12555, 12595), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (12569, 12595), True, 'import tensorflow as tf\n'), ((12895, 12925), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'image_size'}), '(shape=image_size)\n', (12907, 12925), False, 'from tensorflow.keras import layers\n'), ((13098, 13138), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (13112, 13138), True, 'import tensorflow as tf\n'), ((16518, 16536), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (16534, 16536), False, 'import tempfile\n'), ((16557, 16590), 'os.path.join', 'os.path.join', (['restore_dir', '"""svhn"""'], {}), "(restore_dir, 'svhn')\n", (16569, 16590), False, 'import os\n'), ((1047, 1068), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (1062, 1068), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((1086, 1127), 'random.uniform', 'random.uniform', (['(-self.degree)', 'self.degree'], {}), '(-self.degree, self.degree)\n', (1100, 1127), False, 'import random\n'), ((1580, 1601), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', 
(1595, 1601), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((1615, 1640), 'PIL.ImageOps.autocontrast', 'ImageOps.autocontrast', (['im'], {}), '(im)\n', (1636, 1640), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((1894, 1915), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (1909, 1915), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((1929, 1950), 'PIL.ImageOps.equalize', 'ImageOps.equalize', (['im'], {}), '(im)\n', (1946, 1950), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((2354, 2375), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (2369, 2375), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((2462, 2498), 'PIL.ImageOps.posterize', 'ImageOps.posterize', (['im', 'bits_to_keep'], {}), '(im, bits_to_keep)\n', (2480, 2498), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((2925, 2969), 'numpy.where', 'np.where', (['(data < threshold)', 'data', '(255 - data)'], {}), '(data < threshold, data, 255 - data)\n', (2933, 2969), True, 'import numpy as np\n'), ((3248, 3269), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (3263, 3269), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((3695, 3716), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (3710, 3716), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((4138, 4159), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (4153, 4159), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((4583, 4604), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (4598, 4604), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((5029, 5050), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (5044, 5050), False, 'from PIL 
import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((5073, 5122), 'random.uniform', 'random.uniform', (['(-self.shear_coef)', 'self.shear_coef'], {}), '(-self.shear_coef, self.shear_coef)\n', (5087, 5122), False, 'import random\n'), ((5831, 5852), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (5846, 5852), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((5875, 5924), 'random.uniform', 'random.uniform', (['(-self.shear_coef)', 'self.shear_coef'], {}), '(-self.shear_coef, self.shear_coef)\n', (5889, 5924), False, 'import random\n'), ((6622, 6643), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (6637, 6643), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((6699, 6774), 'random.uniform', 'random.uniform', (['(-self.level / 30 * height / 3)', '(self.level / 30 * height / 3)'], {}), '(-self.level / 30 * height / 3, self.level / 30 * height / 3)\n', (6713, 6774), False, 'import random\n'), ((7249, 7270), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (7264, 7270), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((7326, 7401), 'random.uniform', 'random.uniform', (['(-self.level / 30 * height / 3)', '(self.level / 30 * height / 3)'], {}), '(-self.level / 30 * height / 3, self.level / 30 * height / 3)\n', (7340, 7401), False, 'import random\n'), ((7816, 7832), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['dk'], {}), '(dk)\n', (7828, 7832), True, 'import tensorflow as tf\n'), ((8550, 8570), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['em_dim'], {}), '(em_dim)\n', (8562, 8570), False, 'from tensorflow.keras import layers\n'), ((8589, 8609), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['em_dim'], {}), '(em_dim)\n', (8601, 8609), False, 'from tensorflow.keras import layers\n'), ((8628, 8648), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['em_dim'], {}), '(em_dim)\n', (8640, 8648), False, 'from 
tensorflow.keras import layers\n'), ((8670, 8690), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['em_dim'], {}), '(em_dim)\n', (8682, 8690), False, 'from tensorflow.keras import layers\n'), ((8746, 8805), 'tensorflow.reshape', 'tf.reshape', (['x', '(batch_size, -1, self.num_heads, self.depth)'], {}), '(x, (batch_size, -1, self.num_heads, self.depth))\n', (8756, 8805), True, 'import tensorflow as tf\n'), ((8821, 8855), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 2, 1, 3]'}), '(x, perm=[0, 2, 1, 3])\n', (8833, 8855), True, 'import tensorflow as tf\n'), ((9313, 9362), 'tensorflow.transpose', 'tf.transpose', (['scaled_attention'], {'perm': '[0, 2, 1, 3]'}), '(scaled_attention, perm=[0, 2, 1, 3])\n', (9325, 9362), True, 'import tensorflow as tf\n'), ((9421, 9480), 'tensorflow.reshape', 'tf.reshape', (['scaled_attention', '(batch_size, -1, self.em_dim)'], {}), '(scaled_attention, (batch_size, -1, self.em_dim))\n', (9431, 9480), True, 'import tensorflow as tf\n'), ((9839, 9879), 'tensorflow.keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (9864, 9879), False, 'from tensorflow.keras import layers\n'), ((9905, 9945), 'tensorflow.keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (9930, 9945), False, 'from tensorflow.keras import layers\n'), ((9969, 9989), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['rate'], {}), '(rate)\n', (9983, 9989), False, 'from tensorflow.keras import layers\n'), ((10014, 10034), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['rate'], {}), '(rate)\n', (10028, 10034), False, 'from tensorflow.keras import layers\n'), ((10683, 10703), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['rate'], {}), '(rate)\n', (10697, 10703), False, 'from tensorflow.keras import layers\n'), ((12042, 12128), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['em_dim'], {'kernel_size': 'patch_size', 
'strides': 'patch_size', 'use_bias': '(False)'}), '(em_dim, kernel_size=patch_size, strides=patch_size, use_bias=\n False)\n', (12055, 12128), False, 'from tensorflow.keras import layers\n'), ((12160, 12188), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(-1, em_dim)'], {}), '((-1, em_dim))\n', (12174, 12188), False, 'from tensorflow.keras import layers\n'), ((12447, 12487), 'tensorflow.keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (12472, 12487), False, 'from tensorflow.keras import layers\n'), ((13060, 13083), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_class'], {}), '(num_class)\n', (13072, 13083), False, 'from tensorflow.keras import layers\n'), ((13216, 13238), 'numpy.float32', 'np.float32', (['(data / 255)'], {}), '(data / 255)\n', (13226, 13238), True, 'import numpy as np\n'), ((13653, 13684), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (13665, 13684), False, 'import os\n'), ((13720, 13749), 'os.path.join', 'os.path.join', (['data_dir', '"""val"""'], {}), "(data_dir, 'val')\n", (13732, 13749), False, 'import os\n'), ((14718, 14737), 'fastestimator.op.numpyop.meta.OneOf', 'OneOf', (['*aug_options'], {}), '(*aug_options)\n', (14723, 14737), False, 'from fastestimator.op.numpyop.meta import OneOf\n'), ((1182, 1196), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (1192, 1196), True, 'import numpy as np\n'), ((1664, 1678), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (1674, 1678), True, 'import numpy as np\n'), ((1974, 1988), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (1984, 1988), True, 'import numpy as np\n'), ((2522, 2536), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (2532, 2536), True, 'import numpy as np\n'), ((3293, 3342), 'random.uniform', 'random.uniform', (['(-self.diff_limit)', 'self.diff_limit'], {}), '(-self.diff_limit, self.diff_limit)\n', (3307, 3342), False, 'import 
random\n'), ((3422, 3436), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (3432, 3436), True, 'import numpy as np\n'), ((3740, 3789), 'random.uniform', 'random.uniform', (['(-self.diff_limit)', 'self.diff_limit'], {}), '(-self.diff_limit, self.diff_limit)\n', (3754, 3789), False, 'import random\n'), ((3868, 3882), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (3878, 3882), True, 'import numpy as np\n'), ((4183, 4232), 'random.uniform', 'random.uniform', (['(-self.diff_limit)', 'self.diff_limit'], {}), '(-self.diff_limit, self.diff_limit)\n', (4197, 4232), False, 'import random\n'), ((4308, 4322), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (4318, 4322), True, 'import numpy as np\n'), ((4628, 4677), 'random.uniform', 'random.uniform', (['(-self.diff_limit)', 'self.diff_limit'], {}), '(-self.diff_limit, self.diff_limit)\n', (4642, 4677), False, 'import random\n'), ((4758, 4772), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (4768, 4772), True, 'import numpy as np\n'), ((5312, 5419), 'PIL.ImageTransform.AffineTransform', 'ImageTransform.AffineTransform', (['(1.0, shear_coeff, -xshift if shear_coeff > 0 else 0.0, 0.0, 1.0, 0.0)'], {}), '((1.0, shear_coeff, -xshift if shear_coeff > \n 0 else 0.0, 0.0, 1.0, 0.0))\n', (5342, 5419), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((5560, 5574), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (5570, 5574), True, 'import numpy as np\n'), ((6115, 6221), 'PIL.ImageTransform.AffineTransform', 'ImageTransform.AffineTransform', (['(1.0, 0.0, 0.0, shear_coeff, 1.0, -yshift if shear_coeff > 0 else 0.0)'], {}), '((1.0, 0.0, 0.0, shear_coeff, 1.0, -yshift if\n shear_coeff > 0 else 0.0))\n', (6145, 6221), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((6363, 6377), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (6373, 6377), True, 'import numpy as np\n'), ((6844, 6915), 'PIL.ImageTransform.AffineTransform', 
'ImageTransform.AffineTransform', (['(1.0, 0.0, displacement, 0.0, 1.0, 0.0)'], {}), '((1.0, 0.0, displacement, 0.0, 1.0, 0.0))\n', (6874, 6915), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((6990, 7004), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (7000, 7004), True, 'import numpy as np\n'), ((7471, 7542), 'PIL.ImageTransform.AffineTransform', 'ImageTransform.AffineTransform', (['(1.0, 0.0, 0.0, 0.0, 1.0, displacement)'], {}), '((1.0, 0.0, 0.0, 0.0, 1.0, displacement))\n', (7501, 7542), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((7617, 7631), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (7627, 7631), True, 'import numpy as np\n'), ((7745, 7756), 'tensorflow.shape', 'tf.shape', (['k'], {}), '(k)\n', (7753, 7756), True, 'import tensorflow as tf\n'), ((8061, 8106), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['dff'], {'activation': '"""relu"""'}), "(dff, activation='relu')\n", (8082, 8106), True, 'import tensorflow as tf\n'), ((8146, 8175), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['em_dim'], {}), '(em_dim)\n', (8167, 8175), True, 'import tensorflow as tf\n'), ((8939, 8950), 'tensorflow.shape', 'tf.shape', (['q'], {}), '(q)\n', (8947, 8950), True, 'import tensorflow as tf\n'), ((11210, 11267), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, h * w // patch_size ** 2 + 1, em_dim)'}), '(shape=(1, h * w // patch_size ** 2 + 1, em_dim))\n', (11218, 11267), True, 'import tensorflow as tf\n'), ((11597, 11627), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '(1, 1, em_dim)'}), '(shape=(1, 1, em_dim))\n', (11605, 11627), True, 'import tensorflow as tf\n'), ((11737, 11748), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (11745, 11748), True, 'import tensorflow as tf\n'), ((2409, 2447), 'random.uniform', 'random.uniform', (['(0)', 'self.bit_loss_limit'], {}), '(0, self.bit_loss_limit)\n', (2423, 2447), False, 'import random\n'), ((2874, 2908), 
'random.uniform', 'random.uniform', (['(0)', 'self.loss_limit'], {}), '(0, self.loss_limit)\n', (2888, 2908), False, 'import random\n'), ((3356, 3382), 'PIL.ImageEnhance.Sharpness', 'ImageEnhance.Sharpness', (['im'], {}), '(im)\n', (3378, 3382), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((3803, 3828), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['im'], {}), '(im)\n', (3824, 3828), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((4246, 4268), 'PIL.ImageEnhance.Color', 'ImageEnhance.Color', (['im'], {}), '(im)\n', (4264, 4268), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((4691, 4718), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['im'], {}), '(im)\n', (4714, 4718), False, 'from PIL import Image, ImageEnhance, ImageOps, ImageTransform\n'), ((11778, 11839), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['self.cls_token', '(batch_size, 1, self.em_dim)'], {}), '(self.cls_token, (batch_size, 1, self.em_dim))\n', (11793, 11839), True, 'import tensorflow as tf\n'), ((15231, 15271), 'tensorflow.optimizers.SGD', 'tf.optimizers.SGD', (['init_lr'], {'momentum': '(0.9)'}), '(init_lr, momentum=0.9)\n', (15248, 15271), True, 'import tensorflow as tf\n'), ((15312, 15362), 'fastestimator.op.tensorop.model.ModelOp', 'ModelOp', ([], {'model': 'model', 'inputs': '"""x"""', 'outputs': '"""y_pred"""'}), "(model=model, inputs='x', outputs='y_pred')\n", (15319, 15362), False, 'from fastestimator.op.tensorop.model import ModelOp, UpdateOp\n'), ((15372, 15440), 'fastestimator.op.tensorop.loss.CrossEntropy', 'CrossEntropy', ([], {'inputs': "('y_pred', 'y')", 'outputs': '"""ce"""', 'from_logits': '(True)'}), "(inputs=('y_pred', 'y'), outputs='ce', from_logits=True)\n", (15384, 15440), False, 'from fastestimator.op.tensorop.loss import CrossEntropy\n'), ((15450, 15487), 'fastestimator.op.tensorop.model.UpdateOp', 'UpdateOp', ([], {'model': 'model', 'loss_name': '"""ce"""'}), 
"(model=model, loss_name='ce')\n", (15458, 15487), False, 'from fastestimator.op.tensorop.model import ModelOp, UpdateOp\n'), ((16102, 16143), 'fastestimator.trace.metric.Accuracy', 'Accuracy', ([], {'true_key': '"""y"""', 'pred_key': '"""y_pred"""'}), "(true_key='y', pred_key='y_pred')\n", (16110, 16143), False, 'from fastestimator.trace.metric import Accuracy\n'), ((16145, 16172), 'fastestimator.schedule.EpochScheduler', 'EpochScheduler', (['lr_schedule'], {}), '(lr_schedule)\n', (16159, 16172), False, 'from fastestimator.schedule import EpochScheduler, cosine_decay\n'), ((15810, 15907), 'fastestimator.schedule.cosine_decay', 'cosine_decay', (['epoch'], {'cycle_length': '(epochs - 5)', 'init_lr': 'init_lr', 'min_lr': '(init_lr / 100)', 'start': '(6)'}), '(epoch, cycle_length=epochs - 5, init_lr=init_lr, min_lr=\n init_lr / 100, start=6)\n', (15822, 15907), False, 'from fastestimator.schedule import EpochScheduler, cosine_decay\n'), ((14937, 14971), 'fastestimator.op.numpyop.univariate.ReadImage', 'ReadImage', ([], {'inputs': '"""x"""', 'outputs': '"""x"""'}), "(inputs='x', outputs='x')\n", (14946, 14971), False, 'from fastestimator.op.numpyop.univariate import ReadImage\n')] |
import numpy as np
class Grid():
"""
This class contains all data related to the grid in which the game is contained.
The information is stored as a numpy array of pixels.
The grid is treated as a cartesian [x,y] plane in which [0,0] is located at
the upper left most pixel and [max_x, max_y] is located at the lower right most pixel.
Note that it is assumed spaces that can kill a snake have a non-zero value as their 0 channel.
It is also assumed that HEAD_COLOR has a 255 value as its 0 channel.
"""
BODY_COLOR = np.array([1,0,0], dtype=np.uint8)
HEAD_COLOR = np.array([255, 0, 0], dtype=np.uint8)
FOOD_COLOR = np.array([0,0,255], dtype=np.uint8)
SPACE_COLOR = np.array([0,255,0], dtype=np.uint8)
def __init__(self, grid_size=[30,30], unit_size=10, unit_gap=1):
"""
grid_size - tuple, list, or ndarray specifying number of atomic units in
both the x and y direction
unit_size - integer denoting the atomic size of grid units in pixels
"""
self.unit_size = int(unit_size)
self.unit_gap = unit_gap
self.grid_size = np.asarray(grid_size, dtype=np.int) # size in terms of units
height = self.grid_size[1]*self.unit_size
width = self.grid_size[0]*self.unit_size
channels = 3
self.grid = np.zeros((height, width, channels), dtype=np.uint8)
self.grid[:,:,:] = self.SPACE_COLOR
self.open_space = grid_size[0]*grid_size[1]
    def check_death(self, head_coord):
        """
        Checks the grid to see if argued head_coord has collided with a death space (i.e. snake or wall)

        head_coord - x,y integer coordinates as a tuple, list, or ndarray

        Returns a truthy value on collision (propagates whatever off_grid /
        snake_space return — presumably bools; confirm against those helpers).
        """
        return self.off_grid(head_coord) or self.snake_space(head_coord)
def color_of(self, coord):
"""
Returns the color of the specified coordinate
coord - x,y integer coordinates as a tuple, list, or ndarray
"""
return self.grid[int(coord[1]*self.unit_size), int(coord[0]*self.unit_size), :]
def connect(self, coord1, coord2, color=BODY_COLOR):
"""
Draws connection between two adjacent pieces using the specified color.
Created to indicate the relative ordering of the snake's body.
coord1 and coord2 must be adjacent.
coord1 - x,y integer coordinates as a tuple, list, or ndarray
coord2 - x,y integer coordinates as a tuple, list, or ndarray
color - [R,G,B] values as a tuple, list, or ndarray
"""
# Check for adjacency
# Next to one another:
adjacency1 = (np.abs(coord1[0]-coord2[0]) == 1 and np.abs(coord1[1]-coord2[1]) == 0)
# Stacked on one another:
adjacency2 = (np.abs(coord1[0]-coord2[0]) == 0 and np.abs(coord1[1]-coord2[1]) == 1)
assert adjacency1 or adjacency2
if adjacency1: # x values differ
min_x, max_x = sorted([coord1[0], coord2[0]])
min_x = min_x*self.unit_size+self.unit_size-self.unit_gap
max_x = max_x*self.unit_size
self.grid[coord1[1]*self.unit_size, min_x:max_x, :] = color
self.grid[coord1[1]*self.unit_size+self.unit_size-self.unit_gap-1, min_x:max_x, :] = color
else: # y values differ
min_y, max_y = sorted([coord1[1], coord2[1]])
min_y = min_y*self.unit_size+self.unit_size-self.unit_gap
max_y = max_y*self.unit_size
self.grid[min_y:max_y, coord1[0]*self.unit_size, :] = color
self.grid[min_y:max_y, coord1[0]*self.unit_size+self.unit_size-self.unit_gap-1, :] = color
def cover(self, coord, color):
"""
Colors a single space on the grid. Use erase if creating an empty space on the grid.
This function is used like draw but without affecting the open_space count.
coord - x,y integer coordinates as a tuple, list, or ndarray
color - [R,G,B] values as a tuple, list, or ndarray
"""
if self.off_grid(coord):
return False
x = int(coord[0]*self.unit_size)
end_x = x+self.unit_size-self.unit_gap
y = int(coord[1]*self.unit_size)
end_y = y+self.unit_size-self.unit_gap
self.grid[y:end_y, x:end_x, :] = np.asarray(color, dtype=np.uint8)
return True
def draw(self, coord, color):
"""
Colors a single space on the grid. Use erase if creating an empty space on the grid.
Affects the open_space count.
coord - x,y integer coordinates as a tuple, list, or ndarray
color - [R,G,B] values as a tuple, list, or ndarray
"""
if self.cover(coord, color):
self.open_space -= 1
return True
else:
return False
def draw_snake(self, snake, head_color=HEAD_COLOR):
"""
Draws a snake with the given head color.
snake - Snake object
head_color - [R,G,B] values as a tuple, list, or ndarray
"""
self.draw(snake.head, head_color)
prev_coord = None
for i in range(len(snake.body)):
coord = snake.body.popleft()
self.draw(coord, self.BODY_COLOR)
if prev_coord is not None:
self.connect(prev_coord, coord, self.BODY_COLOR)
snake.body.append(coord)
prev_coord = coord
self.connect(prev_coord, snake.head, self.BODY_COLOR)
def erase(self, coord):
"""
Colors the entire coordinate with SPACE_COLOR to erase potential
connection lines.
coord - (x,y) as tuple, list, or ndarray
"""
if self.off_grid(coord):
return False
self.open_space += 1
x = int(coord[0]*self.unit_size)
end_x = x+self.unit_size
y = int(coord[1]*self.unit_size)
end_y = y+self.unit_size
self.grid[y:end_y, x:end_x, :] = self.SPACE_COLOR
return True
def erase_connections(self, coord):
"""
Colors the dead space of the given coordinate with SPACE_COLOR to erase potential
connection lines
coord - (x,y) as tuple, list, or ndarray
"""
if self.off_grid(coord):
return False
# Erase Horizontal Row Below Coord
x = int(coord[0]*self.unit_size)
end_x = x+self.unit_size
y = int(coord[1]*self.unit_size)+self.unit_size-self.unit_gap
end_y = y+self.unit_gap
self.grid[y:end_y, x:end_x, :] = self.SPACE_COLOR
# Erase the Vertical Column to Right of Coord
x = int(coord[0]*self.unit_size)+self.unit_size-self.unit_gap
end_x = x+self.unit_gap
y = int(coord[1]*self.unit_size)
end_y = y+self.unit_size
self.grid[y:end_y, x:end_x, :] = self.SPACE_COLOR
return True
def erase_snake_body(self, snake):
"""
Removes the argued snake's body and head from the grid.
snake - Snake object
"""
for i in range(len(snake.body)):
self.erase(snake.body.popleft())
def food_space(self, coord):
"""
Checks if argued coord is snake food
coord - x,y integer coordinates as a tuple, list, or ndarray
"""
return np.array_equal(self.color_of(coord), self.FOOD_COLOR)
def place_food(self, coord):
"""
Draws a food at the coord. Ensures the same placement for
each food at the beginning of a new episode. This is useful for
experimentation with curiosity driven behaviors.
num - the integer denoting the
"""
if self.open_space < 1 or not np.array_equal(self.color_of(coord), self.SPACE_COLOR):
return False
self.draw(coord, self.FOOD_COLOR)
return True
def new_food(self):
"""
Draws a food on a random, open unit of the grid.
Returns true if space left. Otherwise returns false.
"""
if self.open_space < 1:
return False
coord_not_found = True
while(coord_not_found):
coord = (np.random.randint(0,self.grid_size[0]), np.random.randint(0,self.grid_size[1]))
if np.array_equal(self.color_of(coord), self.SPACE_COLOR):
coord_not_found = False
self.draw(coord, self.FOOD_COLOR)
return True
def off_grid(self, coord):
"""
Checks if argued coord is off of the grid
coord - x,y integer coordinates as a tuple, list, or ndarray
"""
return coord[0]<0 or coord[0]>=self.grid_size[0] or coord[1]<0 or coord[1]>=self.grid_size[1]
def snake_space(self, coord):
"""
Checks if argued coord is occupied by a snake
coord - x,y integer coordinates as a tuple, list, or ndarray
"""
color = self.color_of(coord)
return np.array_equal(color, self.BODY_COLOR) or color[0] == self.HEAD_COLOR[0]
| [
"numpy.abs",
"numpy.asarray",
"numpy.zeros",
"numpy.random.randint",
"numpy.array",
"numpy.array_equal"
] | [((556, 591), 'numpy.array', 'np.array', (['[1, 0, 0]'], {'dtype': 'np.uint8'}), '([1, 0, 0], dtype=np.uint8)\n', (564, 591), True, 'import numpy as np\n'), ((607, 644), 'numpy.array', 'np.array', (['[255, 0, 0]'], {'dtype': 'np.uint8'}), '([255, 0, 0], dtype=np.uint8)\n', (615, 644), True, 'import numpy as np\n'), ((662, 699), 'numpy.array', 'np.array', (['[0, 0, 255]'], {'dtype': 'np.uint8'}), '([0, 0, 255], dtype=np.uint8)\n', (670, 699), True, 'import numpy as np\n'), ((716, 753), 'numpy.array', 'np.array', (['[0, 255, 0]'], {'dtype': 'np.uint8'}), '([0, 255, 0], dtype=np.uint8)\n', (724, 753), True, 'import numpy as np\n'), ((1150, 1185), 'numpy.asarray', 'np.asarray', (['grid_size'], {'dtype': 'np.int'}), '(grid_size, dtype=np.int)\n', (1160, 1185), True, 'import numpy as np\n'), ((1351, 1402), 'numpy.zeros', 'np.zeros', (['(height, width, channels)'], {'dtype': 'np.uint8'}), '((height, width, channels), dtype=np.uint8)\n', (1359, 1402), True, 'import numpy as np\n'), ((4291, 4324), 'numpy.asarray', 'np.asarray', (['color'], {'dtype': 'np.uint8'}), '(color, dtype=np.uint8)\n', (4301, 4324), True, 'import numpy as np\n'), ((8887, 8925), 'numpy.array_equal', 'np.array_equal', (['color', 'self.BODY_COLOR'], {}), '(color, self.BODY_COLOR)\n', (8901, 8925), True, 'import numpy as np\n'), ((2647, 2676), 'numpy.abs', 'np.abs', (['(coord1[0] - coord2[0])'], {}), '(coord1[0] - coord2[0])\n', (2653, 2676), True, 'import numpy as np\n'), ((2684, 2713), 'numpy.abs', 'np.abs', (['(coord1[1] - coord2[1])'], {}), '(coord1[1] - coord2[1])\n', (2690, 2713), True, 'import numpy as np\n'), ((2775, 2804), 'numpy.abs', 'np.abs', (['(coord1[0] - coord2[0])'], {}), '(coord1[0] - coord2[0])\n', (2781, 2804), True, 'import numpy as np\n'), ((2812, 2841), 'numpy.abs', 'np.abs', (['(coord1[1] - coord2[1])'], {}), '(coord1[1] - coord2[1])\n', (2818, 2841), True, 'import numpy as np\n'), ((8119, 8158), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.grid_size[0]'], {}), 
'(0, self.grid_size[0])\n', (8136, 8158), True, 'import numpy as np\n'), ((8159, 8198), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.grid_size[1]'], {}), '(0, self.grid_size[1])\n', (8176, 8198), True, 'import numpy as np\n')] |
# Reads per-port S-parameter phase data exported from an HFSS cylindrical
# array design and prints the total phase excitation for each array port.
import pandas as pd
import numpy as np
import HFSSLibrary as hfss
precision_frequency = 10.0050125313283 # GHz
N = 8
# beam_ports = np.array([0,1])
beam_ports = np.arange(N)  # excite every beam port
# N=8
#Open Circular Array File Before running script
[oAnsys, oDesktop] = hfss.openHFSS()
oProject = oDesktop.setActiveProject('cyl_array') #cyl_array
phases_df = None
# Read in CSV file and Set active design
if N == 4:
    oDesign = oProject.SetActiveDesign('4_Element_Radius')
    phases_df = pd.read_csv('4x4_phase.csv')
else:
    oDesign = oProject.SetActiveDesign('8_Element_Radius')
    phases_df = pd.read_csv('8x8_phase.csv')
# Row index (as a list) of the 10 GHz sweep point in the exported CSV.
row = phases_df.index[phases_df['Freq [GHz]']==10].tolist()
S_Phase = np.zeros((2*N, 2*N)) # Store Phases indexed by s parameter
print(S_Phase)
# row = freq_list.index[freq_list==].tolist()
print(row)
# print(freq_list.head())
# Loop Through S Paramanters in CSV, Read values from Dataframe into ndarray
for i in range(2*N):
    for j in range(2*N):
        # HFSS exports cumulative-angle columns named 'cang_deg(S(i,j)) [deg]'.
        column_string = 'cang_deg(S({0},{1})) [deg]'.format(i+1,j+1)
        # print(column_string)
        # print(phases_df[column_string].iloc[row])
        # NOTE(review): `row` is a list, so .iloc[row] yields a Series being
        # assigned into a scalar slot -- this only works while the CSV has
        # exactly one 10 GHz row; verify against the exported sweep.
        S_Phase[i, j] = phases_df[column_string].iloc[row]
# Get only one frequency
modes = np.ones((N,1))
amplitudes = np.ones((N,1))
phases = np.zeros((N,1))
# print(phases)
source_list = []
# Add phase contributions from Each active Beam port into array port
# (array ports occupy rows N..2N-1 of the S matrix).
for array_port in range(N):
    source_list.append(str(array_port+1))
    for beam_port in beam_ports:
        # print(N+1+array_port,beam_port+1)
        # print(S_Phase[N+array_port,beam_port])
        phases[array_port] += S_Phase[N+array_port,beam_port]
# print(phases)
i = 0
# fixed_phase = [ 264.34337681, 57.65556644, 257.34443356, 680.70074766]
fixed_phase = [311.22491017 , 216.35612484, 113.93161896, 16.9062831, 264.4062831, 628.57866275, 1021.16389506, 1263.95080223]
# Print simulated phase plus the fixed per-port calibration offset.
for phase in phases:
    print(phase+fixed_phase[i])
    i += 1
# hfss.edit_sources(oDesign,source_list,modes,amplitudes,phases,'W','deg')
# hfss.edit_sources(oDesign,source_list,modes,amplitudes,phases,'W','deg') | [
"pandas.read_csv",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"HFSSLibrary.openHFSS"
] | [((164, 176), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (173, 176), True, 'import numpy as np\n'), ((253, 268), 'HFSSLibrary.openHFSS', 'hfss.openHFSS', ([], {}), '()\n', (266, 268), True, 'import HFSSLibrary as hfss\n'), ((688, 712), 'numpy.zeros', 'np.zeros', (['(2 * N, 2 * N)'], {}), '((2 * N, 2 * N))\n', (696, 712), True, 'import numpy as np\n'), ((1225, 1240), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (1232, 1240), True, 'import numpy as np\n'), ((1253, 1268), 'numpy.ones', 'np.ones', (['(N, 1)'], {}), '((N, 1))\n', (1260, 1268), True, 'import numpy as np\n'), ((1277, 1293), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (1285, 1293), True, 'import numpy as np\n'), ((475, 503), 'pandas.read_csv', 'pd.read_csv', (['"""4x4_phase.csv"""'], {}), "('4x4_phase.csv')\n", (486, 503), True, 'import pandas as pd\n'), ((585, 613), 'pandas.read_csv', 'pd.read_csv', (['"""8x8_phase.csv"""'], {}), "('8x8_phase.csv')\n", (596, 613), True, 'import pandas as pd\n')] |
from numpy import array, polyfit, RankWarning, seterr
from warnings import catch_warnings, filterwarnings
# Represents a polynomial function on a graph
# Uses polynomial regression to get terms of the polynomial form of the function
# Also overloads the call operator for testing the function
class Function:
    """Polynomial model fitted to collected (x, y) samples.

    Data points are accumulated with :meth:`add_point`; the coefficients of
    the best-fit polynomial (least squares via ``numpy.polyfit``) are
    recomputed on demand. Instances are callable and evaluate the fitted
    polynomial at a given x. The fit can also be rendered as a C++
    expression for code generation.
    """

    def __init__(self, degree=2):
        self.degree = degree
        self.clear()

    def clear(self):
        """Discard every recorded data point."""
        self.x = []
        self.y = []

    def add_point(self, x, y):
        """Record one (x, y) sample for the regression."""
        self.x.append(x)
        self.y.append(y)

    def as_terms(self):
        """Return the fitted coefficients as a list, highest degree first."""
        xs, ys = array(self.x), array(self.y)
        return list(polyfit(xs, ys, self.degree))

    def as_cpp(self, var='x'):
        """Render the polynomial as a C++ expression in the variable `var`."""
        pieces = [f'{coeff} * pow({var}, {power})'
                  for power, coeff in enumerate(self.as_terms()[::-1])]
        return ' + '.join(pieces)

    def __call__(self, x):
        """Evaluate the fitted polynomial at `x`, term by term."""
        total = 0
        for power, coeff in enumerate(self.as_terms()[::-1]):
            total += coeff * x ** power
        return total
| [
"numpy.array",
"numpy.polyfit"
] | [((725, 738), 'numpy.array', 'array', (['self.x'], {}), '(self.x)\n', (730, 738), False, 'from numpy import array, polyfit, RankWarning, seterr\n'), ((751, 764), 'numpy.array', 'array', (['self.y'], {}), '(self.y)\n', (756, 764), False, 'from numpy import array, polyfit, RankWarning, seterr\n'), ((777, 803), 'numpy.polyfit', 'polyfit', (['x', 'y', 'self.degree'], {}), '(x, y, self.degree)\n', (784, 803), False, 'from numpy import array, polyfit, RankWarning, seterr\n')] |
import matplotlib.pyplot as plt
import numpy as np
from multiprocessing import cpu_count
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
# Set random seed for reproducibility
np.random.seed(1000)
if __name__ == '__main__':
    # Load the dataset (13-feature, 3-class wine classification problem)
    X, Y = load_wine(return_X_y=True)
    # Test Logistic regression
    lr = LogisticRegression(max_iter=1000, random_state=1000)
    print('Logistic Regression CV score: {}'.format(np.mean(cross_val_score(lr, X, Y, cv=10))))
    # Test Decision Tree
    dt = DecisionTreeClassifier(criterion='entropy', random_state=1000)
    # NOTE(review): 'Decistion' typo below is in a runtime string, preserved here.
    print('Decistion Tree CV score: {}'.format(np.mean(cross_val_score(dt, X, Y, cv=10))))
    # Test Polynomial SVM
    svm = SVC(kernel='poly', random_state=1000)
    print('Polynomial SVM CV score: {}'.format(np.mean(cross_val_score(svm, X, Y, cv=10))))
    # Test Random Forest (parallelized across all available cores)
    rf = RandomForestClassifier(n_estimators=50, n_jobs=cpu_count(), random_state=1000)
    scores = cross_val_score(rf, X, Y, cv=10)
    print('Random Forest CV score: {}'.format(np.mean(scores)))
    # Plot CV scores (one point per CV fold)
    fig, ax = plt.subplots(figsize=(15, 7))
    ax.plot(scores)
    ax.set_xlabel('Number of Trees (x10)')
    ax.set_ylabel('10-fold Cross-Validation Accuracy')
    ax.grid()
    plt.show()
    # Show feature importances: refit on the full dataset, then rank features
    rf.fit(X, Y)
    wine = load_wine()
    features = [wine['feature_names'][x] for x in np.argsort(rf.feature_importances_)][::-1]
    fig, ax = plt.subplots(figsize=(15, 8))
    ax.bar([i for i in range(13)], np.sort(rf.feature_importances_)[::-1], align='center')
    ax.set_ylabel('Feature Importance')
    plt.xticks([i for i in range(13)], features, rotation=60)
    plt.show()
    # Select the most important features (importance above the 0.02 threshold)
    sfm = SelectFromModel(estimator=rf, prefit=True, threshold=0.02)
    X_sfm = sfm.transform(X)
    print('Feature selection shape: {}'.format(X_sfm.shape))
| [
"sklearn.datasets.load_wine",
"numpy.random.seed",
"matplotlib.pyplot.show",
"sklearn.model_selection.cross_val_score",
"sklearn.tree.DecisionTreeClassifier",
"numpy.argsort",
"sklearn.feature_selection.SelectFromModel",
"sklearn.linear_model.LogisticRegression",
"numpy.mean",
"numpy.sort",
"skl... | [((469, 489), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (483, 489), True, 'import numpy as np\n'), ((558, 584), 'sklearn.datasets.load_wine', 'load_wine', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (567, 584), False, 'from sklearn.datasets import load_wine\n'), ((629, 681), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(1000)', 'random_state': '(1000)'}), '(max_iter=1000, random_state=1000)\n', (647, 681), False, 'from sklearn.linear_model import LogisticRegression\n'), ((817, 879), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'random_state': '(1000)'}), "(criterion='entropy', random_state=1000)\n", (839, 879), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1012, 1049), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""poly"""', 'random_state': '(1000)'}), "(kernel='poly', random_state=1000)\n", (1015, 1049), False, 'from sklearn.svm import SVC\n'), ((1274, 1306), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['rf', 'X', 'Y'], {'cv': '(10)'}), '(rf, X, Y, cv=10)\n', (1289, 1306), False, 'from sklearn.model_selection import cross_val_score\n'), ((1411, 1440), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 7)'}), '(figsize=(15, 7))\n', (1423, 1440), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1592, 1594), True, 'import matplotlib.pyplot as plt\n'), ((1661, 1672), 'sklearn.datasets.load_wine', 'load_wine', ([], {}), '()\n', (1670, 1672), False, 'from sklearn.datasets import load_wine\n'), ((1784, 1813), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (1796, 1813), True, 'import matplotlib.pyplot as plt\n'), ((2017, 2027), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2025, 2027), True, 'import matplotlib.pyplot as plt\n'), ((2083, 2141), 
'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', ([], {'estimator': 'rf', 'prefit': '(True)', 'threshold': '(0.02)'}), '(estimator=rf, prefit=True, threshold=0.02)\n', (2098, 2141), False, 'from sklearn.feature_selection import SelectFromModel\n'), ((1228, 1239), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1237, 1239), False, 'from multiprocessing import cpu_count\n'), ((1354, 1369), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1361, 1369), True, 'import numpy as np\n'), ((1852, 1884), 'numpy.sort', 'np.sort', (['rf.feature_importances_'], {}), '(rf.feature_importances_)\n', (1859, 1884), True, 'import numpy as np\n'), ((743, 775), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['lr', 'X', 'Y'], {'cv': '(10)'}), '(lr, X, Y, cv=10)\n', (758, 775), False, 'from sklearn.model_selection import cross_val_score\n'), ((936, 968), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['dt', 'X', 'Y'], {'cv': '(10)'}), '(dt, X, Y, cv=10)\n', (951, 968), False, 'from sklearn.model_selection import cross_val_score\n'), ((1106, 1139), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['svm', 'X', 'Y'], {'cv': '(10)'}), '(svm, X, Y, cv=10)\n', (1121, 1139), False, 'from sklearn.model_selection import cross_val_score\n'), ((1724, 1759), 'numpy.argsort', 'np.argsort', (['rf.feature_importances_'], {}), '(rf.feature_importances_)\n', (1734, 1759), True, 'import numpy as np\n')] |
from acados_template import *
import acados_template as at
from export_ode_model import *
import numpy as np
import scipy.linalg
from ctypes import *
FORMULATION = 1 # 0 for linear soft bounds,
                # 1 for equivalent nonlinear soft constraint
def export_nonlinear_constraint():
    """Build the acados h-type constraint h(x, u) = u for the cart-pole model.

    Used when FORMULATION == 1 to express the force bound as a (soft)
    nonlinear constraint on the single control instead of a plain input
    bound. The symbolic state/control layout must match export_ode_model.
    """
    con_name = 'nl_con'
    # set up states & controls
    x1 = SX.sym('x1')
    theta = SX.sym('theta')
    v1 = SX.sym('v1')
    dtheta = SX.sym('dtheta')
    x = vertcat(x1, v1, theta, dtheta)
    # controls
    F = SX.sym('F')
    u = vertcat(F)
    # voltage sphere
    constraint = acados_constraint()
    constraint.expr = u  # h(x, u) = u, i.e. the applied force itself
    constraint.x = x
    constraint.u = u
    constraint.nc = 1    # one scalar constraint
    constraint.name = con_name
    return constraint
# create render arguments
ocp = acados_ocp_nlp()
# export model
model = export_ode_model()
# set model_name
ocp.model_name = model.name
Tf = 2.0     # prediction horizon length [s]
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu   # tracked outputs: full state plus control
ny_e = nx      # terminal stage tracks the state only
N = 50         # number of shooting intervals
# set ocp_nlp_dimensions
nlp_dims = ocp.dims
nlp_dims.nx = nx
nlp_dims.ny = ny
nlp_dims.ny_e = ny_e
nlp_dims.nbx = 0
nlp_dims.ns = nu
nlp_dims.nu = model.u.size()[0]
nlp_dims.N = N
# FORMULATION selects how the (soft) force bound enters the NLP:
# 1 -> as a soft nonlinear h-constraint; otherwise as a soft input bound.
if FORMULATION == 1:
    nlp_dims.nh = model.u.size()[0]
    nlp_dims.nsh = model.u.size()[0]
    nlp_dims.nbu = 0
    nlp_dims.nsbu = 0
else:
    nlp_dims.nh = 0
    nlp_dims.nsh = 0
    nlp_dims.nbu = model.u.size()[0]
    nlp_dims.nsbu = model.u.size()[0]
# set weighting matrices (least-squares tracking cost W = blkdiag(Q, R))
nlp_cost = ocp.cost
Q = np.eye(4)
Q[0,0] = 1e0
Q[1,1] = 1e2
Q[2,2] = 1e-3
Q[3,3] = 1e-2
R = np.eye(1)
R[0,0] = 1e0
nlp_cost.W = scipy.linalg.block_diag(Q, R)
# Output selection matrices: y = Vx*x + Vu*u
Vx = np.zeros((ny, nx))
Vx[0,0] = 1.0
Vx[1,1] = 1.0
Vx[2,2] = 1.0
Vx[3,3] = 1.0
nlp_cost.Vx = Vx
Vu = np.zeros((ny, nu))
Vu[4,0] = 1.0
nlp_cost.Vu = Vu
nlp_cost.W_e = Q
Vx_e = np.zeros((ny_e, nx))
Vx_e[0,0] = 1.0
Vx_e[1,1] = 1.0
Vx_e[2,2] = 1.0
Vx_e[3,3] = 1.0
nlp_cost.Vx_e = Vx_e
nlp_cost.yref = np.zeros((ny, ))
nlp_cost.yref_e = np.zeros((ny_e, ))
# Slack penalties: linear weight 500, no quadratic term.
nlp_cost.zl = 500*np.ones((1, ))
nlp_cost.Zl = 0*np.ones((1, 1))
nlp_cost.zu = 500*np.ones((1, ))
nlp_cost.Zu = 0*np.ones((1, 1))
# setting bounds
Fmax = 2.0
nlp_con = ocp.constraints
# Initial state: pendulum starts hanging down (theta = 3.14 rad).
nlp_con.x0 = np.array([0.0, 3.14, 0.0, 0.0])
constraint = export_nonlinear_constraint()
if FORMULATION == 1:
    # Soft nonlinear constraint -Fmax <= h(x,u) <= Fmax on index 0.
    nlp_con.lh = np.array([-Fmax])
    nlp_con.uh = np.array([+Fmax])
    nlp_con.lsh = 0*np.array([-Fmax])
    nlp_con.ush = 0*np.array([+Fmax])
    nlp_con.idxsh = np.array([0])
else:
    # Equivalent soft input bound on the first (only) control.
    nlp_con.lbu = np.array([-Fmax])
    nlp_con.ubu = np.array([+Fmax])
    nlp_con.lsbu = 0*np.array([-Fmax])
    nlp_con.usbu = 0*np.array([+Fmax])
    nlp_con.idxbu = np.array([0])
    nlp_con.idxsbu = np.array([0])
# set QP solver
ocp.solver_config.qp_solver = 'PARTIAL_CONDENSING_HPIPM'
ocp.solver_config.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_config.integrator_type = 'ERK'
# set prediction horizon
ocp.solver_config.tf = Tf
ocp.solver_config.nlp_solver_type = 'SQP'
# set header path
ocp.acados_include_path = '/usr/local/include'
ocp.acados_lib_path = '/usr/local/lib'
# json_layout = acados_ocp2json_layout(ocp)
# with open('acados_layout.json', 'w') as f:
#     json.dump(json_layout, f, default=np_array_to_list)
# exit()
if FORMULATION == 1:
    ocp.con_h_name = 'nl_con'
    acados_solver = generate_solver(model, ocp, con_h=constraint, json_file = 'acados_ocp.json')
else:
    acados_solver = generate_solver(model, ocp, json_file = 'acados_ocp.json')
# Closed-loop simulation: re-solve the OCP at each step and shift the
# initial condition forward by one predicted state.
Nsim = 100
simX = np.ndarray((Nsim, nx))  # filled completely in the loop below
simU = np.ndarray((Nsim, nu))
for i in range(Nsim):
    status = acados_solver.solve()
    # get solution
    x0 = acados_solver.get(0, "x")
    u0 = acados_solver.get(0, "u")
    for j in range(nx):
        simX[i,j] = x0[j]
    for j in range(nu):
        simU[i,j] = u0[j]
    # update initial condition
    x0 = acados_solver.get(1, "x")
    acados_solver.set(0, "lbx", x0)
    acados_solver.set(0, "ubx", x0)
# plot results
import matplotlib
import matplotlib.pyplot as plt
# NOTE(review): this spreads all Nsim samples across a single step length
# Tf/N rather than the full simulated time -- looks off; verify intent.
t = np.linspace(0.0, Tf/N, Nsim)
plt.subplot(2, 1, 1)
plt.step(t, simU, color='r')
plt.title('closed-loop simulation')
plt.ylabel('u')
plt.xlabel('t')
plt.grid(True)
plt.subplot(2, 1, 2)
plt.plot(t, simX[:,1])
plt.ylabel('theta')
plt.xlabel('t')
plt.grid(True)
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.ylabel",
"numpy.ones",
"matplotlib.pyplot.step",
"numpy.array",
"numpy.linspace",
"numpy.eye",
"matplotlib.pyplot.xlabel",
"numpy.ndarray",
"matplot... | [((1505, 1514), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1511, 1514), True, 'import numpy as np\n'), ((1574, 1583), 'numpy.eye', 'np.eye', (['(1)'], {}), '(1)\n', (1580, 1583), True, 'import numpy as np\n'), ((1648, 1666), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (1656, 1666), True, 'import numpy as np\n'), ((1747, 1765), 'numpy.zeros', 'np.zeros', (['(ny, nu)'], {}), '((ny, nu))\n', (1755, 1765), True, 'import numpy as np\n'), ((1824, 1844), 'numpy.zeros', 'np.zeros', (['(ny_e, nx)'], {}), '((ny_e, nx))\n', (1832, 1844), True, 'import numpy as np\n'), ((1949, 1964), 'numpy.zeros', 'np.zeros', (['(ny,)'], {}), '((ny,))\n', (1957, 1964), True, 'import numpy as np\n'), ((1984, 2001), 'numpy.zeros', 'np.zeros', (['(ny_e,)'], {}), '((ny_e,))\n', (1992, 2001), True, 'import numpy as np\n'), ((2202, 2233), 'numpy.array', 'np.array', (['[0.0, 3.14, 0.0, 0.0]'], {}), '([0.0, 3.14, 0.0, 0.0])\n', (2210, 2233), True, 'import numpy as np\n'), ((3487, 3509), 'numpy.ndarray', 'np.ndarray', (['(Nsim, nx)'], {}), '((Nsim, nx))\n', (3497, 3509), True, 'import numpy as np\n'), ((3517, 3539), 'numpy.ndarray', 'np.ndarray', (['(Nsim, nu)'], {}), '((Nsim, nu))\n', (3527, 3539), True, 'import numpy as np\n'), ((4008, 4038), 'numpy.linspace', 'np.linspace', (['(0.0)', '(Tf / N)', 'Nsim'], {}), '(0.0, Tf / N, Nsim)\n', (4019, 4038), True, 'import numpy as np\n'), ((4037, 4057), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (4048, 4057), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4086), 'matplotlib.pyplot.step', 'plt.step', (['t', 'simU'], {'color': '"""r"""'}), "(t, simU, color='r')\n", (4066, 4086), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4122), 'matplotlib.pyplot.title', 'plt.title', (['"""closed-loop simulation"""'], {}), "('closed-loop simulation')\n", (4096, 4122), True, 'import matplotlib.pyplot as plt\n'), ((4123, 4138), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""u"""'], 
{}), "('u')\n", (4133, 4138), True, 'import matplotlib.pyplot as plt\n'), ((4139, 4154), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (4149, 4154), True, 'import matplotlib.pyplot as plt\n'), ((4155, 4169), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4163, 4169), True, 'import matplotlib.pyplot as plt\n'), ((4170, 4190), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (4181, 4190), True, 'import matplotlib.pyplot as plt\n'), ((4191, 4214), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'simX[:, 1]'], {}), '(t, simX[:, 1])\n', (4199, 4214), True, 'import matplotlib.pyplot as plt\n'), ((4214, 4233), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""theta"""'], {}), "('theta')\n", (4224, 4233), True, 'import matplotlib.pyplot as plt\n'), ((4234, 4249), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (4244, 4249), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4264), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4258, 4264), True, 'import matplotlib.pyplot as plt\n'), ((4265, 4275), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4273, 4275), True, 'import matplotlib.pyplot as plt\n'), ((2022, 2035), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (2029, 2035), True, 'import numpy as np\n'), ((2053, 2068), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2060, 2068), True, 'import numpy as np\n'), ((2087, 2100), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (2094, 2100), True, 'import numpy as np\n'), ((2118, 2133), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (2125, 2133), True, 'import numpy as np\n'), ((2317, 2334), 'numpy.array', 'np.array', (['[-Fmax]'], {}), '([-Fmax])\n', (2325, 2334), True, 'import numpy as np\n'), ((2352, 2369), 'numpy.array', 'np.array', (['[+Fmax]'], {}), '([+Fmax])\n', (2360, 2369), True, 'import numpy as np\n'), ((2466, 2479), 'numpy.array', 'np.array', 
(['[0]'], {}), '([0])\n', (2474, 2479), True, 'import numpy as np\n'), ((2504, 2521), 'numpy.array', 'np.array', (['[-Fmax]'], {}), '([-Fmax])\n', (2512, 2521), True, 'import numpy as np\n'), ((2540, 2557), 'numpy.array', 'np.array', (['[+Fmax]'], {}), '([+Fmax])\n', (2548, 2557), True, 'import numpy as np\n'), ((2656, 2669), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2664, 2669), True, 'import numpy as np\n'), ((2691, 2704), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2699, 2704), True, 'import numpy as np\n'), ((2390, 2407), 'numpy.array', 'np.array', (['[-Fmax]'], {}), '([-Fmax])\n', (2398, 2407), True, 'import numpy as np\n'), ((2428, 2445), 'numpy.array', 'np.array', (['[+Fmax]'], {}), '([+Fmax])\n', (2436, 2445), True, 'import numpy as np\n'), ((2579, 2596), 'numpy.array', 'np.array', (['[-Fmax]'], {}), '([-Fmax])\n', (2587, 2596), True, 'import numpy as np\n'), ((2618, 2635), 'numpy.array', 'np.array', (['[+Fmax]'], {}), '([+Fmax])\n', (2626, 2635), True, 'import numpy as np\n')] |
"""Various utilities for working with Webots scenarios."""
import math
import numpy as np
from scenic.core.geometry import normalizeAngle
def webotsToScenicPosition(pos):
    """Convert a Webots position to a Scenic position.

    Drops the Webots Y coordinate.
    """
    x, _, z = pos
    return (x, -z)
def scenicToWebotsPosition(pos, y=0):
    """Convert a Scenic position to a Webots position (Y defaults to 0)."""
    return [pos[0], y, -pos[1]]
def webotsToScenicRotation(rot, tolerance2D=None):
    """Convert a Webots rotation vector to a Scenic heading.

    Assumes the object lies in the Webots X-Z plane, with a rotation axis
    close to the Y axis. If ``tolerance2D`` is given, returns ``None`` if the
    orientation of the object is not sufficiently close to being 2D.
    """
    *axis, angle = rot
    if tolerance2D is not None:
        deviation = np.linalg.norm(np.array(axis) - (0, 1, 0))
        if deviation > tolerance2D:
            return None
    return normalizeAngle(angle + math.pi)
def scenicToWebotsRotation(heading):
    """Convert a Scenic heading to a Webots rotation vector (Y-axis rotation)."""
    angle = heading - math.pi
    return [0, 1, 0, angle]
| [
"scenic.core.geometry.normalizeAngle",
"numpy.array",
"numpy.linalg.norm"
] | [((799, 816), 'numpy.array', 'np.array', (['rot[:3]'], {}), '(rot[:3])\n', (807, 816), True, 'import numpy as np\n'), ((950, 981), 'scenic.core.geometry.normalizeAngle', 'normalizeAngle', (['(angle + math.pi)'], {}), '(angle + math.pi)\n', (964, 981), False, 'from scenic.core.geometry import normalizeAngle\n'), ((871, 903), 'numpy.linalg.norm', 'np.linalg.norm', (['(axis - (0, 1, 0))'], {}), '(axis - (0, 1, 0))\n', (885, 903), True, 'import numpy as np\n')] |
import glob
import hashlib
import logging
import os
import random
import sys
from itertools import product
import numpy as np
import pandas as pd
import yaml
from attrdict import AttrDict
from deepsense import neptune
from tqdm import tqdm
def read_yaml(filepath):
    """Load a YAML configuration file and return it as an attribute-dict.

    Uses ``yaml.safe_load``: calling ``yaml.load`` without an explicit
    ``Loader`` is unsafe on untrusted input, has emitted a warning since
    PyYAML 5.1, and raises ``TypeError`` under PyYAML >= 6.

    Args:
        filepath: path to the YAML file.

    Returns:
        AttrDict wrapping the parsed configuration.
    """
    with open(filepath) as f:
        config = yaml.safe_load(f)
    return AttrDict(config)
def init_logger():
    """Configure the 'talking-data' logger with an INFO-level stdout handler.

    Returns:
        The configured logging.Logger instance.
    """
    logger = logging.getLogger('talking-data')
    logger.setLevel(logging.INFO)
    message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s',
                                       datefmt='%Y-%m-%d %H-%M-%S')
    # console handler for validation info
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    handler.setFormatter(fmt=message_format)
    # add the handler to the logger
    logger.addHandler(handler)
    return logger
def get_logger():
    """Return the shared 'talking-data' logger."""
    return logging.getLogger('talking-data')
def create_submission(meta, predictions):
    """Build the Kaggle submission frame from meta click ids and predictions.

    Args:
        meta: DataFrame with a 'click_id' column.
        predictions: sequence of predicted is_attributed probabilities.

    Returns:
        DataFrame with 'click_id' and 'is_attributed' columns.
    """
    columns = {
        'click_id': meta['click_id'].tolist(),
        'is_attributed': predictions,
    }
    return pd.DataFrame(columns)
def read_params(ctx):
    """Return experiment parameters from the neptune context.

    Falls back to the local neptune.yaml file when running offline.
    """
    if ctx.params.__class__.__name__ == 'OfflineContextParams':
        return read_yaml('neptune.yaml').parameters
    return ctx.params
def set_seed(seed):
    """Seed both the stdlib and numpy RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed):
        seeder(seed)
def log_loss_row(y_true, y_pred, eps=1e-15):
    """Per-sample binary cross-entropy terms (not negated).

    Predictions are clipped to (eps, 1 - eps) to keep the logs finite.
    """
    clipped = np.clip(y_pred, eps, 1 - eps)
    positive_term = y_true * np.log(clipped)
    negative_term = (1 - y_true) * np.log(1 - clipped)
    return positive_term + negative_term
def save_evaluation_predictions(experiment_dir, y_true, y_pred, raw_data):
    """Attach predictions and per-row scores to raw_data and dump them as CSV.

    Note: mutates raw_data in place (adds 'y_pred'/'score' columns and sorts
    rows worst-scored first) before writing evaluation_predictions.csv.
    """
    raw_data['y_pred'] = y_pred
    raw_data['score'] = log_loss_row(y_true, y_pred)
    raw_data.sort_values('score', ascending=False, inplace=True)
    output_path = os.path.join(experiment_dir, 'evaluation_predictions.csv')
    raw_data.to_csv(output_path, index=None)
def cut_data_in_time_chunks(data, timestamp_column, chunks_dir, logger=None):
    """Split data by (day, hour) of timestamp_column into per-chunk CSV files.

    Each chunk is written to chunks_dir as train_day{d}_hour{h}.csv.
    Note: converts timestamp_column to datetime in place.
    """
    data[timestamp_column] = pd.to_datetime(data[timestamp_column], format='%Y-%m-%d %H:%M:%S')
    times = pd.DatetimeIndex(data[timestamp_column])
    for (day, hour), chunk in data.groupby([times.day, times.hour]):
        filename = 'train_day{}_hour{}.csv'.format(day, hour)
        message = 'saving {}'.format(filename)
        if logger is not None:
            logger.info(message)
        else:
            print(message)
        chunk.to_csv(os.path.join(chunks_dir, filename), index=None)
def read_csv_time_chunks(chunks_dir, days=(), hours=(), usecols=None, dtype=None, logger=None):
    """Read and concatenate train_day{d}_hour{h}.csv chunks from *chunks_dir*.

    Parameters
    ----------
    chunks_dir : str
        Directory holding the per-(day, hour) CSV chunks.
    days, hours : iterable of int
        Day/hour combinations (cartesian product) to load.  Defaults are
        empty tuples rather than mutable lists (mutable default argument
        antipattern in the original).
    usecols, dtype :
        Forwarded to ``pd.read_csv``.
    logger : logging.Logger, optional
        Progress sink; falls back to ``print`` when None.

    Returns
    -------
    pd.DataFrame
        All chunks concatenated, with 'click_time' parsed to datetime.
    """
    report = logger.info if logger is not None else print
    filepaths = []
    for day, hour in product(days, hours):
        filepaths.extend(glob.glob('{}/train_day{}_hour{}.csv'.format(chunks_dir, day, hour)))
    data_chunks = []
    for filepath in tqdm(filepaths):
        data_chunk = pd.read_csv(filepath, usecols=usecols, dtype=dtype)
        report('read in chunk {} of shape {}'.format(filepath, data_chunk.shape))
        data_chunks.append(data_chunk)
    data_chunks = pd.concat(data_chunks, axis=0).reset_index(drop=True)
    data_chunks['click_time'] = pd.to_datetime(data_chunks['click_time'], format='%Y-%m-%d %H:%M:%S')
    report('combined dataset shape: {}'.format(data_chunks.shape))
    return data_chunks
def data_hash_channel_send(ctx, name, data):
    """Send a content hash of *data* over a neptune TEXT channel named *name*."""
    channel = ctx.create_channel(name=name, channel_type=neptune.ChannelType.TEXT)
    channel.send(y=create_data_hash(data))
def create_data_hash(data):
    """Return a deterministic sha256 hex digest of a pandas DataFrame or Series.

    The hash is computed over the JSON serialisation, so identical content
    produces an identical hash across runs.

    Raises
    ------
    NotImplementedError
        If *data* is neither a pandas DataFrame nor a Series.  (The original
        only accepted DataFrame although the error message promised Series
        support; Series is now handled as advertised.)
    """
    if isinstance(data, (pd.DataFrame, pd.Series)):
        data_hash = hashlib.sha256(data.to_json().encode()).hexdigest()
    else:
        raise NotImplementedError('only pandas.DataFrame and pandas.Series are supported')
    return str(data_hash)
def safe_eval(obj):
    """Best-effort eval of *obj*; on any failure the input is returned unchanged.

    SECURITY: eval() executes arbitrary code -- never feed this untrusted input.
    """
    try:
        result = eval(obj)
    except Exception:
        return obj
    return result
| [
"tqdm.tqdm",
"yaml.load",
"numpy.random.seed",
"numpy.log",
"pandas.read_csv",
"logging.StreamHandler",
"numpy.clip",
"pandas.DatetimeIndex",
"logging.Formatter",
"random.seed",
"pandas.to_datetime",
"itertools.product",
"attrdict.AttrDict",
"os.path.join",
"pandas.concat",
"logging.ge... | [((339, 355), 'attrdict.AttrDict', 'AttrDict', (['config'], {}), '(config)\n', (347, 355), False, 'from attrdict import AttrDict\n'), ((390, 423), 'logging.getLogger', 'logging.getLogger', (['"""talking-data"""'], {}), "('talking-data')\n", (407, 423), False, 'import logging\n'), ((479, 574), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s %(name)s >>> %(message)s"""', 'datefmt': '"""%Y-%m-%d %H-%M-%S"""'}), "(fmt='%(asctime)s %(name)s >>> %(message)s', datefmt=\n '%Y-%m-%d %H-%M-%S')\n", (496, 574), False, 'import logging\n'), ((664, 697), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (685, 697), False, 'import logging\n'), ((892, 925), 'logging.getLogger', 'logging.getLogger', (['"""talking-data"""'], {}), "('talking-data')\n", (909, 925), False, 'import logging\n'), ((1420, 1437), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1431, 1437), False, 'import random\n'), ((1442, 1462), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1456, 1462), True, 'import numpy as np\n'), ((1523, 1552), 'numpy.clip', 'np.clip', (['y_pred', 'eps', '(1 - eps)'], {}), '(y_pred, eps, 1 - eps)\n', (1530, 1552), True, 'import numpy as np\n'), ((1888, 1946), 'os.path.join', 'os.path.join', (['experiment_dir', '"""evaluation_predictions.csv"""'], {}), "(experiment_dir, 'evaluation_predictions.csv')\n", (1900, 1946), False, 'import os\n'), ((2098, 2164), 'pandas.to_datetime', 'pd.to_datetime', (['data[timestamp_column]'], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "(data[timestamp_column], format='%Y-%m-%d %H:%M:%S')\n", (2112, 2164), True, 'import pandas as pd\n'), ((2177, 2217), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['data[timestamp_column]'], {}), '(data[timestamp_column])\n', (2193, 2217), True, 'import pandas as pd\n'), ((2813, 2833), 'itertools.product', 'product', (['days', 'hours'], {}), '(days, hours)\n', (2820, 2833), False, 'from itertools import product\n'), 
((2971, 2986), 'tqdm.tqdm', 'tqdm', (['filepaths'], {}), '(filepaths)\n', (2975, 2986), False, 'from tqdm import tqdm\n'), ((3425, 3494), 'pandas.to_datetime', 'pd.to_datetime', (["data_chunks['click_time']"], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "(data_chunks['click_time'], format='%Y-%m-%d %H:%M:%S')\n", (3439, 3494), True, 'import pandas as pd\n'), ((315, 327), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (324, 327), False, 'import yaml\n'), ((2579, 2619), 'os.path.join', 'os.path.join', (['chunks_dir', 'chunk_filename'], {}), '(chunks_dir, chunk_filename)\n', (2591, 2619), False, 'import os\n'), ((3009, 3060), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'usecols': 'usecols', 'dtype': 'dtype'}), '(filepath, usecols=usecols, dtype=dtype)\n', (3020, 3060), True, 'import pandas as pd\n'), ((1575, 1589), 'numpy.log', 'np.log', (['y_pred'], {}), '(y_pred)\n', (1581, 1589), True, 'import numpy as np\n'), ((1607, 1625), 'numpy.log', 'np.log', (['(1 - y_pred)'], {}), '(1 - y_pred)\n', (1613, 1625), True, 'import numpy as np\n'), ((3339, 3369), 'pandas.concat', 'pd.concat', (['data_chunks'], {'axis': '(0)'}), '(data_chunks, axis=0)\n', (3348, 3369), True, 'import pandas as pd\n')] |
import numpy as np
import pandas as pd
from talib import abstract
from lib.strategy.base_strategy import BaseStrategy
class DcBreakout(BaseStrategy):
    """Donchian-channel breakout strategy.

    Buys when the close crosses up through the rolling high-period maximum
    (dch) and sells when it crosses down through the rolling low-period
    minimum (dcl).
    """

    # channel look-back settings
    low_period = 10
    high_period = 20

    def __init__(self, feed: pd.DataFrame):
        super().__init__(feed)
        if not self.has_enough_feed():
            return
        close = self.feed["close"]
        self.feed["dch"] = abstract.MAX(close, timeperiod=self.high_period)
        self.feed["dcl"] = abstract.MIN(close, timeperiod=self.low_period)
        # Previous-bar values, used to detect the crossing itself rather
        # than a mere touch of the channel.
        last_close = close.shift(1)
        last_dch = self.feed["dch"].shift(1)
        last_dcl = self.feed["dcl"].shift(1)
        self.feed["cross_up"] = (close >= self.feed["dch"]) & (last_close < last_dch)
        self.feed["cross_down"] = (close <= self.feed["dcl"]) & (last_close > last_dcl)

    def get_name(self) -> str:
        return "dc_breakout"

    def should_buy(self) -> bool:
        return self.feed.iloc[-1]["cross_up"]

    def should_sell(self) -> bool:
        return self.feed.iloc[-1]["cross_down"]

    def is_valid(self) -> bool:
        # Valid only once both channels have enough history (no NaNs).
        last = self.feed.iloc[-1]
        return not (np.isnan(last["dch"]) or np.isnan(last["dcl"]))
| [
"numpy.isnan",
"talib.abstract.MAX",
"talib.abstract.MIN"
] | [((340, 401), 'talib.abstract.MAX', 'abstract.MAX', (["self.feed['close']"], {'timeperiod': 'self.high_period'}), "(self.feed['close'], timeperiod=self.high_period)\n", (352, 401), False, 'from talib import abstract\n'), ((420, 480), 'talib.abstract.MIN', 'abstract.MIN', (["self.feed['close']"], {'timeperiod': 'self.low_period'}), "(self.feed['close'], timeperiod=self.low_period)\n", (432, 480), False, 'from talib import abstract\n'), ((1258, 1293), 'numpy.isnan', 'np.isnan', (["self.feed.iloc[-1]['dch']"], {}), "(self.feed.iloc[-1]['dch'])\n", (1266, 1293), True, 'import numpy as np\n'), ((1302, 1337), 'numpy.isnan', 'np.isnan', (["self.feed.iloc[-1]['dcl']"], {}), "(self.feed.iloc[-1]['dcl'])\n", (1310, 1337), True, 'import numpy as np\n')] |
import sys
import torch
import wandb
import numpy as np
sys.path.insert(1, "../../model")
from dataset import GrooveMidiDatasetInfilling
from torch.utils.data import DataLoader
sys.path.insert(1, "../../../GrooveEvaluator")
sys.path.insert(1, "../../../BaseGrooveTransformers/")
sys.path.append('../../../preprocessed_dataset/')
sys.path.insert(1, "../../../hvo_sequence")
from models.train import initialize_model, calculate_loss, train_loop
from Subset_Creators.subsetters import GrooveMidiSubsetter
from hvo_sequence.drum_mappings import ROLAND_REDUCED_MAPPING
from utils import get_hvo_idx_for_voice
from evaluator import InfillingEvaluator
import os
# ---------------------------------------------------------------------------
# Experiment script: trains the infilling Transformer for one epoch and
# cross-checks InfillingEvaluator.set_gt() / set_pred() against a manual
# re-computation, logging the resulting media to Weights & Biases.
# ---------------------------------------------------------------------------
use_wand = True
os.environ['WANDB_MODE'] = 'online' if use_wand else 'offline'
wandb.init()
# All hyper-parameters for the model, training loop, dataset and evaluator.
params = {
    "model": {
        'encoder_only': True,
        'optimizer': 'sgd',
        'd_model': 128,
        'n_heads': 8,
        'dim_feedforward': 1280,
        'dropout': 0.1,
        'num_encoder_layers': 5,
        'num_decoder_layers': 5,
        'max_len': 32,
        'embedding_size_src': 16, # mso
        'embedding_size_tgt': 27, # hvo
        'device': 'cuda' if torch.cuda.is_available() else 'cpu'
    },
    "training": {
        'learning_rate': 1e-3,
        'batch_size': 64,
        'lr_scheduler_step_size': 30,
        'lr_scheduler_gamma': 0.1
    },
    "dataset": {
        "subset_info": {
            "pickle_source_path": '../../../preprocessed_dataset/datasets_extracted_locally/GrooveMidi/hvo_0.4.5/Processed_On_14_06_2021_at_14_26_hrs',
            "subset": 'GrooveMIDI_processed_train',
            "metadata_csv_filename": 'metadata.csv',
            "hvo_pickle_filename": 'hvo_sequence_data.obj',
            "filters": {
                "beat_type": ["beat"],
                "time_signature": ["4-4"],
                # "master_id": ["drummer9/session1/8"]
                "master_id": ["drummer1/session1/201"]
            }
        },
        'max_len': 32,
        'mso_params': {'sr': 44100, 'n_fft': 1024, 'win_length': 1024, 'hop_length':
            441, 'n_bins_per_octave': 16, 'n_octaves': 9, 'f_min': 40, 'mean_filter_size': 22},
        'voices_params': {'voice_idx': [2], 'min_n_voices_to_remove': 1,  # closed hh
                          'max_n_voices_to_remove': 1, 'prob': [1], 'k': None},
        'sf_path': ['../../soundfonts/filtered_soundfonts/Standard_Drum_Kit.sf2'],
        'max_n_sf': 1,
        'max_aug_items': 1,
        'dataset_name': None
    },
    "evaluator": {"n_samples_to_use": 12,
                  "n_samples_to_synthesize_visualize_per_subset": 10},
    "cp_paths": {
        'checkpoint_path': '../train_results/',
        'checkpoint_save_str': '../train_results/transformer_groove_infilling-epoch-{}'
    },
    "load_model": None,
}
# load model
model, optimizer, ep = initialize_model(params)
# Either rebuild the dataset from the raw GrooveMIDI subsets, or (default)
# reload a previously preprocessed dataset from disk.
_preprocess_dataset = False
if _preprocess_dataset:
    _, subset_list = GrooveMidiSubsetter(pickle_source_path=params["dataset"]["subset_info"]["pickle_source_path"],
                                         subset=params["dataset"]["subset_info"]["subset"],
                                         hvo_pickle_filename=params["dataset"]["subset_info"]["hvo_pickle_filename"],
                                         list_of_filter_dicts_for_subsets=[
                                             params['dataset']["subset_info"]['filters']]).create_subsets()
    dataset = GrooveMidiDatasetInfilling(data=subset_list[0], **params['dataset'])
else:
    load_dataset_path = '../dataset/Dataset_16_06_2021_at_11_52_hrs'
    dataset = GrooveMidiDatasetInfilling(load_dataset_path=load_dataset_path)
    params["dataset"] = dataset.get_params()
print(dataset.__len__())
dataloader = DataLoader(dataset, batch_size=params['training']['batch_size'], shuffle=True)
# instance evaluator and set gt
evaluator = InfillingEvaluator(pickle_source_path=params["dataset"]["subset_info"]["pickle_source_path"],
                               set_subfolder=params["dataset"]["subset_info"]["subset"],
                               hvo_pickle_filename=params["dataset"]["subset_info"]["hvo_pickle_filename"],
                               max_hvo_shape=(32, 27),
                               n_samples_to_use=params["evaluator"]["n_samples_to_use"],
                               n_samples_to_synthesize_visualize_per_subset=params["evaluator"][
                                   "n_samples_to_synthesize_visualize_per_subset"],
                               disable_tqdm=False,
                               analyze_heatmap=True,
                               analyze_global_features=True,
                               dataset=dataset,
                               model=model,
                               n_epochs=100)
# TEST set_gt() method
# Recompute the ground-truth HVO array by hand and compare against what the
# evaluator stored internally (the print should show True on a match).
pre_gt = evaluator.get_gmd_ground_truth_hvo_sequences() # gt without infilling processing
preprocessed_dataset = evaluator.dataset.preprocess_dataset(pre_gt)
gt_eval_processed_inputs = preprocessed_dataset["processed_inputs"]
gt_eval_processed_gt = preprocessed_dataset["hvo_sequences"]
eval_hvo_sequences_inputs = preprocessed_dataset["hvo_sequences_inputs"]
eval_hvo_sequences_gt = preprocessed_dataset["hvo_sequences_outputs"]
gt_eval_hvo_index = preprocessed_dataset["hvo_index"]
gt_eval_voices_reduced = preprocessed_dataset["voices_reduced"]
gt_eval_soundfonts = preprocessed_dataset["soundfonts"]
eval_hvo_array = np.stack([hvo_seq.hvo for hvo_seq in eval_hvo_sequences_gt])
print("set_gt()", np.all(evaluator._gt_hvos_array == eval_hvo_array))
# train for 1 epoch, updates model
train_loop(dataloader=dataloader, groove_transformer=model, encoder_only=params["model"]["encoder_only"],
           opt = optimizer, epoch = ep, loss_fn = calculate_loss, bce_fn = torch.nn.BCEWithLogitsLoss(
    reduction='none'), mse_fn = torch.nn.MSELoss(reduction='none'), save = False, device = params["model"]['device'])
# TEST set_pred() method
# Re-run prediction manually: hits are kept as-is, while velocities and
# offsets are gated by the predicted hits, then compared with the
# evaluator's internal prediction array (the print should show True).
evaluator.set_pred()
eval_pred = model.predict(gt_eval_processed_inputs, use_thres=True, thres=0.5)
eval_pred_hvo_array = np.concatenate(eval_pred, axis=2)
eval_pred = np.zeros_like(eval_pred_hvo_array)
for idx in range(eval_pred_hvo_array.shape[0]): # N
    h_idx, v_idx, o_idx = get_hvo_idx_for_voice(voice_idx=gt_eval_voices_reduced[idx],
                                                n_voices=eval_pred_hvo_array.shape[2] // 3)
    eval_pred[idx, :, h_idx] = eval_pred_hvo_array[idx][:, h_idx]
    eval_pred[idx, :, v_idx] = eval_pred_hvo_array[idx][:, h_idx] * eval_pred_hvo_array[idx][:, v_idx]
    eval_pred[idx, :, o_idx] = eval_pred_hvo_array[idx][:, h_idx] * eval_pred_hvo_array[idx][:, o_idx]
print("set_pred()", np.all(evaluator._prediction_hvos_array == eval_pred))
media = evaluator.get_wandb_logging_media()
wandb.log(media)
"sys.path.append",
"numpy.stack",
"wandb.log",
"numpy.zeros_like",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"utils.get_hvo_idx_for_voice",
"Subset_Creators.subsetters.GrooveMidiSubsetter",
"dataset.GrooveMidiDatasetInfilling",
"sys.path.insert",
"numpy.a... | [((57, 90), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../model"""'], {}), "(1, '../../model')\n", (72, 90), False, 'import sys\n'), ((179, 225), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../GrooveEvaluator"""'], {}), "(1, '../../../GrooveEvaluator')\n", (194, 225), False, 'import sys\n'), ((226, 280), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../BaseGrooveTransformers/"""'], {}), "(1, '../../../BaseGrooveTransformers/')\n", (241, 280), False, 'import sys\n'), ((281, 330), 'sys.path.append', 'sys.path.append', (['"""../../../preprocessed_dataset/"""'], {}), "('../../../preprocessed_dataset/')\n", (296, 330), False, 'import sys\n'), ((331, 374), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../hvo_sequence"""'], {}), "(1, '../../../hvo_sequence')\n", (346, 374), False, 'import sys\n'), ((740, 752), 'wandb.init', 'wandb.init', ([], {}), '()\n', (750, 752), False, 'import wandb\n'), ((2815, 2839), 'models.train.initialize_model', 'initialize_model', (['params'], {}), '(params)\n', (2831, 2839), False, 'from models.train import initialize_model, calculate_loss, train_loop\n'), ((3725, 3803), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': "params['training']['batch_size']", 'shuffle': '(True)'}), "(dataset, batch_size=params['training']['batch_size'], shuffle=True)\n", (3735, 3803), False, 'from torch.utils.data import DataLoader\n'), ((3849, 4426), 'evaluator.InfillingEvaluator', 'InfillingEvaluator', ([], {'pickle_source_path': "params['dataset']['subset_info']['pickle_source_path']", 'set_subfolder': "params['dataset']['subset_info']['subset']", 'hvo_pickle_filename': "params['dataset']['subset_info']['hvo_pickle_filename']", 'max_hvo_shape': '(32, 27)', 'n_samples_to_use': "params['evaluator']['n_samples_to_use']", 'n_samples_to_synthesize_visualize_per_subset': "params['evaluator']['n_samples_to_synthesize_visualize_per_subset']", 'disable_tqdm': '(False)', 
'analyze_heatmap': '(True)', 'analyze_global_features': '(True)', 'dataset': 'dataset', 'model': 'model', 'n_epochs': '(100)'}), "(pickle_source_path=params['dataset']['subset_info'][\n 'pickle_source_path'], set_subfolder=params['dataset']['subset_info'][\n 'subset'], hvo_pickle_filename=params['dataset']['subset_info'][\n 'hvo_pickle_filename'], max_hvo_shape=(32, 27), n_samples_to_use=params\n ['evaluator']['n_samples_to_use'],\n n_samples_to_synthesize_visualize_per_subset=params['evaluator'][\n 'n_samples_to_synthesize_visualize_per_subset'], disable_tqdm=False,\n analyze_heatmap=True, analyze_global_features=True, dataset=dataset,\n model=model, n_epochs=100)\n", (3867, 4426), False, 'from evaluator import InfillingEvaluator\n'), ((5415, 5475), 'numpy.stack', 'np.stack', (['[hvo_seq.hvo for hvo_seq in eval_hvo_sequences_gt]'], {}), '([hvo_seq.hvo for hvo_seq in eval_hvo_sequences_gt])\n', (5423, 5475), True, 'import numpy as np\n'), ((6048, 6081), 'numpy.concatenate', 'np.concatenate', (['eval_pred'], {'axis': '(2)'}), '(eval_pred, axis=2)\n', (6062, 6081), True, 'import numpy as np\n'), ((6094, 6128), 'numpy.zeros_like', 'np.zeros_like', (['eval_pred_hvo_array'], {}), '(eval_pred_hvo_array)\n', (6107, 6128), True, 'import numpy as np\n'), ((6755, 6771), 'wandb.log', 'wandb.log', (['media'], {}), '(media)\n', (6764, 6771), False, 'import wandb\n'), ((3418, 3486), 'dataset.GrooveMidiDatasetInfilling', 'GrooveMidiDatasetInfilling', ([], {'data': 'subset_list[0]'}), "(data=subset_list[0], **params['dataset'])\n", (3444, 3486), False, 'from dataset import GrooveMidiDatasetInfilling\n'), ((3577, 3640), 'dataset.GrooveMidiDatasetInfilling', 'GrooveMidiDatasetInfilling', ([], {'load_dataset_path': 'load_dataset_path'}), '(load_dataset_path=load_dataset_path)\n', (3603, 3640), False, 'from dataset import GrooveMidiDatasetInfilling\n'), ((5495, 5545), 'numpy.all', 'np.all', (['(evaluator._gt_hvos_array == eval_hvo_array)'], {}), '(evaluator._gt_hvos_array == 
eval_hvo_array)\n', (5501, 5545), True, 'import numpy as np\n'), ((6209, 6318), 'utils.get_hvo_idx_for_voice', 'get_hvo_idx_for_voice', ([], {'voice_idx': 'gt_eval_voices_reduced[idx]', 'n_voices': '(eval_pred_hvo_array.shape[2] // 3)'}), '(voice_idx=gt_eval_voices_reduced[idx], n_voices=\n eval_pred_hvo_array.shape[2] // 3)\n', (6230, 6318), False, 'from utils import get_hvo_idx_for_voice\n'), ((6655, 6708), 'numpy.all', 'np.all', (['(evaluator._prediction_hvos_array == eval_pred)'], {}), '(evaluator._prediction_hvos_array == eval_pred)\n', (6661, 6708), True, 'import numpy as np\n'), ((5753, 5797), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (5779, 5797), False, 'import torch\n'), ((5813, 5847), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (5829, 5847), False, 'import torch\n'), ((1139, 1164), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1162, 1164), False, 'import torch\n'), ((2914, 3236), 'Subset_Creators.subsetters.GrooveMidiSubsetter', 'GrooveMidiSubsetter', ([], {'pickle_source_path': "params['dataset']['subset_info']['pickle_source_path']", 'subset': "params['dataset']['subset_info']['subset']", 'hvo_pickle_filename': "params['dataset']['subset_info']['hvo_pickle_filename']", 'list_of_filter_dicts_for_subsets': "[params['dataset']['subset_info']['filters']]"}), "(pickle_source_path=params['dataset']['subset_info'][\n 'pickle_source_path'], subset=params['dataset']['subset_info']['subset'\n ], hvo_pickle_filename=params['dataset']['subset_info'][\n 'hvo_pickle_filename'], list_of_filter_dicts_for_subsets=[params[\n 'dataset']['subset_info']['filters']])\n", (2933, 3236), False, 'from Subset_Creators.subsetters import GrooveMidiSubsetter\n')] |
import numpy as np
def rle2mask(rle, img_shape):
    """Decode a run-length-encoded string into a binary mask.

    *rle* alternates (offset, run_length) pairs, each offset being relative
    to the end of the previous run.  Returns a uint8 array of shape
    (img_shape[1], img_shape[0]) holding 1 inside runs and 0 elsewhere.
    """
    width, height = img_shape[0], img_shape[1]
    flat = np.zeros(width * height, dtype=np.uint8)
    numbers = [int(token) for token in rle.split()]
    position = 0
    for offset, run in zip(numbers[0::2], numbers[1::2]):
        position += offset
        # Use label value 1, not 255: segmentation ground truth must contain
        # class indices 1..num_class (0 = background); larger values can make
        # a network predict labels >= num_class.
        # (see https://github.com/tensorflow/models/issues/3906#issuecomment-391998102)
        flat[position:position + run] = 1
        position += run
    return flat.reshape(width, height).T
| [
"numpy.zeros"
] | [((107, 147), 'numpy.zeros', 'np.zeros', (['(width * height)'], {'dtype': 'np.uint8'}), '(width * height, dtype=np.uint8)\n', (115, 147), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# Copyright (c) 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import grpc
import datetime
import argparse
import numpy as np
from tensorflow import make_tensor_proto, make_ndarray
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
# Command-line interface for the benchmarking client: server addressing,
# model identification, batching, precision and reporting options.
parser = argparse.ArgumentParser(
    description='Sends requests via TFS gRPC API using images in numpy format.'
    ' It measures performance statistics.')
parser.add_argument('--images_numpy_path',
                    required=True,
                    help='image in numpy format')
parser.add_argument('--labels_numpy_path',
                    required=False,
                    help='labels in numpy format')
parser.add_argument('--grpc_address',
                    required=False,
                    default='localhost',
                    help='Specify url to grpc service. default:localhost')
parser.add_argument('--grpc_port',
                    required=False,
                    default=9178,
                    help='Specify port to grpc service. default: 9178')
parser.add_argument('--input_name',
                    required=False,
                    default='input',
                    help='Specify input tensor name. default: input')
parser.add_argument('--output_name',
                    required=False,
                    default='prob',
                    help='Specify output tensor name. default: prob')
parser.add_argument('--iterations',
                    default=0,
                    help='Number of requests iterations, '
                         'as default use number of images in numpy memmap. '
                         'default: 0 (consume all frames)',
                    type=int)
parser.add_argument('--batchsize',
                    default=1,
                    help='Number of images in a single request. default: 1',
                    type=int)
parser.add_argument('--model_name',
                    default='resnet',
                    help='Define model name in payload. default: resnet')
parser.add_argument('--model_version',
                    default=1,
                    help='Model version number. default: 1',
                    type=int)
parser.add_argument('--report_every',
                    default=0,
                    help='Report performance every X iterations',
                    type=int)
parser.add_argument('--precision',
                    default=np.float32,
                    help='input precision',
                    type=np.dtype)
parser.add_argument('--id',
                    default='--',
                    help='Helps identifying client')
args = parser.parse_args()
# Accuracy is only checked when a labels file was supplied; otherwise the
# client measures latency alone.
accurracy_measuring_mode = args.labels_numpy_path is not None
channel = grpc.insecure_channel("{}:{}".format(
    args.grpc_address,
    args.grpc_port))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
processing_times = np.zeros((0), int)
# Images are memory-mapped, min-max normalized to 0-255 and cast to the
# requested precision.
imgs = np.load(args.images_numpy_path, mmap_mode='r', allow_pickle=False)
imgs = imgs - np.min(imgs) # Normalization 0-255
imgs = imgs / np.ptp(imgs) * 255 # Normalization 0-255
imgs = imgs.astype(args.precision)
if accurracy_measuring_mode:
    labels = np.load(args.labels_numpy_path, mmap_mode='r', allow_pickle=False)
    matches_count = 0
    total_count = 0
# If input numpy file has too few frames according to the
# value of iterations and the batch size,
# it will be duplicated to match requested number of frames.
while args.batchsize >= imgs.shape[0]:
    imgs = np.append(imgs, imgs, axis=0)
    if accurracy_measuring_mode:
        labels = np.append(labels, labels, axis=0)
if args.iterations < 0:
    print("Argument '--iterations' can't be lower than 0")
    print("Exitting")
    sys.exit(1)
elif args.iterations == 0:
    iterations = int(imgs.shape[0] // args.batchsize)
else:
    iterations = args.iterations
iteration = 0
print("[{:2}] Starting iterations".format(args.id))
# Main benchmark loop: the outer while re-walks the image array until the
# requested iteration count is reached.
while iteration <= iterations:
    for x in range(0, imgs.shape[0] - args.batchsize + 1, args.batchsize):
        iteration += 1
        if iteration > iterations:
            break
        # Preparing image data
        img = imgs[x:(x + args.batchsize)]
        if accurracy_measuring_mode:
            # NOTE(review): only the first label of the batch is kept, so
            # accuracy checking appears to assume batchsize == 1 -- confirm.
            expected_label = labels[x:(x + args.batchsize)][0]
        # Creating request object
        request = predict_pb2.PredictRequest()
        request.model_spec.name = args.model_name
        request.model_spec.version.value = args.model_version
        # Populating request with data
        request.inputs[args.input_name].CopyFrom(
            make_tensor_proto(img, shape=(img.shape)))
        # Measuring gRPC request time
        start_time = datetime.datetime.now()
        result = stub.Predict(request, 10.0)
        end_time = datetime.datetime.now()
        # Aggregating processing time statistics
        duration = (end_time - start_time).total_seconds() * 1000
        processing_times = np.append(processing_times, np.array([duration]))
        # If we want to check accurracy
        if accurracy_measuring_mode:
            output = np.array(make_ndarray(result.outputs[args.output_name]))
            if args.model_name == "dummy":
                # The "dummy" model is expected to echo input + 1.
                if (img + 1 == output ).all():
                    matches_count += 1
                total_count += 1
            else:
                actual_label = np.argmax(output[0])
                if (expected_label == actual_label) :
                    matches_count += 1
                total_count += 1
        if args.report_every > 0 and iteration < iterations and iteration % args.report_every == 0:
            print(f'[{args.id:2}] Iteration {iteration:5}/{iterations:5}; '
                  f'Current latency: {round(duration, 2):.2f}ms; '
                  f'Average latency: {round(np.average(processing_times), 2):.2f}ms')
# Latency and accurracy
if accurracy_measuring_mode:
    accuracy = 100 * matches_count / total_count
    print(f"[{args.id:2}] "
          f"Iterations: {iterations:5}; "
          f"Final average latency: {round(np.average(processing_times), 2):.2f}ms; "
          f"Classification accuracy: {accuracy}%")
    if accuracy < 100.0:
        print('Accurracy is lower than 100')
        exit(1)
# Latency only
else:
    print(f"[{args.id:2}] "
          f"Iterations: {iterations:5}; "
          f"Final average latency: {round(np.average(processing_times), 2):.2f}ms")
| [
"numpy.load",
"tensorflow.make_tensor_proto",
"argparse.ArgumentParser",
"tensorflow_serving.apis.predict_pb2.PredictRequest",
"numpy.argmax",
"numpy.ptp",
"numpy.average",
"numpy.zeros",
"tensorflow.make_ndarray",
"numpy.append",
"tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionSe... | [((871, 1017), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sends requests via TFS gRPC API using images in numpy format. It measures performance statistics."""'}), "(description=\n 'Sends requests via TFS gRPC API using images in numpy format. It measures performance statistics.'\n )\n", (894, 1017), False, 'import argparse\n'), ((3406, 3464), 'tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub', 'prediction_service_pb2_grpc.PredictionServiceStub', (['channel'], {}), '(channel)\n', (3455, 3464), False, 'from tensorflow_serving.apis import prediction_service_pb2_grpc\n'), ((3485, 3501), 'numpy.zeros', 'np.zeros', (['(0)', 'int'], {}), '(0, int)\n', (3493, 3501), True, 'import numpy as np\n'), ((3512, 3578), 'numpy.load', 'np.load', (['args.images_numpy_path'], {'mmap_mode': '"""r"""', 'allow_pickle': '(False)'}), "(args.images_numpy_path, mmap_mode='r', allow_pickle=False)\n", (3519, 3578), True, 'import numpy as np\n'), ((3593, 3605), 'numpy.min', 'np.min', (['imgs'], {}), '(imgs)\n', (3599, 3605), True, 'import numpy as np\n'), ((3764, 3830), 'numpy.load', 'np.load', (['args.labels_numpy_path'], {'mmap_mode': '"""r"""', 'allow_pickle': '(False)'}), "(args.labels_numpy_path, mmap_mode='r', allow_pickle=False)\n", (3771, 3830), True, 'import numpy as np\n'), ((4085, 4114), 'numpy.append', 'np.append', (['imgs', 'imgs'], {'axis': '(0)'}), '(imgs, imgs, axis=0)\n', (4094, 4114), True, 'import numpy as np\n'), ((4309, 4320), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4317, 4320), False, 'import sys\n'), ((3643, 3655), 'numpy.ptp', 'np.ptp', (['imgs'], {}), '(imgs)\n', (3649, 3655), True, 'import numpy as np\n'), ((4165, 4198), 'numpy.append', 'np.append', (['labels', 'labels'], {'axis': '(0)'}), '(labels, labels, axis=0)\n', (4174, 4198), True, 'import numpy as np\n'), ((4920, 4948), 'tensorflow_serving.apis.predict_pb2.PredictRequest', 
'predict_pb2.PredictRequest', ([], {}), '()\n', (4946, 4948), False, 'from tensorflow_serving.apis import predict_pb2\n'), ((5266, 5289), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5287, 5289), False, 'import datetime\n'), ((5354, 5377), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5375, 5377), False, 'import datetime\n'), ((5163, 5202), 'tensorflow.make_tensor_proto', 'make_tensor_proto', (['img'], {'shape': 'img.shape'}), '(img, shape=img.shape)\n', (5180, 5202), False, 'from tensorflow import make_tensor_proto, make_ndarray\n'), ((5549, 5569), 'numpy.array', 'np.array', (['[duration]'], {}), '([duration])\n', (5557, 5569), True, 'import numpy as np\n'), ((5687, 5733), 'tensorflow.make_ndarray', 'make_ndarray', (['result.outputs[args.output_name]'], {}), '(result.outputs[args.output_name])\n', (5699, 5733), False, 'from tensorflow import make_tensor_proto, make_ndarray\n'), ((5946, 5966), 'numpy.argmax', 'np.argmax', (['output[0]'], {}), '(output[0])\n', (5955, 5966), True, 'import numpy as np\n'), ((6630, 6658), 'numpy.average', 'np.average', (['processing_times'], {}), '(processing_times)\n', (6640, 6658), True, 'import numpy as np\n'), ((6935, 6963), 'numpy.average', 'np.average', (['processing_times'], {}), '(processing_times)\n', (6945, 6963), True, 'import numpy as np\n'), ((6381, 6409), 'numpy.average', 'np.average', (['processing_times'], {}), '(processing_times)\n', (6391, 6409), True, 'import numpy as np\n')] |
import operator
import warnings
import numpy as np
import pandas as pd
import Bio
import Bio.SeqIO
from Bio import pairwise2
from Bio.PDB import PDBParser
from biograph.structure import StructureModel
from biograph.downloader import PdbDownloader
from biograph.constants import amino_1code, valid_amino_3, valid_amino_1
from biograph import alignment
class Protein:
"""Main BioGraph class for representing proteins through dataframes.
Provides utilities for handling atoms, constructing structures or graphs
and target variables."""
# Bio often produces a couple of warnings when loading pdb files.
# This can cloud logs when dealing with a large amount of pdbs.
# We provide the option to suppress those warnings, and with
# this module-level flag we make sure to only warn about it once.
_warned_about_suppressing_bio = False
@staticmethod
def fetch(pdb_id, base_path=".", suppress_bio_warnings=True):
"""Fetch a PDB file and instantiate a Protein with it."""
dw = PdbDownloader([pdb_id], base_path = base_path)
filenames = dw.request_and_write()
if filenames[0] is not None:
return Protein(filenames[0], suppress_bio_warnings=suppress_bio_warnings)
raise Exception("PDB could not be downloaded")
    def __init__(self, pdb, suppress_bio_warnings=True):
        """Wrap *pdb*: either a parsed Bio structure or a path to a PDB file.

        ``suppress_bio_warnings`` must be assigned before ``self.pdb``,
        because the ``pdb`` property setter reads it while parsing files.
        """
        self.suppress_bio_warnings = suppress_bio_warnings
        self.pdb = pdb
        # Lazily-built caches: structure model, atom dataframe, sequences.
        self.structure = None
        self._df = None
        self._seq = None
    @property
    def df(self):
        # Lazy accessor: build the atom dataframe on first use.
        # NOTE(review): generate_dataframe() is defined elsewhere in this
        # class and is expected to populate self._df -- confirm.
        if self._df is None:
            self.generate_dataframe()
        return self._df
    @df.setter
    def df(self, df):
        # Allow callers to inject a precomputed atom dataframe.
        self._df = df
@property
def sequences(self):
"""Tries to load the sequences from the PDB file if present.
It is also possible to use a PPBuilder for this, but sometimes
it breaks up a sequence into smaller parts and they stop
matching with the FASTA files.
Returns a dictionary of the form chain_id => sequence"""
if self._seq is not None:
return self._seq
if self.pdb_file is None:
return None
self._seq = {}
with open(self.pdb_file, "rU") as handle:
for record in Bio.SeqIO.parse(handle, "pdb-seqres"):
# record.name often is <unknown value>, but ID is not
# so we define the name as pdb id + chain id
chain_id = record.id #sometimes it's "A" (3RRF) sometimes it's "109T:A"
if ":" in chain_id:
chain_id = chain_id.split(":")[1]
name = "{}_{}".format(self.pdb.id, chain_id)
self._seq.update({name:record.seq})
return self._seq
    @property
    def pdb(self):
        # Name-mangled attribute populated by the ``pdb`` setter below.
        return self.__pdb
    @pdb.setter
    def pdb(self, pdb):
        """Accept either a parsed Bio structure or a path to a PDB file.

        When a path is given it is parsed with PDBParser; Bio's parser
        warnings are silenced (once, with a notice) when
        ``self.suppress_bio_warnings`` is True.
        """
        self.pdb_file = None
        if isinstance(pdb, Bio.PDB.Structure.Structure):
            self.__pdb = pdb
        elif isinstance(pdb, str):
            self.pdb_file = pdb
            with warnings.catch_warnings():
                if self.suppress_bio_warnings:
                    # Warn only once per process that Bio warnings are muted.
                    if not Protein._warned_about_suppressing_bio:
                        warnings.warn("suppress_bio_warnings=True, ignoring Bio's warnings.")
                        Protein._warned_about_suppressing_bio = True
                    warnings.simplefilter("ignore")
                parser = PDBParser()
                # Infer pdb_id from filename
                pdb_id = pdb.split("/")[-1][:-4]
                pdb = parser.get_structure(pdb_id, pdb)
                self.__pdb = pdb
        else:
            raise Exception("""A Bio.PDB.Structure.Structure or a
                valid path must be used""")
    @pdb.deleter
    def pdb(self):
        # Remove only the cached structure object; `pdb_file` is left as-is.
        del self.__pdb
def generate_structure(self, filter_rows):
"""
Generate a structure model from selected rows of the dataframe.
Parameters
----------
filter_rows: function
A filter function that is applied to each row of the dataframe
Returns
-------
structure: StructureModel
"""
rows = self.df.loc[
self.df.apply(filter_rows, axis=1),
["full_id", "coord"]].reset_index(drop=True)
ids, coords = rows["full_id"], rows["coord"]
self.structure = StructureModel(ids, coords)
return self.structure
def generate_graph(self, model, model_params):
self.graph = model.generate_graph(self, model_params)
return self.graph
def get_atoms(self, atom_as_dict=True, filter_atoms=lambda x: True,
filter_attr=lambda x: x):
"""Get atoms data from PDB.
This method uses Bio.PDB.Structure.Structure.get_atoms() method to
iterate through atoms. Atom can be represented both as a
Bio.PDB.Atom.Atom object or as a dict. In addition, filters can be
applied to choose what atoms and what atom's attributes retrieve.
Parameters
----------
atom_as_dict : bool
Whether to represent an atom as a dict or as a Bio.PDB.Atom.Atom
Default True
filter_atoms : function
Function applied to each atom, used to filter attributes.
filter_attr : function
Function applied to each atom, it must return True or False.
If True the atom is returned, if False, the atom is filtered.
Returns
-------
numpy.Array
Array of atoms.
Examples
-------
# Get CA atoms using dict representation
ca_atoms = prot.get_atoms(filter_atoms=lambda x: x["name"] == "CA")
# Get CA atoms using Bio representation
ca_atoms = prot.get_atoms(
filter_atoms=lambda x: x.name == "CA", atom_as_dict)
# Just retreieve coordinates
ca_atoms = prot.get_atoms(filter_attr=lambda x: x["coord"])
"""
if atom_as_dict:
atoms = [filter_attr(atom.__dict__) for atom in
self.pdb.get_atoms() if filter_atoms(atom.__dict__)]
else:
atoms = [filter_attr(atom) for atom in
self.pdb.get_atoms() if filter_atoms(atom)]
return np.array(atoms)
def get_residues_dict(self, filter_res=lambda x: True,
filter_attr=lambda x: x):
"""Get residues data from PDB as an array of dictionaries.
This method uses Bio.PDB.Structure.Structure.get_residues() method to
iterate through residues. In addition, filters can
be applied to choose which residues and which attributes to retrieve.
----------
Parameters
filter_res : function
Function applied to each residue, used to filter attributes.
filter_attr : function
Function applied to each residue, it must return True or False.
If True the residue is returned, if False, the residue is filtered.
Returns
-------
numpy.Array
Array of residue dicts.
Examples
-------
# Get HOH using dict representation
hoh = prot.get_residues(filter_res=lambda x: x["resname"] == "HOH")
hoh[0]
{'_id': ('W', 201, ' '),
'child_dict': {'O': <Atom O>},
'child_list': [<Atom O>],
'disordered': 0,
'full_id': ('1a3z', 0, 'A', ('W', 201, ' ')),
'level': 'R',
'parent': <Chain id=A>,
'resname': 'HOH',
'segid': ' ',
'xtra': {}}
# Get residues names
resnames = prot.get_residues(filter_attr=lambda x: x["resname"])
"""
atoms = [filter_attr(res.__dict__) for res in
self.pdb.get_residues() if filter_res(res.__dict__)]
return atoms
def get_residues(self, filter_res=lambda x: True,
filter_attr=lambda x: x):
"""Get residues data from PDB as an array of Bio.PDB.Residue.Residue.
This method uses Bio.PDB.Structure.Structure.get_residues() method to
iterate through residues. In addition, filters can
be applied to choose which residues and which attributes to retrieve.
----------
Parameters
filter_res : function
Function applied to each residue, used to filter attributes.
filter_attr : function
Function applied to each residue, it must return True or False.
If True the residue is returned, if False, the residue is filtered.
Returns
-------
numpy.Array
Array of residues.
Examples
-------
# Get HOH using dict representation
hoh = prot.get_residues(filter_res=lambda x: x.resname == "HOH")
hoh[0].__dict__
{'_id': ('W', 201, ' '),
'child_dict': {'O': <Atom O>},
'child_list': [<Atom O>],
'disordered': 0,
'full_id': ('1a3z', 0, 'A', ('W', 201, ' ')),
'level': 'R',
'parent': <Chain id=A>,
'resname': 'HOH',
'segid': ' ',
'xtra': {}}
# Get residues names
resnames = prot.get_residues(filter_attr=lambda x: x.resname)
"""
atoms = [filter_attr(res) for res in
self.pdb.get_residues() if filter_res(res)]
return atoms
def _get_bfactor_by_atom(self):
"""
Get bfactor data
:return: list of list of dicts, where each dict represents information of an atom and the list
represents a residue
"""
bfactor = [[{"enum": e[0], "bfactor": e[1].bfactor, "atom_full_id": e[1].full_id,
"res_full_id": e[1].parent.full_id} for e in enumerate(i.get_atoms())] for i in
self.pdb.get_residues()]
return bfactor
def _get_avg_bfactor_by_residue(self):
"""
Get average bfactor by residue
:return: list of dicts where each dict represents a residue
"""
bfactor = [{"res_full_id": i.full_id, "avg_bfactor": np.mean([a.bfactor for a in i])} for i in
self.pdb.get_residues()]
return bfactor
def get_conservation_features(self, path, chain_list = None):
"""
Calculates conservation features from aligning the sequence in the consurf file
located in `path`. Features are those specified in alignment.join_conservation_data.
Returns a dict that maps this protein's residue full ids to conservation features.
Parameters
----------
path: string
Full path to consurf grades file.
chain_list: list or None
List of chains to be considered for alignment, or None if you want to align all of
them. Each chain is aligned separately.
Returns
-------
all_features: dict
Maps res_full_ids to features. Format is taken from alignment.join_conservation_data
"""
all_features = {}
if chain_list is None:
chain_list = set([r.parent.id for r in self.get_residues()])
for chain in chain_list:
valid_residues = [r for r in self.get_residues()
if valid_amino_3(r.resname) and r.parent.id == chain]
self_sequence = ''.join([amino_1code(r.resname) for r in valid_residues])
features = {r.full_id:dict() for r in valid_residues}
# Join feature through alignment of sequences.
features = alignment.join_conservation_data(self_sequence, features, path)
all_features.update(features)
return all_features
def add_residue_features(self, features):
"""
Adds residue-level features to the dataframe.
Parameters
----------
features: dict
Dictionary mapping residue full ids to new features.
Returns
-------
None
"""
self.df = pd.concat(
[self.df, self.df.apply(
lambda row: features[row["res_full_id"]] if row["res_full_id"] in features else None,
axis = 1, result_type="expand")],
axis=1)
@staticmethod
def distance_bet_res(r1, r2):
atoms1 = np.array([(i.coord[0], i.coord[1], i.coord[2]) for i in r1.get_atoms()])
atoms2 = np.array([(i.coord[0], i.coord[1], i.coord[2]) for i in r2.get_atoms()])
v1 = np.repeat(atoms1.reshape(1,-1,3), atoms2.shape[0], axis=0)
v2 = np.repeat(atoms2, atoms1.shape[0], axis=0).reshape(atoms2.shape[0],-1,3)
return np.min(np.sum((v1 - v2)**2, axis=-1))
def head(self, n=10):
""" Return the first `n` rows (atoms) of pandas.DataFrame representation
of PDB.
Parameters
----------
n : int
Number of rows (atoms) to select.
Returns
-------
pandas.Dataframe
n first rows of pandas.DataFrame representation of PDB
"""
return self.df.head(n)
@staticmethod
def __get_coordinates(coord, raise_error=True):
if hasattr(coord, "__getitem__"):
return coord[0], coord[1], coord[2]
else:
if raise_error:
raise TypeError("""Non-suscriptable type when parsing file.
Please, check if there are null values in PDB
or use raise_error=False""")
else:
return None, None, None
def generate_dataframe(self,
columns=["bfactor", "chain", "coord",
"disordered_flag", "element",
"full_id", "res_full_id",
"mass", "resname", "occupancy"],
split_coordinates=True, raise_error=False):
""" Generate a Pandas DataFrame from the PDB
Parameters
----------
columns : list
list of column names to subset DataFrame
split_coordinates: bool
whether to return three extra columns x, y, z for coordinates or not
raise_error: bool
when trying to split coordinates you can choose to raise error or
to generate null values. Default is False.
Returns
-------
Pandas.DataFrame
DataFrame which has an atom per row
"""
full_atom = []
for model in self.pdb:
for chain in model:
for residue in chain:
for atom in residue:
atom_i = atom.__dict__
atom_i["resname"] = residue.resname
atom_i["res_full_id"] = residue.full_id
atom_i["chain"] = chain.id
atom_i["model"] = str(model.id)
full_atom.append(atom_i)
del(atom_i)
df = pd.DataFrame(full_atom).loc[:, columns]
if split_coordinates:
df["x"], df["y"], df["z"] = zip(*df.coord.apply(
lambda x: self.__get_coordinates(x, raise_error)))
self._df = df
def select_chains(self, chain_list):
"""Discards rows from the dataframe that are not in the chainlist."""
self.df = self.df[self.df.chain.isin(chain_list)]
def _filter_het_rows(self, allowed_ligands, discard_water, keep_normal_atoms):
"""Provides a pandas filter to handle ligands, water and normal atoms"""
return lambda res: ((keep_normal_atoms and (res[3][0][0:2] not in ["H_", "W"]))
or (res[3][0][0:2] == "H_" and res[3][0][2:] in allowed_ligands)
or (res[3][0] == "W" and not discard_water))
def select_ligands(self, allowed_ligands, discard_water=True):
"""Keep only heterogen atoms that are ligands in provided list.
`allowed_ligands` must be hetIDs or ligand IDs as found in the PDB,
e.g. ATP, BFS, GOL, etc.
`discard_water` controls whether to filter out water atoms.
"""
if isinstance(allowed_ligands, str):
allowed_ligands = [allowed_ligands]
allowed_rows = self.df.res_full_id.apply(
self._filter_het_rows(allowed_ligands, discard_water, True))
self.df = self.df.loc[allowed_rows]
def extract_ligands(self, allowed_ligands, remove_from_df=True):
"""Extract and return ligand rows from dataframe, optionally
removing from the dataframe as well (`remove_from_df`)"""
if isinstance(allowed_ligands, str):
allowed_ligands = [allowed_ligands]
is_ligand = self.df.res_full_id.apply(
self._filter_het_rows(allowed_ligands, True, False))
ligand_rows = self.df.loc[is_ligand]
if remove_from_df:
self.df = self.df.loc[~is_ligand]
return ligand_rows
def discard_ligands(self):
"""Discards rows from the dataframe that correspond to ligands or
heterogen atoms.
For more information read about the HET section in PDB files:
https://www.wwpdb.org/documentation/file-format-content/format33/sect4.html"""
het_rows = self.df.res_full_id.apply(
lambda res: res[3][0] == "W" or res[3][0][0:2] == "H_")
self.df = self.df.loc[~het_rows]
def discard_empty_coordinates(self):
"""Discards rows from the dataframe with missing coordinates.
This is necessary when building structure models and the like.
More info on missing coords:
pdb101.rcsb.org/learn/guide-to-understanding-pdb-data/missing-coordinates-and-biological-assemblies"""
self.df = self.df.loc[~self.df.coord.isnull()]
    def add_distance_to_target_feature(self, target_rows):
        """Adds a `distance` feature to the dataframe which holds
        the minimum distance of each atom to the atoms of target_rows.
        This can be useful as a target for supervised learning."""
        # Materialise the target coordinates once so the per-row lambda
        # below does not re-extract them for every atom.
        target_coords = target_rows.coord.to_list()
        # O(len(df) * len(target_rows)): for each atom, take the smallest
        # Euclidean norm of the difference to any target atom.
        self.df["distance"] = self.df.coord.apply(
            lambda atom: min(map(lambda target_atom: np.linalg.norm(atom-target_atom), target_coords))
) | [
"biograph.structure.StructureModel",
"pandas.DataFrame",
"biograph.constants.amino_1code",
"Bio.SeqIO.parse",
"numpy.sum",
"warnings.simplefilter",
"biograph.downloader.PdbDownloader",
"numpy.mean",
"numpy.array",
"warnings.catch_warnings",
"Bio.PDB.PDBParser",
"biograph.constants.valid_amino_... | [((1026, 1070), 'biograph.downloader.PdbDownloader', 'PdbDownloader', (['[pdb_id]'], {'base_path': 'base_path'}), '([pdb_id], base_path=base_path)\n', (1039, 1070), False, 'from biograph.downloader import PdbDownloader\n'), ((4392, 4419), 'biograph.structure.StructureModel', 'StructureModel', (['ids', 'coords'], {}), '(ids, coords)\n', (4406, 4419), False, 'from biograph.structure import StructureModel\n'), ((6292, 6307), 'numpy.array', 'np.array', (['atoms'], {}), '(atoms)\n', (6300, 6307), True, 'import numpy as np\n'), ((2268, 2305), 'Bio.SeqIO.parse', 'Bio.SeqIO.parse', (['handle', '"""pdb-seqres"""'], {}), "(handle, 'pdb-seqres')\n", (2283, 2305), False, 'import Bio\n'), ((11561, 11624), 'biograph.alignment.join_conservation_data', 'alignment.join_conservation_data', (['self_sequence', 'features', 'path'], {}), '(self_sequence, features, path)\n', (11593, 11624), False, 'from biograph import alignment\n'), ((12644, 12675), 'numpy.sum', 'np.sum', (['((v1 - v2) ** 2)'], {'axis': '(-1)'}), '((v1 - v2) ** 2, axis=-1)\n', (12650, 12675), True, 'import numpy as np\n'), ((10119, 10150), 'numpy.mean', 'np.mean', (['[a.bfactor for a in i]'], {}), '([a.bfactor for a in i])\n', (10126, 10150), True, 'import numpy as np\n'), ((12549, 12591), 'numpy.repeat', 'np.repeat', (['atoms2', 'atoms1.shape[0]'], {'axis': '(0)'}), '(atoms2, atoms1.shape[0], axis=0)\n', (12558, 12591), True, 'import numpy as np\n'), ((15012, 15035), 'pandas.DataFrame', 'pd.DataFrame', (['full_atom'], {}), '(full_atom)\n', (15024, 15035), True, 'import pandas as pd\n'), ((3055, 3080), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3078, 3080), False, 'import warnings\n'), ((3435, 3446), 'Bio.PDB.PDBParser', 'PDBParser', ([], {}), '()\n', (3444, 3446), False, 'from Bio.PDB import PDBParser\n'), ((11364, 11386), 'biograph.constants.amino_1code', 'amino_1code', (['r.resname'], {}), '(r.resname)\n', (11375, 11386), False, 'from 
biograph.constants import amino_1code, valid_amino_3, valid_amino_1\n'), ((3378, 3409), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3399, 3409), False, 'import warnings\n'), ((11276, 11300), 'biograph.constants.valid_amino_3', 'valid_amino_3', (['r.resname'], {}), '(r.resname)\n', (11289, 11300), False, 'from biograph.constants import amino_1code, valid_amino_3, valid_amino_1\n'), ((3219, 3288), 'warnings.warn', 'warnings.warn', (['"""suppress_bio_warnings=True, ignoring Bio\'s warnings."""'], {}), '("suppress_bio_warnings=True, ignoring Bio\'s warnings.")\n', (3232, 3288), False, 'import warnings\n'), ((18228, 18262), 'numpy.linalg.norm', 'np.linalg.norm', (['(atom - target_atom)'], {}), '(atom - target_atom)\n', (18242, 18262), True, 'import numpy as np\n')] |
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""
import argparse
import os, time
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
os.environ['OPENAI_LOGDIR'] = 'logs_test_2e-5'
if not os.path.exists(os.environ['OPENAI_LOGDIR']):
os.mkdir(os.environ['OPENAI_LOGDIR'])
import numpy as np
import torch as th
import torch.nn as nn
import torch.distributed as dist
from torchvision.utils import save_image
from improved_diffusion import dist_util, logger
from improved_diffusion.script_util import (
NUM_CLASSES,
model_and_diffusion_defaults,
create_model_and_diffusion,
add_dict_to_argparser,
args_to_dict,
)
def main():
    """Sample a batch of images from the trained diffusion model and save
    them as one normalized PNG grid in the logger directory."""
    args = create_argparser().parse_args()
    dist_util.setup_dist()
    logger.configure()
    logger.log("creating model and diffusion...")
    model, diffusion = create_model_and_diffusion(
        **args_to_dict(args, model_and_diffusion_defaults().keys())
    )
    # NOTE(review): the checkpoint is only loaded when CUDA is available;
    # on a CPU-only host the model keeps its random init — confirm intended.
    if th.cuda.is_available():
        model.load_state_dict(
            dist_util.load_state_dict(args.model_path)
        )
    model.to(dist_util.dev())
    model.eval()
    logger.log("sampling...")
    # kernel_code = th.randn((1, 1, 16, 16), device=dist_util.dev())
    # kernel_code = th.load('tensor_2.pt').to(dist_util.dev())
    # th.save(kernel_code, 'tensor_2.pt')
    all_images = []
    t0 = time.time()
    # Keep sampling batches until at least num_samples images were produced.
    while len(all_images) * args.batch_size < args.num_samples:
        model_kwargs = {}
        sample_fn = (
            diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
        )
        sample = sample_fn(
            model,
            (args.batch_size, 1, args.image_size, args.image_size),
            clip_denoised=args.clip_denoised,
            model_kwargs=model_kwargs,
        )
        # Domain-specific value clipping — presumably matches the data
        # range the model was trained on; confirm against training code.
        sample = sample.clamp(0, 0.15)
        sample = sample.contiguous()
        # Collect this batch from every distributed rank.
        gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_samples, sample)  # gather not supported with NCCL
        all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
        logger.log(f"created {len(all_images) * args.batch_size} samples")
        # NOTE(review): t1 is assigned only inside the loop; num_samples == 0
        # would leave it undefined for the final log below.
        t1 = time.time()
    arr = np.concatenate(all_images, axis=0)
    arr = th.tensor(arr)
    filename = 'generated_samples' + '.png'
    # rescale the maximum value to 1 for visualization (per-sample max
    # over the flattened spatial dimensions)
    samples_max, _ = arr.flatten(2).max(2, keepdim=True)
    samples = arr / samples_max.unsqueeze(3)
    save_image(samples, os.path.join(logger.get_dir(), filename), nrow=10, normalize=True)
    dist.barrier()
    logger.log("sampling complete")
    logger.log("sampling time: {}".format(t1-t0))
def create_argparser():
    """Build the CLI parser: sampling defaults plus the model/diffusion flags."""
    defaults = {
        "clip_denoised": True,
        "num_samples": 100,
        "batch_size": 10,
        "use_ddim": False,
        "model_path": "experiments/logs_1000_0.0001_KL/ema_0.9999_116000.pt",
    }
    defaults.update(model_and_diffusion_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser
if __name__ == "__main__":
main()
| [
"os.mkdir",
"argparse.ArgumentParser",
"torch.distributed.all_gather",
"torch.distributed.get_world_size",
"improved_diffusion.dist_util.load_state_dict",
"improved_diffusion.dist_util.setup_dist",
"improved_diffusion.script_util.model_and_diffusion_defaults",
"os.path.exists",
"improved_diffusion.d... | [((285, 328), 'os.path.exists', 'os.path.exists', (["os.environ['OPENAI_LOGDIR']"], {}), "(os.environ['OPENAI_LOGDIR'])\n", (299, 328), False, 'import os, time\n'), ((334, 371), 'os.mkdir', 'os.mkdir', (["os.environ['OPENAI_LOGDIR']"], {}), "(os.environ['OPENAI_LOGDIR'])\n", (342, 371), False, 'import os, time\n'), ((794, 816), 'improved_diffusion.dist_util.setup_dist', 'dist_util.setup_dist', ([], {}), '()\n', (814, 816), False, 'from improved_diffusion import dist_util, logger\n'), ((821, 839), 'improved_diffusion.logger.configure', 'logger.configure', ([], {}), '()\n', (837, 839), False, 'from improved_diffusion import dist_util, logger\n'), ((845, 890), 'improved_diffusion.logger.log', 'logger.log', (['"""creating model and diffusion..."""'], {}), "('creating model and diffusion...')\n", (855, 890), False, 'from improved_diffusion import dist_util, logger\n'), ((1023, 1045), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (1043, 1045), True, 'import torch as th\n'), ((1192, 1217), 'improved_diffusion.logger.log', 'logger.log', (['"""sampling..."""'], {}), "('sampling...')\n", (1202, 1217), False, 'from improved_diffusion import dist_util, logger\n'), ((1428, 1439), 'time.time', 'time.time', ([], {}), '()\n', (1437, 1439), False, 'import os, time\n'), ((2285, 2296), 'time.time', 'time.time', ([], {}), '()\n', (2294, 2296), False, 'import os, time\n'), ((2307, 2341), 'numpy.concatenate', 'np.concatenate', (['all_images'], {'axis': '(0)'}), '(all_images, axis=0)\n', (2321, 2341), True, 'import numpy as np\n'), ((2352, 2366), 'torch.tensor', 'th.tensor', (['arr'], {}), '(arr)\n', (2361, 2366), True, 'import torch as th\n'), ((2672, 2686), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (2684, 2686), True, 'import torch.distributed as dist\n'), ((2691, 2722), 'improved_diffusion.logger.log', 'logger.log', (['"""sampling complete"""'], {}), "('sampling complete')\n", (2701, 2722), False, 'from 
improved_diffusion import dist_util, logger\n'), ((3066, 3091), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3089, 3091), False, 'import argparse\n'), ((3096, 3135), 'improved_diffusion.script_util.add_dict_to_argparser', 'add_dict_to_argparser', (['parser', 'defaults'], {}), '(parser, defaults)\n', (3117, 3135), False, 'from improved_diffusion.script_util import NUM_CLASSES, model_and_diffusion_defaults, create_model_and_diffusion, add_dict_to_argparser, args_to_dict\n'), ((1153, 1168), 'improved_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (1166, 1168), False, 'from improved_diffusion import dist_util, logger\n'), ((2035, 2076), 'torch.distributed.all_gather', 'dist.all_gather', (['gathered_samples', 'sample'], {}), '(gathered_samples, sample)\n', (2050, 2076), True, 'import torch.distributed as dist\n'), ((3021, 3051), 'improved_diffusion.script_util.model_and_diffusion_defaults', 'model_and_diffusion_defaults', ([], {}), '()\n', (3049, 3051), False, 'from improved_diffusion.script_util import NUM_CLASSES, model_and_diffusion_defaults, create_model_and_diffusion, add_dict_to_argparser, args_to_dict\n'), ((1086, 1128), 'improved_diffusion.dist_util.load_state_dict', 'dist_util.load_state_dict', (['args.model_path'], {}), '(args.model_path)\n', (1111, 1128), False, 'from improved_diffusion import dist_util, logger\n'), ((1966, 1987), 'torch.zeros_like', 'th.zeros_like', (['sample'], {}), '(sample)\n', (1979, 1987), True, 'import torch as th\n'), ((2613, 2629), 'improved_diffusion.logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (2627, 2629), False, 'from improved_diffusion import dist_util, logger\n'), ((2003, 2024), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2022, 2024), True, 'import torch.distributed as dist\n'), ((971, 1001), 'improved_diffusion.script_util.model_and_diffusion_defaults', 'model_and_diffusion_defaults', ([], {}), '()\n', (999, 1001), False, 'from 
improved_diffusion.script_util import NUM_CLASSES, model_and_diffusion_defaults, create_model_and_diffusion, add_dict_to_argparser, args_to_dict\n')] |
"""Streamlit stock-price prediction dashboard.

Downloads daily prices for a user-supplied ticker, shows descriptive
statistics and moving-average charts, and runs a pre-trained Keras model
to predict prices (including the next day's close).
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date
import pandas_datareader as pdr
from keras.models import load_model
import streamlit as st
from sklearn.preprocessing import MinMaxScaler

start = '2015-01-01'
end = date.isoformat(date.today())

st.title("STOCK PRICE PREDICTION")
# Renamed from `input`, which shadowed the builtin of the same name.
ticker = st.text_input("enter the stock ticker(ex : for Apple it is AAPL)", 'AAPL')
dataset = pdr.DataReader(ticker, 'yahoo', start, end)

# Describing the data.
st.subheader('Data from 2015 - till date')
st.write(dataset.describe())

# Split the closing prices 70/30 into train and test sets.
training_set = pd.DataFrame(dataset["Close"][0:int(len(dataset) * .70)])
testing_set = pd.DataFrame(dataset["Close"][int(len(dataset) * .70): int(len(dataset))])

# Scale the training data to [0, 1] and build 10-step look-back windows.
sc = MinMaxScaler(feature_range=(0, 1))
dt_train_sc = sc.fit_transform(training_set)
x_train = []
y_train = []
for i in range(10, dt_train_sc.shape[0]):
    x_train.append(dt_train_sc[i - 10:i])
    y_train.append(dt_train_sc[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)

# Visualisations.
st.subheader('closing price vs time chart')
fig = plt.figure(figsize=(12, 6))
plt.plot(dataset.Close)
st.pyplot(fig)

st.subheader('closing price vs time chart with mv100 & mv200')
st.write("The prices are going to increase if the moving average 100 crosses over the moving average 200 otherwise the price will fall")
mv100 = dataset.Close.rolling(100).mean()
mv200 = dataset.Close.rolling(200).mean()
fig = plt.figure(figsize=(12, 6))
plt.plot(mv100, 'r', label="mv 100")
plt.plot(mv200, 'g', label="mv 200")
plt.xlabel("Time")
plt.ylabel("Price")
plt.legend()
plt.plot(dataset.Close, 'b', label="true price")
st.pyplot(fig)

regressor = load_model('stock_model1.h5')

# Testing part: prepend the last 70 training points so every test sample
# has a full look-back window.
# NOTE(review): the test windows are 70 steps while the training windows
# above use 10 — confirm this matches the saved model's input shape.
sc_test = MinMaxScaler(feature_range=(0, 1))
prev_data = training_set.tail(70)
# pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
f_test = pd.concat([prev_data, testing_set], ignore_index=True)
f_test_sc = sc_test.fit_transform(f_test)
x_test = []
y_test = []
for i in range(70, f_test_sc.shape[0]):
    x_test.append(f_test_sc[i - 70:i])
    y_test.append(f_test_sc[i])
x_test, y_test = np.array(x_test), np.array(y_test)

y_predicted = regressor.predict(x_test)
y_test = sc_test.inverse_transform(y_test)
y_predicted = sc_test.inverse_transform(y_predicted)

fig1 = plt.figure(figsize=(12, 6))
plt.plot(y_test, 'b', label="TRUE PRICE")
plt.plot(y_predicted, 'r', label="Predicted Price")
plt.xlabel("Time")
plt.ylabel("Price")
plt.legend()
plt.show()
st.subheader("True VS Predicted prices")
st.pyplot(fig1)

# Next-day prediction from the latest 70 closing prices.
chuma = pd.DataFrame(dataset['Close'].tail(70))
chuma = pd.DataFrame(sc.fit_transform(chuma))
arr = []
arr.append(chuma.iloc[:].values)
arr = np.array(arr)
st.subheader("The next day prediction is :")
result = sc.inverse_transform(regressor.predict(arr))
st.text(result[0][0])

prev_close = sc.inverse_transform([[chuma.iloc[-1, 0]]])
temp = result[0][0] - prev_close[0][0]
if temp > 0:
    trend = 'rise'
else:
    trend = 'fall'
te = 'There could be a ' + trend + ' in the stock price'
st.write(te)
st.subheader('*The predictions are only for research purpose and it may vary by 10-15 % in the real time')
"pandas_datareader.DataReader",
"streamlit.subheader",
"keras.models.load_model",
"streamlit.text_input",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"sklearn.preprocessing.MinMaxScaler",
"datetime.date.today",
"streamlit.title",
"streamlit.write",
"matplotl... | [((255, 289), 'streamlit.title', 'st.title', (['"""STOCK PRICE PREDICTION"""'], {}), "('STOCK PRICE PREDICTION')\n", (263, 289), True, 'import streamlit as st\n'), ((298, 372), 'streamlit.text_input', 'st.text_input', (['"""enter the stock ticker(ex : for Apple it is AAPL)"""', '"""AAPL"""'], {}), "('enter the stock ticker(ex : for Apple it is AAPL)', 'AAPL')\n", (311, 372), True, 'import streamlit as st\n'), ((381, 423), 'pandas_datareader.DataReader', 'pdr.DataReader', (['input', '"""yahoo"""', 'start', 'end'], {}), "(input, 'yahoo', start, end)\n", (395, 423), True, 'import pandas_datareader as pdr\n'), ((446, 488), 'streamlit.subheader', 'st.subheader', (['"""Data from 2015 - till date"""'], {}), "('Data from 2015 - till date')\n", (458, 488), True, 'import streamlit as st\n'), ((800, 834), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (812, 834), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1098, 1141), 'streamlit.subheader', 'st.subheader', (['"""closing price vs time chart"""'], {}), "('closing price vs time chart')\n", (1110, 1141), True, 'import streamlit as st\n'), ((1147, 1174), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1157, 1174), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1198), 'matplotlib.pyplot.plot', 'plt.plot', (['dataset.Close'], {}), '(dataset.Close)\n', (1183, 1198), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1214), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (1209, 1214), True, 'import streamlit as st\n'), ((1218, 1280), 'streamlit.subheader', 'st.subheader', (['"""closing price vs time chart with mv100 & mv200"""'], {}), "('closing price vs time chart with mv100 & mv200')\n", (1230, 1280), True, 'import streamlit as st\n'), ((1282, 1428), 'streamlit.write', 'st.write', (['"""The prices are going to increase if the moving average 100 crosses over the 
moving average 200 otherwise the price will fall"""'], {}), "(\n 'The prices are going to increase if the moving average 100 crosses over the moving average 200 otherwise the price will fall'\n )\n", (1290, 1428), True, 'import streamlit as st\n'), ((1506, 1533), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1516, 1533), True, 'import matplotlib.pyplot as plt\n'), ((1534, 1570), 'matplotlib.pyplot.plot', 'plt.plot', (['mv100', '"""r"""'], {'label': '"""mv 100"""'}), "(mv100, 'r', label='mv 100')\n", (1542, 1570), True, 'import matplotlib.pyplot as plt\n'), ((1570, 1606), 'matplotlib.pyplot.plot', 'plt.plot', (['mv200', '"""g"""'], {'label': '"""mv 200"""'}), "(mv200, 'g', label='mv 200')\n", (1578, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1606, 1624), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (1616, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1645), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (1636, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1647, 1659), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1657, 1659), True, 'import matplotlib.pyplot as plt\n'), ((1661, 1709), 'matplotlib.pyplot.plot', 'plt.plot', (['dataset.Close', '"""b"""'], {'label': '"""true price"""'}), "(dataset.Close, 'b', label='true price')\n", (1669, 1709), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1723), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (1718, 1723), True, 'import streamlit as st\n'), ((1780, 1814), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1792, 1814), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1871, 1900), 'keras.models.load_model', 'load_model', (['"""stock_model1.h5"""'], {}), "('stock_model1.h5')\n", (1881, 1900), False, 'from keras.models import load_model\n'), ((1928, 1962), 
'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1940, 1962), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2418, 2445), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2428, 2445), True, 'import matplotlib.pyplot as plt\n'), ((2446, 2487), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test', '"""b"""'], {'label': '"""TRUE PRICE"""'}), "(y_test, 'b', label='TRUE PRICE')\n", (2454, 2487), True, 'import matplotlib.pyplot as plt\n'), ((2487, 2538), 'matplotlib.pyplot.plot', 'plt.plot', (['y_predicted', '"""r"""'], {'label': '"""Predicted Price"""'}), "(y_predicted, 'r', label='Predicted Price')\n", (2495, 2538), True, 'import matplotlib.pyplot as plt\n'), ((2538, 2556), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2548, 2556), True, 'import matplotlib.pyplot as plt\n'), ((2558, 2577), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (2568, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2579, 2591), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2589, 2591), True, 'import matplotlib.pyplot as plt\n'), ((2593, 2603), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2601, 2603), True, 'import matplotlib.pyplot as plt\n'), ((2605, 2645), 'streamlit.subheader', 'st.subheader', (['"""True VS Predicted prices"""'], {}), "('True VS Predicted prices')\n", (2617, 2645), True, 'import streamlit as st\n'), ((2647, 2662), 'streamlit.pyplot', 'st.pyplot', (['fig1'], {}), '(fig1)\n', (2656, 2662), True, 'import streamlit as st\n'), ((2804, 2817), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (2812, 2817), True, 'import numpy as np\n'), ((2819, 2863), 'streamlit.subheader', 'st.subheader', (['"""The next day prediction is :"""'], {}), "('The next day prediction is :')\n", (2831, 2863), True, 'import streamlit as st\n'), ((2918, 2939), 'streamlit.text', 
'st.text', (['result[0][0]'], {}), '(result[0][0])\n', (2925, 2939), True, 'import streamlit as st\n'), ((3184, 3196), 'streamlit.write', 'st.write', (['te'], {}), '(te)\n', (3192, 3196), True, 'import streamlit as st\n'), ((3200, 3316), 'streamlit.subheader', 'st.subheader', (['"""*The predictions are only for research purpose and it may vary by 10-15 % in the real time"""'], {}), "(\n '*The predictions are only for research purpose and it may vary by 10-15 % in the real time'\n )\n", (3212, 3316), True, 'import streamlit as st\n'), ((240, 252), 'datetime.date.today', 'date.today', ([], {}), '()\n', (250, 252), False, 'from datetime import date\n'), ((1042, 1059), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1050, 1059), True, 'import numpy as np\n'), ((1060, 1077), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1068, 1077), True, 'import numpy as np\n'), ((2240, 2256), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (2248, 2256), True, 'import numpy as np\n'), ((2257, 2273), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (2265, 2273), True, 'import numpy as np\n')] |
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
def plot(data, title):
plot.i += 1
plt.subplot(2,3,plot.i)
plt.imshow(data)
plt.title(title)
plot.i = 0
def plot_his(data, title):
plot.i += 1
plt.subplot(2,3,plot.i)
plt.plot(data)
plt.title(title)
plot.i = 0
img = Image.open('../src/lena_gray.jpg').convert('L')
img.save('img_BW2.png')
#### plotting original histogram ####
pixels=np.array(img, dtype=np.int64)
row= len(pixels)
col= len(pixels[0])
his=np.zeros((256))
for i in range(row):
for j in range(col):
his[pixels[i][j]]=his[pixels[i][j]]+1
plot_his(his, 'Original histogram')
#### plotting cumulative histogram ####
hiscum=np.zeros((256))
hiscum[0]=his[0]
for i in range(1,256):
hiscum[i]=hiscum[i-1]+his[i]
plot_his(hiscum, 'Cumulative histogram')
#### finding transformtion function to stretch the histogram ####
transfunc=np.zeros((256))
for i in range(256):
transfunc[i]= round((255.0/(row*col))*hiscum[i])
plot_his(transfunc,'transformation function')
npixels=np.zeros((row,col))
for i in range(row):
for j in range(col):
npixels[i][j]=transfunc[pixels[i][j]]
#### plotting new histogram ####
nhis=np.zeros((256))
for i in range(row):
for j in range(col):
nhis[npixels[i][j]]=nhis[npixels[i][j]]+1
plot_his(nhis,'new histogram')
plot(pixels,'Original')
plot(npixels,'new image')
plt.show() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"PIL.Image.open",
"numpy.array"
] | [((442, 471), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.int64'}), '(img, dtype=np.int64)\n', (450, 471), True, 'import numpy as np\n'), ((513, 526), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (521, 526), True, 'import numpy as np\n'), ((714, 727), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (722, 727), True, 'import numpy as np\n'), ((926, 939), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (934, 939), True, 'import numpy as np\n'), ((1071, 1091), 'numpy.zeros', 'np.zeros', (['(row, col)'], {}), '((row, col))\n', (1079, 1091), True, 'import numpy as np\n'), ((1222, 1235), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (1230, 1235), True, 'import numpy as np\n'), ((1416, 1426), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1424, 1426), True, 'import matplotlib.pyplot as plt\n'), ((117, 142), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', 'plot.i'], {}), '(2, 3, plot.i)\n', (128, 142), True, 'import matplotlib.pyplot as plt\n'), ((145, 161), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data'], {}), '(data)\n', (155, 161), True, 'import matplotlib.pyplot as plt\n'), ((166, 182), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (175, 182), True, 'import matplotlib.pyplot as plt\n'), ((242, 267), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', 'plot.i'], {}), '(2, 3, plot.i)\n', (253, 267), True, 'import matplotlib.pyplot as plt\n'), ((270, 284), 'matplotlib.pyplot.plot', 'plt.plot', (['data'], {}), '(data)\n', (278, 284), True, 'import matplotlib.pyplot as plt\n'), ((289, 305), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (298, 305), True, 'import matplotlib.pyplot as plt\n'), ((324, 358), 'PIL.Image.open', 'Image.open', (['"""../src/lena_gray.jpg"""'], {}), "('../src/lena_gray.jpg')\n", (334, 358), False, 'from PIL import Image\n')] |
"""
Reproduce Table 2
Condorcet Efficiencies under Random Society and
Spatial Model Assumptions
(201 voters, 5 candidates)
from
<NAME> III, "A Comparison of Efficiency of Multicandidate
Electoral Systems", American Journal of Political Science, vol. 28,
no. 1, pp. 23-48, 1984. :doi:`10.2307/2110786`
Typical result:
| Disp | 1.0 | 1.0 | 1.0 | 1.0 | 0.5 | 0.5 | 0.5 | 0.5 |
| Corr | 0.5 | 0.5 | 0.0 | 0.0 | 0.5 | 0.5 | 0.0 | 0.0 |
| Dims | 2 | 4 | 2 | 4 | 2 | 4 | 2 | 4 |
|:----------|------:|------:|------:|------:|------:|------:|------:|------:|
| Plurality | 57.5 | 65.8 | 62.2 | 78.4 | 21.7 | 24.4 | 27.2 | 41.3 |
| Runoff | 80.1 | 87.3 | 81.6 | 93.6 | 35.4 | 42.2 | 41.5 | 61.5 |
| Hare | 79.2 | 86.7 | 84.0 | 95.4 | 35.9 | 46.8 | 41.0 | 69.9 |
| Approval | 73.8 | 77.8 | 76.9 | 85.4 | 71.5 | 76.4 | 73.8 | 82.7 |
| Borda | 87.1 | 89.3 | 88.2 | 92.3 | 83.7 | 86.3 | 85.2 | 89.4 |
| Coombs | 97.8 | 97.3 | 97.9 | 98.2 | 93.5 | 92.3 | 93.8 | 94.5 |
| Black | 100.0 | 100.0 | 100.0 | 100.0 | 100.0 | 100.0 | 100.0 | 100.0 |
| SU max | 82.9 | 85.8 | 85.3 | 90.8 | 78.1 | 81.5 | 80.8 | 87.1 |
| CW | 99.7 | 99.7 | 99.7 | 99.6 | 98.9 | 98.6 | 98.7 | 98.5 |
Many of these values match the paper closely, but some are consistently off by
up to 4%.
"""
import time
from collections import Counter
import numpy as np
from tabulate import tabulate
from elsim.methods import (fptp, runoff, irv, approval, borda, coombs,
black, utility_winner, condorcet)
from elsim.elections import normal_electorate, normed_dist_utilities
from elsim.strategies import honest_rankings, approval_optimal
n = 10_000
n_voters = 201
n_cands = 5
ranked_methods = {'Plurality': fptp, 'Runoff': runoff, 'Hare': irv,
'Borda': borda, 'Coombs': coombs, 'Black': black}
rated_methods = {'SU max': utility_winner,
'Approval': lambda utilities, tiebreaker:
approval(approval_optimal(utilities), tiebreaker)}
start_time = time.monotonic()
# disp, corr, D
conditions = ((1.0, 0.5, 2),
(1.0, 0.5, 4),
(1.0, 0.0, 2),
(1.0, 0.0, 4),
(0.5, 0.5, 2),
(0.5, 0.5, 4),
(0.5, 0.0, 2),
(0.5, 0.0, 4),
)
results = []
for disp, corr, D in conditions:
print(disp, corr, D)
count = Counter()
for iteration in range(n):
v, c = normal_electorate(n_voters, n_cands, dims=D, corr=corr,
disp=disp)
"""
"Simulated utilities were normalized by range, that is, each voter's
set of utilities were linearly expanded so that the highest and lowest
utilities for each voter were 1 and 0, respectively."
TODO: standard scores vs normalized don't matter for the ranked systems
and don't affect approval much
but This is necessary for the SU Maximizer results to match Merrill's.
"""
utilities = normed_dist_utilities(v, c)
rankings = honest_rankings(utilities)
# If there is a Condorcet winner, analyze election, otherwise skip it
CW = condorcet(rankings)
if CW is not None:
count['CW'] += 1
for name, method in ranked_methods.items():
if method(rankings, tiebreaker='random') == CW:
count[name] += 1
for name, method in rated_methods.items():
if method(utilities, tiebreaker='random') == CW:
count[name] += 1
results.append(count)
elapsed_time = time.monotonic() - start_time
print('Elapsed:', time.strftime("%H:%M:%S", time.gmtime(elapsed_time)), '\n')
# Neither Tabulate nor Markdown support column span or multiple headers, but
# at least this prints to plain text in a readable way.
header = ['Disp\nCorr\nDims'] + [f'{x}\n{y}\n{z}' for x, y, z in conditions]
# Of those elections with CW, likelihood that method chooses CW
table = []
y_cw = np.array([c['CW'] for c in results])
for method in ('Plurality', 'Runoff', 'Hare', 'Approval', 'Borda', 'Coombs',
'Black', 'SU max'):
y = np.array([c[method] for c in results])
table.append([method, *(y/y_cw*100)])
# Likelihood of Condorcet Winner (normalized by n iterations)
table.append(['CW', *(y_cw/n*100)])
print(tabulate(table, header, tablefmt="pipe", floatfmt='.1f'))
| [
"elsim.elections.normed_dist_utilities",
"time.gmtime",
"elsim.strategies.approval_optimal",
"time.monotonic",
"tabulate.tabulate",
"numpy.array",
"elsim.elections.normal_electorate",
"elsim.strategies.honest_rankings",
"collections.Counter",
"elsim.methods.condorcet"
] | [((2153, 2169), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2167, 2169), False, 'import time\n'), ((4156, 4192), 'numpy.array', 'np.array', (["[c['CW'] for c in results]"], {}), "([c['CW'] for c in results])\n", (4164, 4192), True, 'import numpy as np\n'), ((2533, 2542), 'collections.Counter', 'Counter', ([], {}), '()\n', (2540, 2542), False, 'from collections import Counter\n'), ((3754, 3770), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (3768, 3770), False, 'import time\n'), ((4313, 4351), 'numpy.array', 'np.array', (['[c[method] for c in results]'], {}), '([c[method] for c in results])\n', (4321, 4351), True, 'import numpy as np\n'), ((4500, 4556), 'tabulate.tabulate', 'tabulate', (['table', 'header'], {'tablefmt': '"""pipe"""', 'floatfmt': '""".1f"""'}), "(table, header, tablefmt='pipe', floatfmt='.1f')\n", (4508, 4556), False, 'from tabulate import tabulate\n'), ((2590, 2656), 'elsim.elections.normal_electorate', 'normal_electorate', (['n_voters', 'n_cands'], {'dims': 'D', 'corr': 'corr', 'disp': 'disp'}), '(n_voters, n_cands, dims=D, corr=corr, disp=disp)\n', (2607, 2656), False, 'from elsim.elections import normal_electorate, normed_dist_utilities\n'), ((3153, 3180), 'elsim.elections.normed_dist_utilities', 'normed_dist_utilities', (['v', 'c'], {}), '(v, c)\n', (3174, 3180), False, 'from elsim.elections import normal_electorate, normed_dist_utilities\n'), ((3200, 3226), 'elsim.strategies.honest_rankings', 'honest_rankings', (['utilities'], {}), '(utilities)\n', (3215, 3226), False, 'from elsim.strategies import honest_rankings, approval_optimal\n'), ((3319, 3338), 'elsim.methods.condorcet', 'condorcet', (['rankings'], {}), '(rankings)\n', (3328, 3338), False, 'from elsim.methods import fptp, runoff, irv, approval, borda, coombs, black, utility_winner, condorcet\n'), ((3828, 3853), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (3839, 3853), False, 'import time\n'), ((2097, 2124), 
'elsim.strategies.approval_optimal', 'approval_optimal', (['utilities'], {}), '(utilities)\n', (2113, 2124), False, 'from elsim.strategies import honest_rankings, approval_optimal\n')] |
# coding: utf-8
import tensorflow as tf, numpy as np
from config import Config as BaseConfig
from models.base import Model as BaseModel
from utils.wv_utils import load_global_embedding_matrix
from models.model_ops import embedded,attention_han,bi_gru,conv_with_max_pool,build_loss,build_summaries
class Config(BaseConfig):
wv_config = {"path_w": "wv/glove/atec_word-2-300", "train_w": False,
"path_c": "wv/glove/atec_char-2-300", "train_c": False}
initial="uniform"
un_dim=128
bi_dim=64
log_dir = "logs/SenMatchSen"
save_dir = "checkpoints/SenMatchSen"
modeC=0
class Model(BaseModel):
def __init__(self,config=Config):
super(Model).__init__(config)
self.config=config
assert self.config.initial in ["uniform","normal"]
if self.config.initial == "uniform":
self.initializer=tf.glorot_uniform_initializer()
else:
self.initializer=tf.glorot_normal_initializer()
self.embeddings_w,self.embeddings_c = load_global_embedding_matrix(
self.config.wv_config['path_w'],self.config.wv_config['path_c'],self.config.global_dict)
self.build_graph()
def _encode(self,Xw,Xw_l,Xc,Xc_l,scope="encode_layers",reuse=False):
with tf.variable_scope(scope,reuse=reuse):
Xw_embedded,size_w=embedded(Xw,self.embeddings_w[0],self.embeddings_w[1],self.config.wv_config["train_w"],
scope="embedded_w")
Xc_embedded,size_c=embedded(Xc,self.embeddings_c[0],self.embeddings_c[1],self.config.wv_config["train_c"],
scope="embedded_c")
batch_size=tf.shape(Xw)[0]
# char
v0,v0_size=attention_han(Xc_embedded,self.config.un_dim,self.initializer,"attention_han_c")
v1,v1_size=bi_gru(Xc_embedded,Xc_l,(self.config.bi_dim,),2,self.initializer,1.0,"bi_gru_c")
char_v=tf.reshape(tf.concat([v0,v1],axis=-1),[batch_size,v0_size+v1_size])
# word
v0,v0_size=attention_han(Xw_embedded,self.config.un_dim,self.initializer,"attention_han_w")
v1,v1_size=bi_gru(Xw_embedded,Xw_l,(self.config.bi_dim,),2,self.initializer,1.0,"bi_gru_w")
word_v=tf.reshape(tf.concat([v0,v1],axis=-1),[batch_size,v0_size+v1_size])
# phrase
Xp_embedded,size_p=conv_with_max_pool(Xw_embedded,(2,3,4,5),size_w//4,False,
tf.nn.selu,self.initializer,"conv_w2p")
v0,v0_size=attention_han(Xp_embedded,self.config.un_dim,self.initializer,"attention_han_p")
v1,v1_size=bi_gru(Xp_embedded,Xw_l,(self.config.bi_dim,),2,self.initializer,1.0,"bi_gru_p")
phrase_v=tf.reshape(tf.concat([v0,v1],axis=-1),[batch_size,v0_size+v1_size])
return char_v,word_v,phrase_v
def _match(self,h1,h2,
mah=True,euc=True,cos=True,maxi=True,
scope="match_layers",reuse=False):
with tf.variable_scope(scope,reuse=reuse):
h=[]
if mah:
h.append(tf.abs(h1-h2))
if euc:
h.append((h1-h2)*(h1-h2))
if cos:
h.append(h1*h2)
if maxi:
h.append(tf.maximum(h1*h1,h2*h2))
h=tf.concat(h,axis=-1)
return h
def build_graph(self):
self.graph = tf.Graph()
with self.graph.as_default():
with tf.variable_scope("placeholders"):
self.X1w = tf.placeholder(dtype=tf.int32, shape=[None, None], name="sent1w_ph")
self.X2w = tf.placeholder(dtype=tf.int32, shape=[None, None], name="sent2w_ph")
self.X1c = tf.placeholder(dtype=tf.int32, shape=[None, None], name="sent1c_ph")
self.X2c = tf.placeholder(dtype=tf.int32, shape=[None, None], name="sent2c_ph")
self.X1w_mask = tf.sign(self.X1w, name="sent1w_mask")
self.X2w_mask = tf.sign(self.X2w, name="sent2w_mask")
self.X1c_mask = tf.sign(self.X1c, name="sent1c_mask")
self.X2c_mask = tf.sign(self.X2c, name="sent2c_mask")
self.X1w_l = tf.reduce_sum(self.X1w_mask, axis=-1, name="sent1w_len")
self.X2w_l = tf.reduce_sum(self.X2w_mask, axis=-1, name="sent2w_len")
self.X1c_l = tf.reduce_sum(self.X1c_mask, axis=-1, name="sent1c_len")
self.X2c_l = tf.reduce_sum(self.X2c_mask, axis=-1, name="sent2c_len")
self.y = tf.placeholder(dtype=tf.int32, shape=[None, ], name="label_ph")
self.keep_prob = tf.placeholder_with_default(1.0, shape=[], name="keep_prob_ph")
# encode
X1c,X1w,X1p = self._encode(self.X1w, self.X1w_l, self.X1c, self.X1c_l,scope="encode_layers_1")
X2c,X2w,X2p = self._encode(self.X2w, self.X2w_l, self.X2c, self.X2c_l,scope="encode_layers_2")
# match
match_c = self._match(X1c, X2c, scope="match_layers_c")
match_w = self._match(X1w, X2w, scope="match_layers_w")
match_p = self._match(X1p, X2p, scope="match_layers_p")
with tf.variable_scope("fc"):
h=tf.nn.dropout(tf.concat([match_c,match_w,match_p],axis=-1),self.keep_prob)
h1=tf.layers.dense(h,self.config.un_dim,activation=tf.nn.selu,
kernel_initializer=self.initializer)
h2=tf.layers.dense(h,self.config.un_dim,activation=tf.nn.sigmoid,
kernel_initializer=self.initializer)
h=tf.concat([h1,h2],axis=-1)
h=tf.nn.dropout(h,keep_prob=self.keep_prob)
pi = 0.01
self.logits = tf.layers.dense(h, 1,
kernel_initializer=self.initializer,
bias_initializer=tf.constant_initializer(-np.log((1 - pi) / pi)))
self.pos_prob = tf.nn.sigmoid(self.logits)
self.var_list = [v for v in tf.global_variables()]
if self.config.fine_tune:
self.var_list_trainable = [v for v in tf.trainable_variables()
if "embedded" in v.name or "fc" in v.name]
else:
self.var_list_trainable=[v for v in tf.trainable_variables()]
with tf.name_scope("Loss"):
self.loss_op= build_loss(labels=self.y, logits=self.logits,focal=self.config.focal,
alpha=self.config.alpha,gamma=self.config.gamma)
with tf.name_scope("Optimize"):
self.adam_op = tf.train.AdamOptimizer(learning_rate=self.config.init_learning_rate).\
minimize(self.loss_op,var_list=self.var_list_trainable)
self.sgd_op = tf.train.MomentumOptimizer(learning_rate=self.config.init_learning_rate,momentum=0.9).\
minimize(self.loss_op,var_list=self.var_list_trainable)
with tf.name_scope("Prediction"):
self.predicted = tf.cast(tf.greater_equal(self.pos_prob, self.config.threshold), dtype=tf.int32)
with tf.name_scope("Summary"):
self.summaries = build_summaries()
def _get_train_feed_dict(self,batch):
feed_dict = {self.X1w: np.asarray(batch["sen1w"].tolist()),
self.X2w: np.asarray(batch["sen2w"].tolist()),
# self.X1w_l: np.asarray(batch["sen1w_len"].tolist()),
# self.X2w_l: np.asarray(batch["sen2w_len"].tolist()),
self.X1c: np.asarray(batch["sen1c"].tolist()),
self.X2c: np.asarray(batch["sen2c"].tolist()),
# self.X1c_l: np.asarray(batch["sen1c_len"].tolist()),
# self.X2c_l: np.asarray(batch["sen2c_len"].tolist()),
self.y:np.asarray(batch["label"].tolist()),
self.keep_prob:1-self.config.dropout}
return feed_dict
def _get_valid_feed_dict(self,batch):
feed_dict = {self.X1w: np.asarray(batch["sen1w"].tolist()),
self.X2w: np.asarray(batch["sen2w"].tolist()),
# self.X1w_l: np.asarray(batch["sen1w_len"].tolist()),
# self.X2w_l: np.asarray(batch["sen2w_len"].tolist()),
self.X1c: np.asarray(batch["sen1c"].tolist()),
self.X2c: np.asarray(batch["sen2c"].tolist()),
# self.X1c_l: np.asarray(batch["sen1c_len"].tolist()),
# self.X2c_l: np.asarray(batch["sen2c_len"].tolist()),
self.y:np.asarray(batch["label"].tolist())}
return feed_dict
def _get_test_feed_dict(self,batch):
feed_dict = {self.X1w: np.asarray(batch["sen1w"].tolist()),
self.X2w: np.asarray(batch["sen2w"].tolist()),
# self.X1w_l: np.asarray(batch["sen1w_len"].tolist()),
# self.X2w_l: np.asarray(batch["sen2w_len"].tolist()),
self.X1c: np.asarray(batch["sen1c"].tolist()),
self.X2c: np.asarray(batch["sen2c"].tolist()),}
# self.X1c_l: np.asarray(batch["sen1c_len"].tolist()),
# self.X2c_l: np.asarray(batch["sen2c_len"].tolist())}
return feed_dict
| [
"tensorflow.reduce_sum",
"tensorflow.trainable_variables",
"tensorflow.maximum",
"models.model_ops.conv_with_max_pool",
"tensorflow.train.AdamOptimizer",
"tensorflow.global_variables",
"tensorflow.greater_equal",
"tensorflow.abs",
"models.model_ops.build_loss",
"tensorflow.placeholder_with_default... | [((1026, 1150), 'utils.wv_utils.load_global_embedding_matrix', 'load_global_embedding_matrix', (["self.config.wv_config['path_w']", "self.config.wv_config['path_c']", 'self.config.global_dict'], {}), "(self.config.wv_config['path_w'], self.config.\n wv_config['path_c'], self.config.global_dict)\n", (1054, 1150), False, 'from utils.wv_utils import load_global_embedding_matrix\n'), ((3422, 3432), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3430, 3432), True, 'import tensorflow as tf, numpy as np\n'), ((874, 905), 'tensorflow.glorot_uniform_initializer', 'tf.glorot_uniform_initializer', ([], {}), '()\n', (903, 905), True, 'import tensorflow as tf, numpy as np\n'), ((949, 979), 'tensorflow.glorot_normal_initializer', 'tf.glorot_normal_initializer', ([], {}), '()\n', (977, 979), True, 'import tensorflow as tf, numpy as np\n'), ((1271, 1308), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (1288, 1308), True, 'import tensorflow as tf, numpy as np\n'), ((1340, 1455), 'models.model_ops.embedded', 'embedded', (['Xw', 'self.embeddings_w[0]', 'self.embeddings_w[1]', "self.config.wv_config['train_w']"], {'scope': '"""embedded_w"""'}), "(Xw, self.embeddings_w[0], self.embeddings_w[1], self.config.\n wv_config['train_w'], scope='embedded_w')\n", (1348, 1455), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((1519, 1634), 'models.model_ops.embedded', 'embedded', (['Xc', 'self.embeddings_c[0]', 'self.embeddings_c[1]', "self.config.wv_config['train_c']"], {'scope': '"""embedded_c"""'}), "(Xc, self.embeddings_c[0], self.embeddings_c[1], self.config.\n wv_config['train_c'], scope='embedded_c')\n", (1527, 1634), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((1748, 1835), 'models.model_ops.attention_han', 'attention_han', 
(['Xc_embedded', 'self.config.un_dim', 'self.initializer', '"""attention_han_c"""'], {}), "(Xc_embedded, self.config.un_dim, self.initializer,\n 'attention_han_c')\n", (1761, 1835), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((1852, 1942), 'models.model_ops.bi_gru', 'bi_gru', (['Xc_embedded', 'Xc_l', '(self.config.bi_dim,)', '(2)', 'self.initializer', '(1.0)', '"""bi_gru_c"""'], {}), "(Xc_embedded, Xc_l, (self.config.bi_dim,), 2, self.initializer, 1.0,\n 'bi_gru_c')\n", (1858, 1942), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((2062, 2149), 'models.model_ops.attention_han', 'attention_han', (['Xw_embedded', 'self.config.un_dim', 'self.initializer', '"""attention_han_w"""'], {}), "(Xw_embedded, self.config.un_dim, self.initializer,\n 'attention_han_w')\n", (2075, 2149), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((2166, 2256), 'models.model_ops.bi_gru', 'bi_gru', (['Xw_embedded', 'Xw_l', '(self.config.bi_dim,)', '(2)', 'self.initializer', '(1.0)', '"""bi_gru_w"""'], {}), "(Xw_embedded, Xw_l, (self.config.bi_dim,), 2, self.initializer, 1.0,\n 'bi_gru_w')\n", (2172, 2256), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((2386, 2498), 'models.model_ops.conv_with_max_pool', 'conv_with_max_pool', (['Xw_embedded', '(2, 3, 4, 5)', '(size_w // 4)', '(False)', 'tf.nn.selu', 'self.initializer', '"""conv_w2p"""'], {}), "(Xw_embedded, (2, 3, 4, 5), size_w // 4, False, tf.nn.\n selu, self.initializer, 'conv_w2p')\n", (2404, 2498), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((2557, 2644), 'models.model_ops.attention_han', 'attention_han', (['Xp_embedded', 'self.config.un_dim', 
'self.initializer', '"""attention_han_p"""'], {}), "(Xp_embedded, self.config.un_dim, self.initializer,\n 'attention_han_p')\n", (2570, 2644), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((2661, 2751), 'models.model_ops.bi_gru', 'bi_gru', (['Xp_embedded', 'Xw_l', '(self.config.bi_dim,)', '(2)', 'self.initializer', '(1.0)', '"""bi_gru_p"""'], {}), "(Xp_embedded, Xw_l, (self.config.bi_dim,), 2, self.initializer, 1.0,\n 'bi_gru_p')\n", (2667, 2751), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((3017, 3054), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (3034, 3054), True, 'import tensorflow as tf, numpy as np\n'), ((3331, 3352), 'tensorflow.concat', 'tf.concat', (['h'], {'axis': '(-1)'}), '(h, axis=-1)\n', (3340, 3352), True, 'import tensorflow as tf, numpy as np\n'), ((6025, 6051), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['self.logits'], {}), '(self.logits)\n', (6038, 6051), True, 'import tensorflow as tf, numpy as np\n'), ((1690, 1702), 'tensorflow.shape', 'tf.shape', (['Xw'], {}), '(Xw)\n', (1698, 1702), True, 'import tensorflow as tf, numpy as np\n'), ((1963, 1991), 'tensorflow.concat', 'tf.concat', (['[v0, v1]'], {'axis': '(-1)'}), '([v0, v1], axis=-1)\n', (1972, 1991), True, 'import tensorflow as tf, numpy as np\n'), ((2277, 2305), 'tensorflow.concat', 'tf.concat', (['[v0, v1]'], {'axis': '(-1)'}), '([v0, v1], axis=-1)\n', (2286, 2305), True, 'import tensorflow as tf, numpy as np\n'), ((2774, 2802), 'tensorflow.concat', 'tf.concat', (['[v0, v1]'], {'axis': '(-1)'}), '([v0, v1], axis=-1)\n', (2783, 2802), True, 'import tensorflow as tf, numpy as np\n'), ((3488, 3521), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""placeholders"""'], {}), "('placeholders')\n", (3505, 3521), True, 'import tensorflow as tf, numpy as np\n'), ((3550, 
3618), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, None]', 'name': '"""sent1w_ph"""'}), "(dtype=tf.int32, shape=[None, None], name='sent1w_ph')\n", (3564, 3618), True, 'import tensorflow as tf, numpy as np\n'), ((3646, 3714), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, None]', 'name': '"""sent2w_ph"""'}), "(dtype=tf.int32, shape=[None, None], name='sent2w_ph')\n", (3660, 3714), True, 'import tensorflow as tf, numpy as np\n'), ((3742, 3810), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, None]', 'name': '"""sent1c_ph"""'}), "(dtype=tf.int32, shape=[None, None], name='sent1c_ph')\n", (3756, 3810), True, 'import tensorflow as tf, numpy as np\n'), ((3838, 3906), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, None]', 'name': '"""sent2c_ph"""'}), "(dtype=tf.int32, shape=[None, None], name='sent2c_ph')\n", (3852, 3906), True, 'import tensorflow as tf, numpy as np\n'), ((3939, 3976), 'tensorflow.sign', 'tf.sign', (['self.X1w'], {'name': '"""sent1w_mask"""'}), "(self.X1w, name='sent1w_mask')\n", (3946, 3976), True, 'import tensorflow as tf, numpy as np\n'), ((4009, 4046), 'tensorflow.sign', 'tf.sign', (['self.X2w'], {'name': '"""sent2w_mask"""'}), "(self.X2w, name='sent2w_mask')\n", (4016, 4046), True, 'import tensorflow as tf, numpy as np\n'), ((4079, 4116), 'tensorflow.sign', 'tf.sign', (['self.X1c'], {'name': '"""sent1c_mask"""'}), "(self.X1c, name='sent1c_mask')\n", (4086, 4116), True, 'import tensorflow as tf, numpy as np\n'), ((4149, 4186), 'tensorflow.sign', 'tf.sign', (['self.X2c'], {'name': '"""sent2c_mask"""'}), "(self.X2c, name='sent2c_mask')\n", (4156, 4186), True, 'import tensorflow as tf, numpy as np\n'), ((4216, 4272), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.X1w_mask'], {'axis': '(-1)', 'name': '"""sent1w_len"""'}), "(self.X1w_mask, axis=-1, name='sent1w_len')\n", (4229, 4272), 
True, 'import tensorflow as tf, numpy as np\n'), ((4302, 4358), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.X2w_mask'], {'axis': '(-1)', 'name': '"""sent2w_len"""'}), "(self.X2w_mask, axis=-1, name='sent2w_len')\n", (4315, 4358), True, 'import tensorflow as tf, numpy as np\n'), ((4388, 4444), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.X1c_mask'], {'axis': '(-1)', 'name': '"""sent1c_len"""'}), "(self.X1c_mask, axis=-1, name='sent1c_len')\n", (4401, 4444), True, 'import tensorflow as tf, numpy as np\n'), ((4474, 4530), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.X2c_mask'], {'axis': '(-1)', 'name': '"""sent2c_len"""'}), "(self.X2c_mask, axis=-1, name='sent2c_len')\n", (4487, 4530), True, 'import tensorflow as tf, numpy as np\n'), ((4556, 4617), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None]', 'name': '"""label_ph"""'}), "(dtype=tf.int32, shape=[None], name='label_ph')\n", (4570, 4617), True, 'import tensorflow as tf, numpy as np\n'), ((4653, 4716), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)'], {'shape': '[]', 'name': '"""keep_prob_ph"""'}), "(1.0, shape=[], name='keep_prob_ph')\n", (4680, 4716), True, 'import tensorflow as tf, numpy as np\n'), ((5196, 5219), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fc"""'], {}), "('fc')\n", (5213, 5219), True, 'import tensorflow as tf, numpy as np\n'), ((5333, 5435), 'tensorflow.layers.dense', 'tf.layers.dense', (['h', 'self.config.un_dim'], {'activation': 'tf.nn.selu', 'kernel_initializer': 'self.initializer'}), '(h, self.config.un_dim, activation=tf.nn.selu,\n kernel_initializer=self.initializer)\n', (5348, 5435), True, 'import tensorflow as tf, numpy as np\n'), ((5484, 5589), 'tensorflow.layers.dense', 'tf.layers.dense', (['h', 'self.config.un_dim'], {'activation': 'tf.nn.sigmoid', 'kernel_initializer': 'self.initializer'}), '(h, self.config.un_dim, activation=tf.nn.sigmoid,\n kernel_initializer=self.initializer)\n', 
(5499, 5589), True, 'import tensorflow as tf, numpy as np\n'), ((5637, 5665), 'tensorflow.concat', 'tf.concat', (['[h1, h2]'], {'axis': '(-1)'}), '([h1, h2], axis=-1)\n', (5646, 5665), True, 'import tensorflow as tf, numpy as np\n'), ((5682, 5724), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h'], {'keep_prob': 'self.keep_prob'}), '(h, keep_prob=self.keep_prob)\n', (5695, 5724), True, 'import tensorflow as tf, numpy as np\n'), ((6432, 6453), 'tensorflow.name_scope', 'tf.name_scope', (['"""Loss"""'], {}), "('Loss')\n", (6445, 6453), True, 'import tensorflow as tf, numpy as np\n'), ((6485, 6609), 'models.model_ops.build_loss', 'build_loss', ([], {'labels': 'self.y', 'logits': 'self.logits', 'focal': 'self.config.focal', 'alpha': 'self.config.alpha', 'gamma': 'self.config.gamma'}), '(labels=self.y, logits=self.logits, focal=self.config.focal,\n alpha=self.config.alpha, gamma=self.config.gamma)\n', (6495, 6609), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((6663, 6688), 'tensorflow.name_scope', 'tf.name_scope', (['"""Optimize"""'], {}), "('Optimize')\n", (6676, 6688), True, 'import tensorflow as tf, numpy as np\n'), ((7080, 7107), 'tensorflow.name_scope', 'tf.name_scope', (['"""Prediction"""'], {}), "('Prediction')\n", (7093, 7107), True, 'import tensorflow as tf, numpy as np\n'), ((7240, 7264), 'tensorflow.name_scope', 'tf.name_scope', (['"""Summary"""'], {}), "('Summary')\n", (7253, 7264), True, 'import tensorflow as tf, numpy as np\n'), ((7299, 7316), 'models.model_ops.build_summaries', 'build_summaries', ([], {}), '()\n', (7314, 7316), False, 'from models.model_ops import embedded, attention_han, bi_gru, conv_with_max_pool, build_loss, build_summaries\n'), ((3117, 3132), 'tensorflow.abs', 'tf.abs', (['(h1 - h2)'], {}), '(h1 - h2)\n', (3123, 3132), True, 'import tensorflow as tf, numpy as np\n'), ((3292, 3320), 'tensorflow.maximum', 'tf.maximum', (['(h1 * h1)', '(h2 * h2)'], {}), '(h1 * 
h1, h2 * h2)\n', (3302, 3320), True, 'import tensorflow as tf, numpy as np\n'), ((5253, 5300), 'tensorflow.concat', 'tf.concat', (['[match_c, match_w, match_p]'], {'axis': '(-1)'}), '([match_c, match_w, match_p], axis=-1)\n', (5262, 5300), True, 'import tensorflow as tf, numpy as np\n'), ((6092, 6113), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6111, 6113), True, 'import tensorflow as tf, numpy as np\n'), ((7150, 7204), 'tensorflow.greater_equal', 'tf.greater_equal', (['self.pos_prob', 'self.config.threshold'], {}), '(self.pos_prob, self.config.threshold)\n', (7166, 7204), True, 'import tensorflow as tf, numpy as np\n'), ((6207, 6231), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (6229, 6231), True, 'import tensorflow as tf, numpy as np\n'), ((6388, 6412), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (6410, 6412), True, 'import tensorflow as tf, numpy as np\n'), ((6721, 6789), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.config.init_learning_rate'}), '(learning_rate=self.config.init_learning_rate)\n', (6743, 6789), True, 'import tensorflow as tf, numpy as np\n'), ((6898, 6988), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'self.config.init_learning_rate', 'momentum': '(0.9)'}), '(learning_rate=self.config.init_learning_rate,\n momentum=0.9)\n', (6924, 6988), True, 'import tensorflow as tf, numpy as np\n'), ((5973, 5994), 'numpy.log', 'np.log', (['((1 - pi) / pi)'], {}), '((1 - pi) / pi)\n', (5979, 5994), True, 'import tensorflow as tf, numpy as np\n')] |
import numpy
import scipy.sparse
import unittest
import pytest
from xminds.lib.utils import deep_hash, classproperty, retry
def test_deep_hash():
val = {
'arrays': {
'int': numpy.arange(1000),
'struct': numpy.array([(11, 12), (21, 22)], [('a', int), ('b', int)]),
'struct-sliced': numpy.array([(11, 12), (21, 22)], [('a', int), ('b', int)])[['a']],
'transposed': numpy.arange(3*5).reshape((3, 5)).T,
},
'sparse': {
'csr': scipy.sparse.random(5, 4, 0.1, 'csr'),
'csc': scipy.sparse.random(5, 4, 0.1, 'csc'),
'coo': scipy.sparse.random(5, 4, 0.1, 'coo'),
},
'scalars': [1, 2.5, 'str', b'bytes', numpy.float32(1.5), numpy.bytes_(b'npbytes')]
}
h1 = deep_hash(val)
# test prefix works
assert h1 != deep_hash(val, prefix=b'my-prefix')
# test modify something inside the array
val['arrays']['int'][50] = 999
h2 = deep_hash(val)
assert h2 != h1
# test `fmt`
h_long = deep_hash(val, fmt='long')
assert isinstance(h_long, int) and h_long.bit_length(
) <= 160 and h_long.bit_length() > 64
h_int = deep_hash(val, fmt='int')
assert isinstance(h_int, int) and h_int.bit_length(
) <= 64 and h_int.bit_length() > 32
h_hex = deep_hash(val, fmt='hex40')
assert isinstance(h_hex, str) and len(h_hex) == 40
h_bytes = deep_hash(val, fmt='bytes20')
assert isinstance(h_bytes, bytes) and len(h_bytes) == 20
class ClassPropertyTestCase(unittest.TestCase):
def test_access(self):
class MyTest(object):
@classproperty
def name(cls):
return cls.__name__
assert MyTest.name == 'MyTest'
instance = MyTest()
assert instance.name == 'MyTest'
def test_setter(self):
class MyTest(object):
_val = 42
@classproperty
def val(cls):
return cls._val
@val.setter
def val(cls, value):
cls._val = value
assert MyTest.val == 42
instance = MyTest()
assert instance.val == 42
MyTest.val = 43
assert MyTest.val == 43
assert instance.val == 43
def test_retry():
lst = []
@retry(base=0.0001, max_retry=3)
def dummy(arg):
lst.append(arg)
raise ValueError('failed')
try:
dummy(2)
except ValueError:
pass
assert lst == [2, 2, 2, 2]
def test_not_retry():
lst = []
@retry(base=0.0001, max_retry=3)
def dummy(arg):
lst.append(arg)
raise ValueError('failed')
try:
dummy(2, __retry__=False)
except ValueError:
pass
assert lst == [2]
| [
"xminds.lib.utils.deep_hash",
"numpy.float32",
"xminds.lib.utils.retry",
"numpy.array",
"numpy.arange",
"numpy.bytes_"
] | [((788, 802), 'xminds.lib.utils.deep_hash', 'deep_hash', (['val'], {}), '(val)\n', (797, 802), False, 'from xminds.lib.utils import deep_hash, classproperty, retry\n'), ((969, 983), 'xminds.lib.utils.deep_hash', 'deep_hash', (['val'], {}), '(val)\n', (978, 983), False, 'from xminds.lib.utils import deep_hash, classproperty, retry\n'), ((1034, 1060), 'xminds.lib.utils.deep_hash', 'deep_hash', (['val'], {'fmt': '"""long"""'}), "(val, fmt='long')\n", (1043, 1060), False, 'from xminds.lib.utils import deep_hash, classproperty, retry\n'), ((1173, 1198), 'xminds.lib.utils.deep_hash', 'deep_hash', (['val'], {'fmt': '"""int"""'}), "(val, fmt='int')\n", (1182, 1198), False, 'from xminds.lib.utils import deep_hash, classproperty, retry\n'), ((1307, 1334), 'xminds.lib.utils.deep_hash', 'deep_hash', (['val'], {'fmt': '"""hex40"""'}), "(val, fmt='hex40')\n", (1316, 1334), False, 'from xminds.lib.utils import deep_hash, classproperty, retry\n'), ((1404, 1433), 'xminds.lib.utils.deep_hash', 'deep_hash', (['val'], {'fmt': '"""bytes20"""'}), "(val, fmt='bytes20')\n", (1413, 1433), False, 'from xminds.lib.utils import deep_hash, classproperty, retry\n'), ((2282, 2313), 'xminds.lib.utils.retry', 'retry', ([], {'base': '(0.0001)', 'max_retry': '(3)'}), '(base=0.0001, max_retry=3)\n', (2287, 2313), False, 'from xminds.lib.utils import deep_hash, classproperty, retry\n'), ((2531, 2562), 'xminds.lib.utils.retry', 'retry', ([], {'base': '(0.0001)', 'max_retry': '(3)'}), '(base=0.0001, max_retry=3)\n', (2536, 2562), False, 'from xminds.lib.utils import deep_hash, classproperty, retry\n'), ((844, 879), 'xminds.lib.utils.deep_hash', 'deep_hash', (['val'], {'prefix': "b'my-prefix'"}), "(val, prefix=b'my-prefix')\n", (853, 879), False, 'from xminds.lib.utils import deep_hash, classproperty, retry\n'), ((203, 221), 'numpy.arange', 'numpy.arange', (['(1000)'], {}), '(1000)\n', (215, 221), False, 'import numpy\n'), ((245, 304), 'numpy.array', 'numpy.array', (['[(11, 12), (21, 22)]', "[('a', 
int), ('b', int)]"], {}), "([(11, 12), (21, 22)], [('a', int), ('b', int)])\n", (256, 304), False, 'import numpy\n'), ((727, 745), 'numpy.float32', 'numpy.float32', (['(1.5)'], {}), '(1.5)\n', (740, 745), False, 'import numpy\n'), ((747, 771), 'numpy.bytes_', 'numpy.bytes_', (["b'npbytes'"], {}), "(b'npbytes')\n", (759, 771), False, 'import numpy\n'), ((335, 394), 'numpy.array', 'numpy.array', (['[(11, 12), (21, 22)]', "[('a', int), ('b', int)]"], {}), "([(11, 12), (21, 22)], [('a', int), ('b', int)])\n", (346, 394), False, 'import numpy\n'), ((429, 448), 'numpy.arange', 'numpy.arange', (['(3 * 5)'], {}), '(3 * 5)\n', (441, 448), False, 'import numpy\n')] |
# Importing Libraries
from foolbox.criteria import TargetClass
from foolbox.criteria import Misclassification
from numpy import linalg as LA
import matplotlib.pyplot as plt
from foolbox.attacks import CarliniWagnerL2Attack
from foolbox.attacks import SaliencyMapAttack
from foolbox.attacks import GradientSignAttack
from foolbox.v1.attacks import FGSM
from foolbox.v1.attacks import MomentumIterativeAttack
#from foolbox.v1.attacks import GradientSignAttack
from skimage.measure import compare_ssim
from keras import layers, models
import numpy as np
from keras.utils import np_utils
from keras import backend as K
from keras.applications import vgg16
import tensorflow as tf
import pickle
import foolbox
import json
import timeit
start = timeit.default_timer()
import cvxpy as cp
from numpy import linalg as LA
from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl,get_S_T_S_T_comp_from_lbl,Imperceptibility,ADMM_,Attack_performance,cvxPy_pert_gen
########################################################################
############################################### Fashion MNIST dataset import
############################################################################
#tf.keras.backend.set_learning_phase(False)
# Keras Parameters
batch_size = 28
nb_classes = 10
nb_epoch = 2
img_rows, img_col = 28, 28
img_channels = 1
# download mnist data and split into train and test sets
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
print("")
y_train = np_utils.to_categorical(train_labels,10)
y_test = np_utils.to_categorical(test_labels,10)
X_train_1d = X_train.reshape(60000,784,1)
X_test_1d = X_test.reshape(10000,784,1)
################################################################################
############## Loading the model and preprocessing #####################
######################################################################################
########### load the propoer model here
model1 = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')
model1.summary()
####################################################################
####################################################################################
############RE-LABEL TRAIN_LABELS AND TEST_LABELS (Using a dictonary) #########################
######################################################################################
dic5 = {2:0, 4:0, 6:0, 5:2, 7:2, 9:2, 8:4}
train_labels_5 = [dic5[x] if x in dic5.keys() else x for x in train_labels]
test_labels_5 = [dic5[x] if x in dic5.keys() else x for x in test_labels]
'''
your mapping is different than mine. Here is the mapping from the paper you gave me.
0 ==> {0,2,4,6} top
1 ==> {1} bottom
2 ==> {5,7,9} shoes
3 ==> {3} dress
4 ==> {8}
'''
######################################################################################
#####################################################################
################### loading Grads and testing the vectorization
#####################################################################
Grad_MNIST_model1 = pickle.load(open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/Grad_MNIST_model1_1d_before_SM.p","rb"))
disc_values = pickle.load(open("/home/user/.PyCharmCE2019.1/config/scratches/saved_models_variables/disc_values_before_SM.p","rb"))
################################################################################
##################################### BUILDING THE ALG - 1 PROBLEM WITH CVXPY ######
################################################################################
stop = timeit.default_timer()
print('Time: ', stop - start)
######## to save eta, ceate a vectorized empty np array of size 10000,28*28,1
number_of_observations = 1000
### tensors to save and to calc CApert, CApert_sup, ELA, RLA, and sigmas
eta_vec = np.zeros(shape=(number_of_observations,28*28,1))
imperceptibility_rho_2_save = np.nan*np.ones(shape=(number_of_observations,1))
imperceptibility_rho_i_save = np.nan*np.ones(shape=(number_of_observations,1))
imperceptibility_sssim_save = np.nan*np.ones(shape=(number_of_observations,1))
pred_pert_lbls = np.zeros(shape=(number_of_observations))
pred_pert_sup_lbls = np.zeros(shape=(number_of_observations))
pred_lbls = np.zeros(shape=(number_of_observations))
cnt = 0
#for id in [10]:
for id in range(number_of_observations):
######## LET THE INPUT IMAGE be:
id = id
input_image = X_test_1d[id]
input_image_reshaped = input_image.reshape(784)
######## get tru_lbl
tru_lbl = test_labels[id]
######## get tru_sup_lbl
tru_sup_lbl = sup_lbl_from_lbl(tru_lbl)
######## get pred_lbl
pred_lbl = np.argmax(model1(input_image.reshape(1, 784, 1)))
pred_lbls[id] = pred_lbl
######## get_pred_sup_lbl
pred_sup_lbl = sup_lbl_from_lbl(pred_lbl)
######## get S_T and S_T_comp: this is based on the tru lbl not the predicted lbl
[S_T,S_T_comp] = get_S_T_S_T_comp_from_lbl(tru_lbl)
######## get vectozied gradients and disc values of of the disgnated lbl
Grad_MNIST_model1_vec_disgnated = Grad_MNIST_model1[id,:,:]
#print('Grad_MNIST_model1_vec_disgnated = ' , Grad_MNIST_model1_vec_disgnated.shape)
disc_values_disgnated = disc_values[id,:]
###### for j \in S_T_comp,
#print('break')
###### SAVE eta[id] of each j \in S_T_comp
# initial
eta_vec_j = np.zeros(shape=(10,28*28,1))
# distance initial
D_j = 1000000*np.ones(shape=(10, 1))
for jj in S_T_comp:
j_star = jj
###### solve the cvx problem and save the eta_j and D_j (make j as j_star and below is the same as for the NOC)
########
epsilon = 10
####### get matrix G \in N \times |S_T| and b \in |S_T|, where G_columns = [grad_j_star - grad_l], for all l \in S_T
n = 28*28
card_S_T = len(S_T) # cardinality of the set S_T
mat_G = np.zeros(shape=(n,card_S_T)) # init mat_G
vec_b_wout = np.zeros(shape=(card_S_T,1) )
temp_jstar = Grad_MNIST_model1_vec_disgnated[j_star , : ,:]
temp_jstar = temp_jstar.reshape(n,)
b_jstar = disc_values_disgnated[j_star]
#b_jstar = b_jstar.reshape(1,)
for i in range(card_S_T):
temp1 = Grad_MNIST_model1_vec_disgnated[S_T[i] , : ,:]
temp1 = temp1.reshape(n,)
b_l = disc_values_disgnated[S_T[i]]
# b_l = b_l.reshape(1,)
mat_G[:,i] = temp_jstar - temp1
vec_b_wout[ i] = b_l - b_jstar
vec_b = vec_b_wout + epsilon
############# use CVXPy to generate perturbations
eta_cvx = cvxPy_pert_gen(input_image,mat_G, vec_b)
########## save, then reshape eta to be a matrix:
#eta_cvx = np.asarray(eta_cvx.value)
eta_vec[id,:,:] = eta_cvx.reshape(784,1)
# save eta for each j \in S_T_comp
eta_vec_j[jj,:,:] = eta_cvx.reshape(n,1)
# get D_j if eta_vec_j is not zeros
# if np.sum(eta_vec_j[jj,:,:]) == 0:
# D_j[jj] = 1000000
# else:
#
# #### ADD HERE THE LOGIC TO CHECK IF T{k(x)} != T{k(x+\eta)}, if yes, then get D_j[jj] = norm, if not, leave it as it is initially defined... THANKS mother fuckars
#
# D_j[jj] = LA.norm(eta_cvx,2)
image_pert_temp = eta_vec_j[jj,:,:] + input_image
if np.sum(eta_vec_j[jj,:,:]) != 0 and sup_lbl_from_lbl(np.argmax(model1(image_pert_temp.reshape(1, 784, 1)))) != pred_sup_lbl:
D_j[jj] = LA.norm(eta_cvx, 2)
###### algorithm - 1 shit in which we should choose the best candidate from all jj \in S_T_comp
###### The logic for checking wether eta is good AND
if np.all(D_j == 1000000.0):
eta_cvx = np.zeros(shape=(784,1))
winning_label = None
else:
winning_label = np.argmin(D_j)
eta_cvx = eta_vec_j[winning_label,:,:]
###### after choosing, continue with below for the attack success statistics
image_pert = eta_cvx + input_image
# pred_pert_lbl
pred_pert_lbl = np.argmax(model1(image_pert.reshape(1, 784, 1)))
pred_pert_lbls[id] = pred_pert_lbl
# pred_pert_sup_lbl
pred_pert_sup_lbl = sup_lbl_from_lbl(pred_pert_lbl)
pred_pert_sup_lbls[id] = pred_pert_sup_lbl
# calculate the imperceptibility:
#rho_2 = LA.norm(eta_cvx.reshape(784)) / LA.norm(input_image.reshape(784))
rho_2 = Imperceptibility(input_image,eta_cvx)[0]
rho_inf = Imperceptibility(input_image, eta_cvx)[1]
D_ssim = Imperceptibility(input_image,eta_cvx)[2]
#if pred_sup_lbl != pred_pert_sup_lbl and rho_2 >= 0.000001 and rho_2 <= 0.35:
if pred_sup_lbl != pred_pert_sup_lbl:
cnt = cnt+1
imperceptibility_rho_2_save[id] = rho_2
imperceptibility_rho_i_save[id] = rho_inf
imperceptibility_sssim_save[id] = D_ssim
##### logger:
print('id = ', id, 'winning_label = ', winning_label, 'pred_sup_lbl = ', pred_sup_lbl, 'predecited_perturbed_super_lbl = ',
pred_pert_sup_lbl, ' imperceptibility = ', rho_2, 'count = ', cnt)
attack_success = cnt / number_of_observations
print('ATTACK SUCCESS = ' , attack_success*100 , '%')
CA,CA_sup,CA_pert, CA_pert_sup, RLA, ELA,RLA_sup, ELA_sup , sigma_2, sigma_inf, sigma_s = \
Attack_performance(test_labels[0:number_of_observations] ,
pred_lbls,
pred_pert_lbls ,
imperceptibility_rho_2_save,
imperceptibility_rho_i_save,
imperceptibility_sssim_save)
# attack performace
print('Number of observations = ', number_of_observations ,
'\n CA_pert = ' , CA_pert,
"\n CA_pert_sup = " , CA_pert_sup ,
"\n RLA = " , RLA ,
"\n ELA = " , ELA,
'\n RLA_sup = ' , RLA_sup,
'\n ELA_sup = ' , ELA_sup,
"\n sigma_2 = " , sigma_2 ,
"\n sigma_inf = " , sigma_inf ,
'\n ssim = ' , sigma_s)
stop = timeit.default_timer()
print('Time: ', stop - start)
# # #####################################################################
# # ################### Plotting images
# # #####################################################################
# print("")
#
# iidd = 55
#
# plt.figure()
# plt.subplot(1,3,1)
# plt.title('Original')
# plt.imshow(X_test_1d[iidd].reshape(28,28),cmap='gray')
# plt.axis('off')
#
#
# plt.subplot(1,3,2)
# plt.title('pertubations')
# plt.imshow(eta_vec[iidd,:,:].reshape(28,28),cmap='gray')
# plt.axis('off')
#
#
# plt.subplot(1,3,3)
# plt.title('perturbed image')
# perturbed_image = X_test_1d[iidd] + eta_vec[iidd,:,:]
# plt.imshow(perturbed_image.reshape(28,28),cmap='gray')
# plt.axis('off')
#
#
# plt.show()
# # ########################################################################
print('break here')
# pickle.dump(eta_vec, open("eta_alg_1_cvx.p", "wb"))
# pickle.dump(pred_pert_sup_lbls, open("pred_pert_sup_lbls_alg_1_cvx.p", "wb"))
# pickle.dump(pred_lbls, open("pred_lbls_model_1d.p", "wb"))
print('break here') | [
"ISMAIL_big_picture_journal_lib.sup_lbl_from_lbl",
"tensorflow.keras.models.load_model",
"ISMAIL_big_picture_journal_lib.Imperceptibility",
"numpy.sum",
"timeit.default_timer",
"ISMAIL_big_picture_journal_lib.cvxPy_pert_gen",
"numpy.zeros",
"numpy.ones",
"numpy.argmin",
"keras.utils.np_utils.to_ca... | [((755, 777), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (775, 777), False, 'import timeit\n'), ((1472, 1515), 'tensorflow.keras.datasets.fashion_mnist.load_data', 'tf.keras.datasets.fashion_mnist.load_data', ([], {}), '()\n', (1513, 1515), True, 'import tensorflow as tf\n'), ((1817, 1858), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['train_labels', '(10)'], {}), '(train_labels, 10)\n', (1840, 1858), False, 'from keras.utils import np_utils\n'), ((1867, 1907), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['test_labels', '(10)'], {}), '(test_labels, 10)\n', (1890, 1907), False, 'from keras.utils import np_utils\n'), ((2285, 2357), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""my_model_1d_last_dense_activation_seperate"""'], {}), "('my_model_1d_last_dense_activation_seperate')\n", (2311, 2357), True, 'import tensorflow as tf\n'), ((3935, 3957), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3955, 3957), False, 'import timeit\n'), ((4201, 4253), 'numpy.zeros', 'np.zeros', ([], {'shape': '(number_of_observations, 28 * 28, 1)'}), '(shape=(number_of_observations, 28 * 28, 1))\n', (4209, 4253), True, 'import numpy as np\n'), ((4517, 4555), 'numpy.zeros', 'np.zeros', ([], {'shape': 'number_of_observations'}), '(shape=number_of_observations)\n', (4525, 4555), True, 'import numpy as np\n'), ((4588, 4626), 'numpy.zeros', 'np.zeros', ([], {'shape': 'number_of_observations'}), '(shape=number_of_observations)\n', (4596, 4626), True, 'import numpy as np\n'), ((4659, 4697), 'numpy.zeros', 'np.zeros', ([], {'shape': 'number_of_observations'}), '(shape=number_of_observations)\n', (4667, 4697), True, 'import numpy as np\n'), ((9693, 9872), 'ISMAIL_big_picture_journal_lib.Attack_performance', 'Attack_performance', (['test_labels[0:number_of_observations]', 'pred_lbls', 'pred_pert_lbls', 'imperceptibility_rho_2_save', 
'imperceptibility_rho_i_save', 'imperceptibility_sssim_save'], {}), '(test_labels[0:number_of_observations], pred_lbls,\n pred_pert_lbls, imperceptibility_rho_2_save,\n imperceptibility_rho_i_save, imperceptibility_sssim_save)\n', (9711, 9872), False, 'from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl, get_S_T_S_T_comp_from_lbl, Imperceptibility, ADMM_, Attack_performance, cvxPy_pert_gen\n'), ((10364, 10386), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (10384, 10386), False, 'import timeit\n'), ((4287, 4329), 'numpy.ones', 'np.ones', ([], {'shape': '(number_of_observations, 1)'}), '(shape=(number_of_observations, 1))\n', (4294, 4329), True, 'import numpy as np\n'), ((4366, 4408), 'numpy.ones', 'np.ones', ([], {'shape': '(number_of_observations, 1)'}), '(shape=(number_of_observations, 1))\n', (4373, 4408), True, 'import numpy as np\n'), ((4445, 4487), 'numpy.ones', 'np.ones', ([], {'shape': '(number_of_observations, 1)'}), '(shape=(number_of_observations, 1))\n', (4452, 4487), True, 'import numpy as np\n'), ((5006, 5031), 'ISMAIL_big_picture_journal_lib.sup_lbl_from_lbl', 'sup_lbl_from_lbl', (['tru_lbl'], {}), '(tru_lbl)\n', (5022, 5031), False, 'from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl, get_S_T_S_T_comp_from_lbl, Imperceptibility, ADMM_, Attack_performance, cvxPy_pert_gen\n'), ((5202, 5228), 'ISMAIL_big_picture_journal_lib.sup_lbl_from_lbl', 'sup_lbl_from_lbl', (['pred_lbl'], {}), '(pred_lbl)\n', (5218, 5228), False, 'from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl, get_S_T_S_T_comp_from_lbl, Imperceptibility, ADMM_, Attack_performance, cvxPy_pert_gen\n'), ((5337, 5371), 'ISMAIL_big_picture_journal_lib.get_S_T_S_T_comp_from_lbl', 'get_S_T_S_T_comp_from_lbl', (['tru_lbl'], {}), '(tru_lbl)\n', (5362, 5371), False, 'from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl, get_S_T_S_T_comp_from_lbl, Imperceptibility, ADMM_, Attack_performance, cvxPy_pert_gen\n'), ((5783, 5815), 'numpy.zeros', 
'np.zeros', ([], {'shape': '(10, 28 * 28, 1)'}), '(shape=(10, 28 * 28, 1))\n', (5791, 5815), True, 'import numpy as np\n'), ((8114, 8138), 'numpy.all', 'np.all', (['(D_j == 1000000.0)'], {}), '(D_j == 1000000.0)\n', (8120, 8138), True, 'import numpy as np\n'), ((8608, 8639), 'ISMAIL_big_picture_journal_lib.sup_lbl_from_lbl', 'sup_lbl_from_lbl', (['pred_pert_lbl'], {}), '(pred_pert_lbl)\n', (8624, 8639), False, 'from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl, get_S_T_S_T_comp_from_lbl, Imperceptibility, ADMM_, Attack_performance, cvxPy_pert_gen\n'), ((5859, 5881), 'numpy.ones', 'np.ones', ([], {'shape': '(10, 1)'}), '(shape=(10, 1))\n', (5866, 5881), True, 'import numpy as np\n'), ((6309, 6338), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, card_S_T)'}), '(shape=(n, card_S_T))\n', (6317, 6338), True, 'import numpy as np\n'), ((6373, 6402), 'numpy.zeros', 'np.zeros', ([], {'shape': '(card_S_T, 1)'}), '(shape=(card_S_T, 1))\n', (6381, 6402), True, 'import numpy as np\n'), ((7042, 7083), 'ISMAIL_big_picture_journal_lib.cvxPy_pert_gen', 'cvxPy_pert_gen', (['input_image', 'mat_G', 'vec_b'], {}), '(input_image, mat_G, vec_b)\n', (7056, 7083), False, 'from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl, get_S_T_S_T_comp_from_lbl, Imperceptibility, ADMM_, Attack_performance, cvxPy_pert_gen\n'), ((8158, 8182), 'numpy.zeros', 'np.zeros', ([], {'shape': '(784, 1)'}), '(shape=(784, 1))\n', (8166, 8182), True, 'import numpy as np\n'), ((8245, 8259), 'numpy.argmin', 'np.argmin', (['D_j'], {}), '(D_j)\n', (8254, 8259), True, 'import numpy as np\n'), ((8819, 8857), 'ISMAIL_big_picture_journal_lib.Imperceptibility', 'Imperceptibility', (['input_image', 'eta_cvx'], {}), '(input_image, eta_cvx)\n', (8835, 8857), False, 'from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl, get_S_T_S_T_comp_from_lbl, Imperceptibility, ADMM_, Attack_performance, cvxPy_pert_gen\n'), ((8874, 8912), 'ISMAIL_big_picture_journal_lib.Imperceptibility', 'Imperceptibility', 
(['input_image', 'eta_cvx'], {}), '(input_image, eta_cvx)\n', (8890, 8912), False, 'from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl, get_S_T_S_T_comp_from_lbl, Imperceptibility, ADMM_, Attack_performance, cvxPy_pert_gen\n'), ((8930, 8968), 'ISMAIL_big_picture_journal_lib.Imperceptibility', 'Imperceptibility', (['input_image', 'eta_cvx'], {}), '(input_image, eta_cvx)\n', (8946, 8968), False, 'from ISMAIL_big_picture_journal_lib import sup_lbl_from_lbl, get_S_T_S_T_comp_from_lbl, Imperceptibility, ADMM_, Attack_performance, cvxPy_pert_gen\n'), ((7927, 7946), 'numpy.linalg.norm', 'LA.norm', (['eta_cvx', '(2)'], {}), '(eta_cvx, 2)\n', (7934, 7946), True, 'from numpy import linalg as LA\n'), ((7781, 7808), 'numpy.sum', 'np.sum', (['eta_vec_j[jj, :, :]'], {}), '(eta_vec_j[jj, :, :])\n', (7787, 7808), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""util.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1BpuYZIMKiZOv-FsGhPqYLJq28NTe7PxO
"""
import pandas as pd
import numpy as np
import json
import re
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.stem.snowball import EnglishStemmer
from nltk.stem.snowball import GermanStemmer
from enum import Enum
class DocType(Enum):
NON_COMMENT = -1
ARTICLE = 0
COMMENT = 1
ALL = 2
class Language(Enum):
EN = 1
DE = 2
class Polarity(Enum):
NEUTRAL = 0
POSITIVE = 1
NEGATIVE = 2
class Source():
NYTIMES = 'nytimes'
QUORA = 'quora'
SPIEGEL = 'spiegel'
class Stopwords():
EN_NLTK = stopwords.words('english')
DE_NLTK = stopwords.words('german')
# The most high term-frequency words with term-frequency larger than 5000
EN_TFHIGH = frozenset(['the', 'and', 'to', 'of', 'is', 'in', 'that', 'organic', 'it', 'are', 'not', 'for',
'you', 'food', 'as', 'have', 'be', 'on', 'with', 'they', 'or', 'but', 'this', 'from',
'more', 'we', 'do', 'can', 'there', 'if', 'by', 'at', 'all', 'foods', 'about',
'what', 'has', 'will', 'so', 'their', 'an', 'your', 'would', 'than', 'people', 'no',
'which', 'like', 'was', 'one', 'some', 'my'])
DE_TFHIGH = frozenset(['die', 'und', 'der', 'ist', 'das', 'nicht', 'von', 'in', 'es', 'sie', 'zu', 'den',
'ich', 'auch', 'mit', 'zitat', 'ein', 'sich', 'für', 'auf', 'man', 'sind', 'dass',
'aber', 'werden', 'wie', 'im', 'nur', 'oder', 'wenn', 'eine', 'so', 'bei', 'als',
'wird', 'aus', 'was', 'dem', 'noch', 'bio', 'an', 'dann', 'haben', 'kann', 'da',
'hat', 'mehr', 'wir', 'um', 'mal', 'doch', 'schon', 'ja', 'nach', 'sein', 'keine',
'immer', 'einen', 'des', 'gibt', 'hier', 'diese', 'durch'])
class OptimalKClustersConfig():
# clusters_with_garbage = ['Planting and gardening', 'Retail', 'Garbage 1', 'GMO label and bio-products', 'Garbage 2',
# 'Taste and food', 'Chemicals and cancer', 'Genetic research', 'Health and diet', 'Garbage 3',
# 'Governance and public policy', 'Meat and animal feeding', 'Agriculture', 'Price and consumption', 'Garbage 4']
clusters_with_garbage_nonum = ['Environment (pesticides & fertilizers)', 'Retailers', 'Garbage', 'GMO & organic', 'Garbage',
'Food products & taste', 'Food safety', 'Research', 'Health & nutrition', 'Garbage',
'Politics & policy & compliance', 'Animal welfare & meat consumption', 'Farming & agricultural policy & food security',
'Consumer prices & profit', 'Garbage']
clusters_with_garbage = ['Environment (pesticides & fertilizers)', 'Retailers', 'Garbage 1', 'GMO & organic', 'Garbage 2',
'Food products & taste', 'Food safety', 'Research', 'Health & nutrition', 'Garbage 3',
'Politics & policy & compliance', 'Animal welfare & meat consumption', 'Farming & agricultural policy & food security',
'Consumer prices & profit', 'Garbage 4']
k_with_garbage = len(clusters_with_garbage)
# define the index of garbage_clusters
garbage_clusters = [2, 4, 9, 14]
clusters = [c for c in clusters_with_garbage if 'Garbage' not in c]
k = len(clusters)
valid_cluster_index = [0, 1, 3, 5, 6, 7, 8, 10, 11, 12, 13]
def load_kmean_labels(path):
labels = np.load(path).astype(int)
k = max(labels) + 1
print('Number of {}-means labels: {}'.format(k, len(labels)))
return k, labels
def load_sentences(path):
with open(path, 'r') as f:
loaded_data = json.load(f)
print('Sentences file - loaded')
sentences = []
for item in loaded_data:
for key, value in item.items():
sentences.append(value)
print('Done - appended all sentences')
print('Number of tokenized sentences from corpus:', len(sentences))
return sentences
def load_sentences_index(path, source: Source):
with open(path, 'r') as f:
loaded_data = json.load(f)
print('Sentences index file - loaded')
indeces = []
for item in loaded_data:
if item['source'] in source:
indeces.append(item['sentence_id'])
print('Done - appended all sentences indeces')
print('Number of indeces from corpus {}: {}'.format(source, len(indeces)))
return indeces
def replace_url(sentence):
return re.sub(r'<a[\s\w\/~=#\'\":.,@?;&%()+-]*\>[\s\w\/~=#\"\':.,@?;&%()+-]*<\/a>', 'url', sentence)
def get_stemmed_sentences(lanuage: Language, stopwords: Stopwords, sentences):
token_pattern = re.compile(r'(?u)\b\w\w+\b')
if lanuage == Language.EN:
stemmer = EnglishStemmer()
else:
stemmer = GermanStemmer()
s = []
for sentence in sentences:
s.append(' '.join([stemmer.stem(word) for word in token_pattern.findall(sentence.lower()) if word not in stopwords]))
return np.array(s)
# encoder for numpy to json format
# reference: https://stackoverflow.com/questions/50916422/python-typeerror-object-of-type-int64-is-not-json-serializable/50916741
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyEncoder, self).default(obj)
def list2json(input_list, output_path):
output_file = open(output_path, 'w', encoding = 'utf-8')
json.dump(input_list, output_file, ensure_ascii = False, cls = NumpyEncoder)
output_file.close()
def get_start_end_datetime(start_year, end_year):
end_year = end_year + 1
start_datetime_str = str(start_year) + '-01-01'
end_datetime_str = str(end_year - 1) + '-12-31'
start_datetime = pd.to_datetime(start_datetime_str)
end_datetime = pd.to_datetime(end_datetime_str)
return start_datetime, end_datetime, start_datetime_str, end_datetime_str
def get_month_list(start_year, end_year):
end_year = end_year + 1
years = [range(start_year, end_year)]
totalMonths = 12 * (np.max(years) - np.min(years) + 1)
months = [str(np.min(years) + (i // 12)) + '-' + str(i % 12 + 1).zfill(2) for i in range(totalMonths)]
return months
def get_date_list(start_year, end_year):
_, _, start_datetime_str, end_datetime_str = get_start_end_datetime(start_year, end_year)
dates = pd.date_range(start = start_datetime_str, end = end_datetime_str)
dates = [str(date)[:10] for date in dates.values]
return dates
# for plotting histogram in terms of probability
# e.g. weights = np.ones_like(carryOut) / (len(carryOut))
# plt.hist(carryOut, bins=50, weights=weights)
def get_plot_weights(numerator, denominator = None):
if denominator != None:
return np.ones_like(numerator) / (len(denominator))
else:
return np.ones_like(numerator) / (len(numerator))
def print_stat(series1, series2):
print(' Min 1st Qu. Median Mean 3rd Qu. Max SD')
print('article: {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.format(
series1.min(), np.quantile(np.array(series1), 0.25), series1.median(),
series1.mean(),np.quantile(np.array(series1), 0.75), series1.max(),
series1.std())
)
print('comment: {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'.format(
series2.min(), np.quantile(np.array(series2), 0.25), series2.median(),
series2.mean(),np.quantile(np.array(series2), 0.75), series2.max(),
series2.std())
)
print()
| [
"json.dump",
"numpy.load",
"json.load",
"pandas.date_range",
"numpy.ones_like",
"nltk.stem.snowball.EnglishStemmer",
"numpy.max",
"numpy.min",
"numpy.array",
"pandas.to_datetime",
"nltk.corpus.stopwords.words",
"nltk.stem.snowball.GermanStemmer",
"nltk.download",
"re.sub",
"re.compile"
] | [((265, 291), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (278, 291), False, 'import nltk\n'), ((292, 314), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (305, 314), False, 'import nltk\n'), ((763, 789), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (778, 789), False, 'from nltk.corpus import stopwords\n'), ((802, 827), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""german"""'], {}), "('german')\n", (817, 827), False, 'from nltk.corpus import stopwords\n'), ((4614, 4730), 're.sub', 're.sub', (['"""<a[\\\\s\\\\w\\\\/~=#\\\\\'\\\\":.,@?;&%()+-]*\\\\>[\\\\s\\\\w\\\\/~=#\\\\"\\\\\':.,@?;&%()+-]*<\\\\/a>"""', '"""url"""', 'sentence'], {}), '(\n \'<a[\\\\s\\\\w\\\\/~=#\\\\\\\'\\\\":.,@?;&%()+-]*\\\\>[\\\\s\\\\w\\\\/~=#\\\\"\\\\\\\':.,@?;&%()+-]*<\\\\/a>\'\n , \'url\', sentence)\n', (4620, 4730), False, 'import re\n'), ((4806, 4837), 're.compile', 're.compile', (['"""(?u)\\\\b\\\\w\\\\w+\\\\b"""'], {}), "('(?u)\\\\b\\\\w\\\\w+\\\\b')\n", (4816, 4837), False, 'import re\n'), ((5104, 5115), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (5112, 5115), True, 'import numpy as np\n'), ((5696, 5768), 'json.dump', 'json.dump', (['input_list', 'output_file'], {'ensure_ascii': '(False)', 'cls': 'NumpyEncoder'}), '(input_list, output_file, ensure_ascii=False, cls=NumpyEncoder)\n', (5705, 5768), False, 'import json\n'), ((5992, 6026), 'pandas.to_datetime', 'pd.to_datetime', (['start_datetime_str'], {}), '(start_datetime_str)\n', (6006, 6026), True, 'import pandas as pd\n'), ((6044, 6076), 'pandas.to_datetime', 'pd.to_datetime', (['end_datetime_str'], {}), '(end_datetime_str)\n', (6058, 6076), True, 'import pandas as pd\n'), ((6587, 6648), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_datetime_str', 'end': 'end_datetime_str'}), '(start=start_datetime_str, end=end_datetime_str)\n', (6600, 6648), True, 'import pandas as pd\n'), ((3877, 3889), 
'json.load', 'json.load', (['f'], {}), '(f)\n', (3886, 3889), False, 'import json\n'), ((4262, 4274), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4271, 4274), False, 'import json\n'), ((4878, 4894), 'nltk.stem.snowball.EnglishStemmer', 'EnglishStemmer', ([], {}), '()\n', (4892, 4894), False, 'from nltk.stem.snowball import EnglishStemmer\n'), ((4917, 4932), 'nltk.stem.snowball.GermanStemmer', 'GermanStemmer', ([], {}), '()\n', (4930, 4932), False, 'from nltk.stem.snowball import GermanStemmer\n'), ((3672, 3685), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (3679, 3685), True, 'import numpy as np\n'), ((6966, 6989), 'numpy.ones_like', 'np.ones_like', (['numerator'], {}), '(numerator)\n', (6978, 6989), True, 'import numpy as np\n'), ((7030, 7053), 'numpy.ones_like', 'np.ones_like', (['numerator'], {}), '(numerator)\n', (7042, 7053), True, 'import numpy as np\n'), ((6285, 6298), 'numpy.max', 'np.max', (['years'], {}), '(years)\n', (6291, 6298), True, 'import numpy as np\n'), ((6301, 6314), 'numpy.min', 'np.min', (['years'], {}), '(years)\n', (6307, 6314), True, 'import numpy as np\n'), ((7296, 7313), 'numpy.array', 'np.array', (['series1'], {}), '(series1)\n', (7304, 7313), True, 'import numpy as np\n'), ((7371, 7388), 'numpy.array', 'np.array', (['series1'], {}), '(series1)\n', (7379, 7388), True, 'import numpy as np\n'), ((7548, 7565), 'numpy.array', 'np.array', (['series2'], {}), '(series2)\n', (7556, 7565), True, 'import numpy as np\n'), ((7623, 7640), 'numpy.array', 'np.array', (['series2'], {}), '(series2)\n', (7631, 7640), True, 'import numpy as np\n'), ((6336, 6349), 'numpy.min', 'np.min', (['years'], {}), '(years)\n', (6342, 6349), True, 'import numpy as np\n')] |
import os
import random
from copy import deepcopy
import pandas as pd
import logging
from tqdm import tqdm
import json
import glob
import re
from resemblyzer import VoiceEncoder
import traceback
import numpy as np
import pretty_midi
import librosa
from scipy.interpolate import interp1d
import torch
from textgrid import TextGrid
from utils.hparams import hparams
from data_gen.tts.data_gen_utils import build_phone_encoder, get_pitch
from utils.pitch_utils import f0_to_coarse
from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError
from data_gen.tts.binarizer_zh import ZhBinarizer
from data_gen.tts.txt_processors.zh_g2pM import ALL_YUNMU
from vocoders.base_vocoder import VOCODERS
class SingingBinarizer(BaseBinarizer):
    """Binarizer for singing-voice datasets.

    Scans one or more processed data directories for ``*_wf0.wav`` pieces,
    collects each piece's transcript, phoneme sequence, speaker and TextGrid
    path, and serializes train/valid/test splits into the binary format used
    for training.
    """

    def __init__(self, processed_data_dir=None):
        if processed_data_dir is None:
            processed_data_dir = hparams['processed_data_dir']
        # Several source datasets can be combined as a comma-separated list.
        self.processed_data_dirs = processed_data_dir.split(",")
        self.binarization_args = hparams['binarization_args']
        self.pre_align_args = hparams['pre_align_args']
        # item_name -> per-item metadata; filled by load_meta_data().
        self.item2txt = {}
        self.item2ph = {}
        self.item2wavfn = {}
        self.item2f0fn = {}
        self.item2tgfn = {}
        self.item2spk = {}

    def split_train_test_set(self, item_names):
        """Split item names into (train, test).

        An item is a test item when its name *contains* any of
        ``hparams['test_prefixes']``; everything else is a train item.
        """
        item_names = deepcopy(item_names)
        test_item_names = [x for x in item_names
                           if any(ts in x for ts in hparams['test_prefixes'])]
        # Build the lookup set once; the original rebuilt it per item (O(n^2)).
        test_set = set(test_item_names)
        train_item_names = [x for x in item_names if x not in test_set]
        logging.info("train {}".format(len(train_item_names)))
        logging.info("test {}".format(len(test_item_names)))
        return train_item_names, test_item_names

    def load_meta_data(self):
        """Populate the item2* dictionaries from every processed data dir."""
        for ds_id, processed_data_dir in enumerate(self.processed_data_dirs):
            wav_suffix = '_wf0.wav'
            txt_suffix = '.txt'
            ph_suffix = '_ph.txt'
            tg_suffix = '.TextGrid'
            all_wav_pieces = glob.glob(f'{processed_data_dir}/*/*{wav_suffix}')
            for piece_path in all_wav_pieces:
                item_name = piece_path[len(processed_data_dir) + 1:].replace('/', '-')[:-len(wav_suffix)]
                if len(self.processed_data_dirs) > 1:
                    # Prefix with the dataset index to keep names unique across dirs.
                    item_name = f'ds{ds_id}_{item_name}'
                # Use context managers so file handles are closed promptly
                # (the original left them open until garbage collection).
                with open(piece_path.replace(wav_suffix, txt_suffix)) as f:
                    self.item2txt[item_name] = f.readline()
                with open(piece_path.replace(wav_suffix, ph_suffix)) as f:
                    self.item2ph[item_name] = f.readline()
                self.item2wavfn[item_name] = piece_path
                # Speaker id is the leading token of the parent directory name.
                self.item2spk[item_name] = re.split('-|#', piece_path.split('/')[-2])[0]
                if len(self.processed_data_dirs) > 1:
                    self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}"
                self.item2tgfn[item_name] = piece_path.replace(wav_suffix, tg_suffix)
        print('spkers: ', set(self.item2spk.values()))
        self.item_names = sorted(self.item2txt.keys())
        if self.binarization_args['shuffle']:
            random.seed(1234)
            random.shuffle(self.item_names)
        self._train_item_names, self._test_item_names = self.split_train_test_set(self.item_names)

    @property
    def train_item_names(self):
        return self._train_item_names

    @property
    def valid_item_names(self):
        # Validation and test share the same item set.
        return self._test_item_names

    @property
    def test_item_names(self):
        return self._test_item_names

    def process(self):
        """Top-level entry point: load metadata, build maps, binarize splits."""
        self.load_meta_data()
        os.makedirs(hparams['binary_data_dir'], exist_ok=True)
        self.spk_map = self.build_spk_map()
        print("| spk_map: ", self.spk_map)
        spk_map_fn = f"{hparams['binary_data_dir']}/spk_map.json"
        with open(spk_map_fn, 'w') as f:
            json.dump(self.spk_map, f)
        self.phone_encoder = self._phone_encoder()
        self.process_data('valid')
        self.process_data('test')
        self.process_data('train')

    def _phone_encoder(self):
        """Build (or reload) the phone-set JSON and return a phone encoder."""
        ph_set_fn = f"{hparams['binary_data_dir']}/phone_set.json"
        if hparams['reset_phone_dict'] or not os.path.exists(ph_set_fn):
            ph_set = []
            for ph_sent in self.item2ph.values():
                ph_set += ph_sent.split(' ')
            ph_set = sorted(set(ph_set))
            with open(ph_set_fn, 'w') as f:
                json.dump(ph_set, f)
            print("| Build phone set: ", ph_set)
        else:
            with open(ph_set_fn, 'r') as f:
                ph_set = json.load(f)
            print("| Load phone set: ", ph_set)
        return build_phone_encoder(hparams['binary_data_dir'])

    @classmethod
    def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args):
        """Binarize a single item.

        Returns the result dict, or None when the item is skipped because a
        BinarizationError was raised (empty f0, unencodable phonemes, ...).
        """
        if hparams['vocoder'] in VOCODERS:
            wav, mel = VOCODERS[hparams['vocoder']].wav2spec(wav_fn)
        else:
            wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(wav_fn)
        res = {
            'item_name': item_name, 'txt': txt, 'ph': ph, 'mel': mel, 'wav': wav, 'wav_fn': wav_fn,
            'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0], 'spk_id': spk_id
        }
        try:
            if binarization_args['with_f0']:
                cls.get_pitch(wav, mel, res)
            phone_encoded = None
            if binarization_args['with_txt']:
                try:
                    phone_encoded = res['phone'] = encoder.encode(ph)
                except Exception:  # narrowed from the original bare except
                    traceback.print_exc()
                    raise BinarizationError("Empty phoneme")
            if binarization_args['with_align']:
                if phone_encoded is None:
                    # The original crashed with NameError here when with_align
                    # was enabled without with_txt; skip the item instead.
                    raise BinarizationError("with_align requires with_txt")
                cls.get_align(tg_fn, ph, mel, phone_encoded, res)
        except BinarizationError as e:
            print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
            return None
        return res
class MidiSingingBinarizer(SingingBinarizer):
    """SingingBinarizer variant that additionally stores MIDI notes, note
    durations, slur flags and word boundaries, loaded from ``meta.json``."""

    # Class-level caches: the @staticmethod hooks below look these up through
    # the class name, so they are intentionally shared rather than per-instance.
    item2midi = {}
    item2midi_dur = {}
    item2is_slur = {}
    item2ph_durs = {}
    item2wdb = {}

    def load_meta_data(self):
        """Populate the item2* dictionaries from each dir's meta.json."""
        for ds_id, processed_data_dir in enumerate(self.processed_data_dirs):
            with open(os.path.join(processed_data_dir, 'meta.json')) as f:
                meta_midi = json.load(f)  # list of per-song dicts
            for song_item in meta_midi:
                item_name = song_item['item_name']
                if len(self.processed_data_dirs) > 1:
                    item_name = f'ds{ds_id}_{item_name}'
                self.item2wavfn[item_name] = song_item['wav_fn']
                self.item2txt[item_name] = song_item['txt']
                self.item2ph[item_name] = ' '.join(song_item['phs'])
                # Word boundaries are marked at finals (yunmu) and silence tokens.
                self.item2wdb[item_name] = [
                    1 if x in ALL_YUNMU + ['AP', 'SP', '<SIL>'] else 0
                    for x in song_item['phs']]
                self.item2ph_durs[item_name] = song_item['ph_dur']
                self.item2midi[item_name] = song_item['notes']
                self.item2midi_dur[item_name] = song_item['notes_dur']
                self.item2is_slur[item_name] = song_item['is_slur']
                self.item2spk[item_name] = 'pop-cs'
                if len(self.processed_data_dirs) > 1:
                    self.item2spk[item_name] = f"ds{ds_id}_{self.item2spk[item_name]}"
        print('spkers: ', set(self.item2spk.values()))
        self.item_names = sorted(self.item2txt.keys())
        if self.binarization_args['shuffle']:
            random.seed(1234)
            random.shuffle(self.item_names)
        self._train_item_names, self._test_item_names = self.split_train_test_set(self.item_names)

    @staticmethod
    def get_pitch(wav_fn, wav, spec, ph, res):
        """Attach MIDI/slur/word-boundary arrays and ground-truth f0 to res."""
        # Item key: last two path components of wav_fn, '/'-joined, '_wf0' stripped.
        item_name = '/'.join(os.path.splitext(wav_fn)[0].split('/')[-2:]).replace('_wf0', '')
        res['pitch_midi'] = np.asarray(MidiSingingBinarizer.item2midi[item_name])
        res['midi_dur'] = np.asarray(MidiSingingBinarizer.item2midi_dur[item_name])
        res['is_slur'] = np.asarray(MidiSingingBinarizer.item2is_slur[item_name])
        res['word_boundary'] = np.asarray(MidiSingingBinarizer.item2wdb[item_name])
        assert res['pitch_midi'].shape == res['midi_dur'].shape == res['is_slur'].shape, (
            res['pitch_midi'].shape, res['midi_dur'].shape, res['is_slur'].shape)
        # Ground-truth f0 extracted from the waveform itself.
        gt_f0, gt_pitch_coarse = get_pitch(wav, spec, hparams)
        if sum(gt_f0) == 0:
            raise BinarizationError("Empty **gt** f0")
        res['f0'] = gt_f0
        res['pitch'] = gt_pitch_coarse

    @staticmethod
    def get_align(ph_durs, mel, phone_encoded, res, hop_size=None, audio_sample_rate=None):
        """Build the frame-to-phoneme map (``res['mel2ph']``) from durations.

        hop_size / audio_sample_rate default to the *current* hparams values.
        The original defaults were evaluated once at class-definition time,
        which silently used stale values if hparams was (re)loaded afterwards.
        ``phone_encoded`` is unused but kept for signature compatibility.
        """
        if hop_size is None:
            hop_size = hparams['hop_size']
        if audio_sample_rate is None:
            audio_sample_rate = hparams['audio_sample_rate']
        mel2ph = np.zeros([mel.shape[0]], int)
        start_time = 0
        for i_ph, dur in enumerate(ph_durs):
            start_frame = int(start_time * audio_sample_rate / hop_size + 0.5)
            end_frame = int((start_time + dur) * audio_sample_rate / hop_size + 0.5)
            mel2ph[start_frame:end_frame] = i_ph + 1  # 0 is reserved for "no phoneme"
            start_time = start_time + dur
        res['mel2ph'] = mel2ph

    @classmethod
    def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args):
        """Binarize a single item; returns the result dict or None if skipped."""
        if hparams['vocoder'] in VOCODERS:
            wav, mel = VOCODERS[hparams['vocoder']].wav2spec(wav_fn)
        else:
            wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(wav_fn)
        res = {
            'item_name': item_name, 'txt': txt, 'ph': ph, 'mel': mel, 'wav': wav, 'wav_fn': wav_fn,
            'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0], 'spk_id': spk_id
        }
        try:
            if binarization_args['with_f0']:
                cls.get_pitch(wav_fn, wav, mel, ph, res)
            phone_encoded = None
            if binarization_args['with_txt']:
                try:
                    phone_encoded = res['phone'] = encoder.encode(ph)
                except Exception:  # narrowed from the original bare except
                    traceback.print_exc()
                    raise BinarizationError("Empty phoneme")
            if binarization_args['with_align']:
                if phone_encoded is None:
                    # Original crashed with NameError in this configuration.
                    raise BinarizationError("with_align requires with_txt")
                cls.get_align(MidiSingingBinarizer.item2ph_durs[item_name], mel, phone_encoded, res)
        except BinarizationError as e:
            print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
            return None
        return res
class ZhSingingBinarizer(ZhBinarizer, SingingBinarizer):
    """Singing binarizer combining ZhBinarizer (presumably Chinese-specific
    text/alignment handling, per the ``binarizer_zh`` module name — confirm)
    with SingingBinarizer's metadata loading. Adds no behavior of its own."""
    pass
class OpencpopBinarizer(MidiSingingBinarizer):
    """Binarizer for the Opencpop dataset.

    Metadata comes from a pipe-separated ``transcriptions.txt``:
    item|txt|phs|notes|notes_dur|ph_dur|is_slur.
    """

    # Shadow the parent caches so Opencpop data does not mix with other
    # datasets; the @staticmethod hooks below read them through this class.
    item2midi = {}
    item2midi_dur = {}
    item2is_slur = {}
    item2ph_durs = {}
    item2wdb = {}

    def split_train_test_set(self, item_names):
        """Split item names into (train, test).

        Unlike the parent, an item is a test item only when its name
        *starts with* one of ``hparams['test_prefixes']``.
        """
        item_names = deepcopy(item_names)
        test_item_names = [x for x in item_names
                           if any(x.startswith(ts) for ts in hparams['test_prefixes'])]
        # Build the lookup set once; the original rebuilt it per item (O(n^2)).
        test_set = set(test_item_names)
        train_item_names = [x for x in item_names if x not in test_set]
        logging.info("train {}".format(len(train_item_names)))
        logging.info("test {}".format(len(test_item_names)))
        return train_item_names, test_item_names

    def load_meta_data(self):
        """Populate the item2* dictionaries from transcriptions.txt."""
        raw_data_dir = hparams['raw_data_dir']
        # Context manager closes the handle; the original leaked it.
        with open(os.path.join(raw_data_dir, 'transcriptions.txt')) as f:
            utterance_labels = f.readlines()
        for utterance_label in utterance_labels:
            song_info = utterance_label.split('|')
            item_name = song_info[0]
            self.item2wavfn[item_name] = f'{raw_data_dir}/wavs/{item_name}.wav'
            self.item2txt[item_name] = song_info[1]
            self.item2ph[item_name] = song_info[2]
            # Word boundaries at finals (yunmu) and silence tokens.
            self.item2wdb[item_name] = [1 if x in ALL_YUNMU + ['AP', 'SP'] else 0
                                        for x in song_info[2].split()]
            self.item2ph_durs[item_name] = [float(x) for x in song_info[5].split(" ")]
            # 'rest' notes map to MIDI pitch 0.
            self.item2midi[item_name] = [librosa.note_to_midi(x.split("/")[0]) if x != 'rest' else 0
                                         for x in song_info[3].split(" ")]
            self.item2midi_dur[item_name] = [float(x) for x in song_info[4].split(" ")]
            self.item2is_slur[item_name] = [int(x) for x in song_info[6].split(" ")]
            self.item2spk[item_name] = 'opencpop'
        print('spkers: ', set(self.item2spk.values()))
        self.item_names = sorted(self.item2txt.keys())
        if self.binarization_args['shuffle']:
            random.seed(1234)
            random.shuffle(self.item_names)
        self._train_item_names, self._test_item_names = self.split_train_test_set(self.item_names)

    @staticmethod
    def get_pitch(wav_fn, wav, spec, ph, res):
        """Attach MIDI/slur/word-boundary arrays (keyed by the wav basename)
        and the ground-truth f0 to res."""
        item_name = os.path.splitext(os.path.basename(wav_fn))[0]
        res['pitch_midi'] = np.asarray(OpencpopBinarizer.item2midi[item_name])
        res['midi_dur'] = np.asarray(OpencpopBinarizer.item2midi_dur[item_name])
        res['is_slur'] = np.asarray(OpencpopBinarizer.item2is_slur[item_name])
        res['word_boundary'] = np.asarray(OpencpopBinarizer.item2wdb[item_name])
        assert res['pitch_midi'].shape == res['midi_dur'].shape == res['is_slur'].shape, (
            res['pitch_midi'].shape, res['midi_dur'].shape, res['is_slur'].shape)
        # Ground-truth f0 extracted from the waveform itself.
        gt_f0, gt_pitch_coarse = get_pitch(wav, spec, hparams)
        if sum(gt_f0) == 0:
            raise BinarizationError("Empty **gt** f0")
        res['f0'] = gt_f0
        res['pitch'] = gt_pitch_coarse

    @classmethod
    def process_item(cls, item_name, ph, txt, tg_fn, wav_fn, spk_id, encoder, binarization_args):
        """Binarize a single item; returns the result dict or None if skipped."""
        if hparams['vocoder'] in VOCODERS:
            wav, mel = VOCODERS[hparams['vocoder']].wav2spec(wav_fn)
        else:
            wav, mel = VOCODERS[hparams['vocoder'].split('.')[-1]].wav2spec(wav_fn)
        res = {
            'item_name': item_name, 'txt': txt, 'ph': ph, 'mel': mel, 'wav': wav, 'wav_fn': wav_fn,
            'sec': len(wav) / hparams['audio_sample_rate'], 'len': mel.shape[0], 'spk_id': spk_id
        }
        try:
            if binarization_args['with_f0']:
                cls.get_pitch(wav_fn, wav, mel, ph, res)
            phone_encoded = None
            if binarization_args['with_txt']:
                try:
                    phone_encoded = res['phone'] = encoder.encode(ph)
                except Exception:  # narrowed from the original bare except
                    traceback.print_exc()
                    raise BinarizationError("Empty phoneme")
            if binarization_args['with_align']:
                if phone_encoded is None:
                    # Original crashed with NameError in this configuration.
                    raise BinarizationError("with_align requires with_txt")
                cls.get_align(OpencpopBinarizer.item2ph_durs[item_name], mel, phone_encoded, res)
        except BinarizationError as e:
            print(f"| Skip item ({e}). item_name: {item_name}, wav_fn: {wav_fn}")
            return None
        return res
if __name__ == "__main__":
    # Script entry point: run the plain (non-MIDI) singing binarization pipeline.
    SingingBinarizer().process()
| [
"copy.deepcopy",
"data_gen.tts.base_binarizer.BinarizationError",
"traceback.print_exc",
"os.makedirs",
"os.path.basename",
"data_gen.tts.data_gen_utils.build_phone_encoder",
"random.shuffle",
"numpy.asarray",
"data_gen.tts.data_gen_utils.get_pitch",
"numpy.zeros",
"os.path.exists",
"random.se... | [((1313, 1333), 'copy.deepcopy', 'deepcopy', (['item_names'], {}), '(item_names)\n', (1321, 1333), False, 'from copy import deepcopy\n'), ((3535, 3589), 'os.makedirs', 'os.makedirs', (["hparams['binary_data_dir']"], {'exist_ok': '(True)'}), "(hparams['binary_data_dir'], exist_ok=True)\n", (3546, 3589), False, 'import os\n'), ((4512, 4559), 'data_gen.tts.data_gen_utils.build_phone_encoder', 'build_phone_encoder', (["hparams['binary_data_dir']"], {}), "(hparams['binary_data_dir'])\n", (4531, 4559), False, 'from data_gen.tts.data_gen_utils import build_phone_encoder, get_pitch\n'), ((9484, 9537), 'numpy.asarray', 'np.asarray', (['MidiSingingBinarizer.item2midi[item_name]'], {}), '(MidiSingingBinarizer.item2midi[item_name])\n', (9494, 9537), True, 'import numpy as np\n'), ((9564, 9621), 'numpy.asarray', 'np.asarray', (['MidiSingingBinarizer.item2midi_dur[item_name]'], {}), '(MidiSingingBinarizer.item2midi_dur[item_name])\n', (9574, 9621), True, 'import numpy as np\n'), ((9647, 9703), 'numpy.asarray', 'np.asarray', (['MidiSingingBinarizer.item2is_slur[item_name]'], {}), '(MidiSingingBinarizer.item2is_slur[item_name])\n', (9657, 9703), True, 'import numpy as np\n'), ((9735, 9787), 'numpy.asarray', 'np.asarray', (['MidiSingingBinarizer.item2wdb[item_name]'], {}), '(MidiSingingBinarizer.item2wdb[item_name])\n', (9745, 9787), True, 'import numpy as np\n'), ((10008, 10037), 'data_gen.tts.data_gen_utils.get_pitch', 'get_pitch', (['wav', 'spec', 'hparams'], {}), '(wav, spec, hparams)\n', (10017, 10037), False, 'from data_gen.tts.data_gen_utils import build_phone_encoder, get_pitch\n'), ((10353, 10382), 'numpy.zeros', 'np.zeros', (['[mel.shape[0]]', 'int'], {}), '([mel.shape[0]], int)\n', (10361, 10382), True, 'import numpy as np\n'), ((12411, 12431), 'copy.deepcopy', 'deepcopy', (['item_names'], {}), '(item_names)\n', (12419, 12431), False, 'from copy import deepcopy\n'), ((14753, 14803), 'numpy.asarray', 'np.asarray', 
(['OpencpopBinarizer.item2midi[item_name]'], {}), '(OpencpopBinarizer.item2midi[item_name])\n', (14763, 14803), True, 'import numpy as np\n'), ((14830, 14884), 'numpy.asarray', 'np.asarray', (['OpencpopBinarizer.item2midi_dur[item_name]'], {}), '(OpencpopBinarizer.item2midi_dur[item_name])\n', (14840, 14884), True, 'import numpy as np\n'), ((14910, 14963), 'numpy.asarray', 'np.asarray', (['OpencpopBinarizer.item2is_slur[item_name]'], {}), '(OpencpopBinarizer.item2is_slur[item_name])\n', (14920, 14963), True, 'import numpy as np\n'), ((14995, 15044), 'numpy.asarray', 'np.asarray', (['OpencpopBinarizer.item2wdb[item_name]'], {}), '(OpencpopBinarizer.item2wdb[item_name])\n', (15005, 15044), True, 'import numpy as np\n'), ((15923, 15952), 'data_gen.tts.data_gen_utils.get_pitch', 'get_pitch', (['wav', 'spec', 'hparams'], {}), '(wav, spec, hparams)\n', (15932, 15952), False, 'from data_gen.tts.data_gen_utils import build_phone_encoder, get_pitch\n'), ((1970, 2020), 'glob.glob', 'glob.glob', (['f"""{processed_data_dir}/*/*{wav_suffix}"""'], {}), "(f'{processed_data_dir}/*/*{wav_suffix}')\n", (1979, 2020), False, 'import glob\n'), ((3060, 3077), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (3071, 3077), False, 'import random\n'), ((3090, 3121), 'random.shuffle', 'random.shuffle', (['self.item_names'], {}), '(self.item_names)\n', (3104, 3121), False, 'import random\n'), ((9028, 9045), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (9039, 9045), False, 'import random\n'), ((9058, 9089), 'random.shuffle', 'random.shuffle', (['self.item_names'], {}), '(self.item_names)\n', (9072, 9089), False, 'import random\n'), ((10084, 10120), 'data_gen.tts.base_binarizer.BinarizationError', 'BinarizationError', (['"""Empty **gt** f0"""'], {}), "('Empty **gt** f0')\n", (10101, 10120), False, 'from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError\n'), ((14314, 14331), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (14325, 14331), 
False, 'import random\n'), ((14344, 14375), 'random.shuffle', 'random.shuffle', (['self.item_names'], {}), '(self.item_names)\n', (14358, 14375), False, 'import random\n'), ((15999, 16035), 'data_gen.tts.base_binarizer.BinarizationError', 'BinarizationError', (['"""Empty **gt** f0"""'], {}), "('Empty **gt** f0')\n", (16016, 16035), False, 'from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError\n'), ((4118, 4143), 'os.path.exists', 'os.path.exists', (['ph_set_fn'], {}), '(ph_set_fn)\n', (4132, 4143), False, 'import os\n'), ((14696, 14720), 'os.path.basename', 'os.path.basename', (['wav_fn'], {}), '(wav_fn)\n', (14712, 14720), False, 'import os\n'), ((7787, 7832), 'os.path.join', 'os.path.join', (['processed_data_dir', '"""meta.json"""'], {}), "(processed_data_dir, 'meta.json')\n", (7799, 7832), False, 'import os\n'), ((13009, 13057), 'os.path.join', 'os.path.join', (['raw_data_dir', '"""transcriptions.txt"""'], {}), "(raw_data_dir, 'transcriptions.txt')\n", (13021, 13057), False, 'import os\n'), ((7117, 7138), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7136, 7138), False, 'import traceback\n'), ((7165, 7200), 'data_gen.tts.base_binarizer.BinarizationError', 'BinarizationError', (['f"""Empty phoneme"""'], {}), "(f'Empty phoneme')\n", (7182, 7200), False, 'from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError\n'), ((11715, 11736), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11734, 11736), False, 'import traceback\n'), ((11763, 11798), 'data_gen.tts.base_binarizer.BinarizationError', 'BinarizationError', (['f"""Empty phoneme"""'], {}), "(f'Empty phoneme')\n", (11780, 11798), False, 'from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError\n'), ((16947, 16968), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (16966, 16968), False, 'import traceback\n'), ((16995, 17030), 'data_gen.tts.base_binarizer.BinarizationError', 'BinarizationError', (['f"""Empty 
phoneme"""'], {}), "(f'Empty phoneme')\n", (17012, 17030), False, 'from data_gen.tts.base_binarizer import BaseBinarizer, BinarizationError\n'), ((9391, 9415), 'os.path.splitext', 'os.path.splitext', (['wav_fn'], {}), '(wav_fn)\n', (9407, 9415), False, 'import os\n')] |
import json
import numpy as np
import keras
import keras.preprocessing.text as kpt
from keras.preprocessing.text import Tokenizer
from keras.models import model_from_json
# Fit the tokenizer on the same corpus the model was trained on so that
# sequences_to_matrix() below produces features consistent with training.
tokenizer = Tokenizer()
# Load the wine reviews; usecols=(1, 3) selects two columns, of which only
# the first (the review text) is used below.
# NOTE(review): exact column meanings inferred from usage — confirm against the CSV.
wine_training = np.genfromtxt('ranked_wine.csv', delimiter=',', skip_header=1,usecols=(1,3),dtype=None, filling_values= 0, invalid_raise=False, encoding=None)
train_x = [x[0] for x in wine_training]
tokenizer.fit_on_texts(texts=train_x)
# Index order must match the model's output units (argmax -> label).
labels = ['positive', 'neutral', 'negative']
# word -> index mapping saved at training time.
with open('dictionary.json', 'r') as dictionary_file:
    dictionary = json.load(dictionary_file)
def convert_text_to_index_array(text):
    """Map *text* to a list of word indices via the global ``dictionary``.

    Words missing from the training vocabulary are reported and skipped.
    """
    indices = []
    for token in kpt.text_to_word_sequence(text):
        try:
            indices.append(dictionary[token])
        except KeyError:
            print("'%s' not in training corpus; ignoring." % (token))
    return indices
# Rebuild the trained model from its serialized architecture and weights.
# (Context manager closes the handle; the original leaked it.)
with open('tipsy_model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
model = model_from_json(loaded_model_json)
model.load_weights('tipsy_model.h5')
while True:
    evalSentence = input('Input a sentence to be evaluated, or Enter to quit: ')
    if len(evalSentence) == 0:
        break
    testArr = convert_text_to_index_array(evalSentence)
    # BUG FIX: the original assigned this matrix to the name `input`,
    # shadowing the builtin and crashing on the second loop iteration.
    features = tokenizer.sequences_to_matrix([testArr], mode='binary')
    # Takes my new string and scores it!
    pred = model.predict(features)
print(pred, "%s sentiment; %f%% confidence" % (labels[np.argmax(pred)], pred[0][np.argmax(pred)]*100)) | [
"json.load",
"numpy.argmax",
"numpy.genfromtxt",
"keras.preprocessing.text.Tokenizer",
"keras.models.model_from_json",
"keras.preprocessing.text.text_to_word_sequence"
] | [((185, 196), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {}), '()\n', (194, 196), False, 'from keras.preprocessing.text import Tokenizer\n'), ((213, 362), 'numpy.genfromtxt', 'np.genfromtxt', (['"""ranked_wine.csv"""'], {'delimiter': '""","""', 'skip_header': '(1)', 'usecols': '(1, 3)', 'dtype': 'None', 'filling_values': '(0)', 'invalid_raise': '(False)', 'encoding': 'None'}), "('ranked_wine.csv', delimiter=',', skip_header=1, usecols=(1, \n 3), dtype=None, filling_values=0, invalid_raise=False, encoding=None)\n", (226, 362), True, 'import numpy as np\n'), ((973, 1007), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (988, 1007), False, 'from keras.models import model_from_json\n'), ((551, 577), 'json.load', 'json.load', (['dictionary_file'], {}), '(dictionary_file)\n', (560, 577), False, 'import json\n'), ((629, 660), 'keras.preprocessing.text.text_to_word_sequence', 'kpt.text_to_word_sequence', (['text'], {}), '(text)\n', (654, 660), True, 'import keras.preprocessing.text as kpt\n'), ((1425, 1440), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (1434, 1440), True, 'import numpy as np\n'), ((1451, 1466), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (1460, 1466), True, 'import numpy as np\n')] |
from sklearn import datasets
from sklearn import svm
import numpy as np
from sklearn import random_projection
import matplotlib.pyplot as plt
def ex1():
    """Load the example datasets and fit an SVM classifier on digits."""
    iris = datasets.load_iris()  # loaded for illustration; unused below
    digits = datasets.load_digits()
    classifier = svm.SVC(gamma=0.001, C=100.0)
    # Train on everything except the last sample.
    classifier.fit(digits.data[:-1], digits.target[:-1])
def ex2():
    """Demonstrate dtype conventions: random projection of float32 data."""
    rng = np.random.RandomState(0)
    data = np.array(rng.rand(10, 2000), dtype='float32')
    projector = random_projection.GaussianRandomProjection()
    projected = projector.fit_transform(data)
def ex3():
    """Load iris and inspect the set of distinct target labels."""
    iris = datasets.load_iris()
    iris_X, iris_Y = iris.data, iris.target
    # Distinct class labels (result intentionally unused, as in the docs example).
    np.unique(iris_Y)
def ex4():
    """Plot the decision surfaces of four SVM variants on two iris features."""
    iris = datasets.load_iris()
    features = iris.data[:, :2]  # first two features only, for 2-D plotting
    targets = iris.target
    mesh_step = .02
    penalty = 1.0  # SVM regularization parameter
    svc = svm.SVC(kernel='linear', C=penalty).fit(features, targets)
    rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=penalty).fit(features, targets)
    poly_svc = svm.SVC(kernel='poly', degree=3, C=penalty).fit(features, targets)
    lin_svc = svm.LinearSVC(C=penalty).fit(features, targets)
    # Mesh covering the data range with a one-unit margin on every side.
    x_min, x_max = features[:, 0].min() - 1, features[:, 0].max() + 1
    y_min, y_max = features[:, 1].min() - 1, features[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, mesh_step),
                         np.arange(y_min, y_max, mesh_step))
    titles = ['SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel',
              'SVC with polynomial (degree 3) kernel']
    for plot_idx, model in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
        plt.subplot(2, 2, plot_idx + 1)
        plt.subplots_adjust(wspace=0.4, hspace=0.4)
        # Predict on the whole mesh to draw the decision regions.
        grid_pred = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
        plt.contourf(xx, yy, grid_pred, cmap=plt.cm.coolwarm, alpha=0.8)
        plt.scatter(features[:, 0], features[:, 1], c=targets, cmap=plt.cm.coolwarm)
        plt.xlabel('Sepal length')
        plt.ylabel('Sepal width')
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.xticks(())
        plt.yticks(())
        plt.title(titles[plot_idx])
    plt.show()
if __name__ == '__main__':
    # Only the plotting demo runs by default; call ex1()-ex3() manually if needed.
    ex4()
| [
"matplotlib.pyplot.title",
"sklearn.datasets.load_iris",
"sklearn.datasets.load_digits",
"numpy.arange",
"matplotlib.pyplot.contourf",
"sklearn.svm.SVC",
"numpy.unique",
"sklearn.random_projection.GaussianRandomProjection",
"matplotlib.pyplot.yticks",
"numpy.random.RandomState",
"sklearn.svm.Lin... | [((196, 216), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (214, 216), False, 'from sklearn import datasets\n'), ((230, 252), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {}), '()\n', (250, 252), False, 'from sklearn import datasets\n'), ((263, 292), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '(0.001)', 'C': '(100.0)'}), '(gamma=0.001, C=100.0)\n', (270, 292), False, 'from sklearn import svm\n'), ((387, 411), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (408, 411), True, 'import numpy as np\n'), ((447, 475), 'numpy.array', 'np.array', (['X'], {'dtype': '"""float32"""'}), "(X, dtype='float32')\n", (455, 475), True, 'import numpy as np\n'), ((494, 538), 'sklearn.random_projection.GaussianRandomProjection', 'random_projection.GaussianRandomProjection', ([], {}), '()\n', (536, 538), False, 'from sklearn import random_projection\n'), ((604, 624), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (622, 624), False, 'from sklearn import datasets\n'), ((677, 694), 'numpy.unique', 'np.unique', (['iris_Y'], {}), '(iris_Y)\n', (686, 694), True, 'import numpy as np\n'), ((719, 739), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (737, 739), False, 'from sklearn import datasets\n'), ((1994, 2004), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2002, 2004), True, 'import matplotlib.pyplot as plt\n'), ((1198, 1224), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (1207, 1224), True, 'import numpy as np\n'), ((1226, 1252), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (1235, 1252), True, 'import numpy as np\n'), ((1475, 1499), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(i + 1)'], {}), '(2, 2, i + 1)\n', (1486, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1508, 1551), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], 
{'wspace': '(0.4)', 'hspace': '(0.4)'}), '(wspace=0.4, hspace=0.4)\n', (1527, 1551), True, 'import matplotlib.pyplot as plt\n'), ((1649, 1705), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'cmap': 'plt.cm.coolwarm', 'alpha': '(0.8)'}), '(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)\n', (1661, 1705), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1771), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[:, 0]', 'x[:, 1]'], {'c': 'y', 'cmap': 'plt.cm.coolwarm'}), '(x[:, 0], x[:, 1], c=y, cmap=plt.cm.coolwarm)\n', (1726, 1771), True, 'import matplotlib.pyplot as plt\n'), ((1780, 1806), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sepal length"""'], {}), "('Sepal length')\n", (1790, 1806), True, 'import matplotlib.pyplot as plt\n'), ((1815, 1840), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sepal width"""'], {}), "('Sepal width')\n", (1825, 1840), True, 'import matplotlib.pyplot as plt\n'), ((1923, 1937), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (1933, 1937), True, 'import matplotlib.pyplot as plt\n'), ((1946, 1960), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (1956, 1960), True, 'import matplotlib.pyplot as plt\n'), ((1969, 1989), 'matplotlib.pyplot.title', 'plt.title', (['titles[i]'], {}), '(titles[i])\n', (1978, 1989), True, 'import matplotlib.pyplot as plt\n'), ((852, 881), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'C': 'C'}), "(kernel='linear', C=C)\n", (859, 881), False, 'from sklearn import svm\n'), ((906, 943), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': '(0.7)', 'C': 'C'}), "(kernel='rbf', gamma=0.7, C=C)\n", (913, 943), False, 'from sklearn import svm\n'), ((969, 1006), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""poly"""', 'degree': '(3)', 'C': 'C'}), "(kernel='poly', degree=3, C=C)\n", (976, 1006), False, 'from sklearn import svm\n'), ((1031, 1049), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {'C': 'C'}), '(C=C)\n', (1044, 1049), 
False, 'from sklearn import svm\n')] |
#!/usr/bin/env python3
# Simple k-means implementation with animated plot.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import make_classification
# Plot styling and fixed colour palettes: customPalette colours data points by
# cluster label; labelPalette colours the centroid markers.
sns.set(style="white", color_codes=True)
customPalette = pd.Series(['#630C3A', '#39C8C6', '#D3500C'])
labelPalette = pd.Series(['#EEEE99', '#DDDD99','#FFB139'])
N_CLASSES=3
N_FEATURES=2
# Synthetic, well-separated 2-D data with known ground-truth labels
# (used later only to score the clustering with V-measure).
X, y = make_classification(n_samples=500,
                           n_features=N_FEATURES,
                           n_informative=2,
                           n_redundant=0,
                           n_repeated=0,
                           n_classes=N_CLASSES,
                           n_clusters_per_class=1,
                           class_sep=2.0,
                           hypercube=True,
                           shuffle=False)
X1 = X[:, 0]
X2 = X[:, 1]
# cur_label starts at the true labels but is overwritten in the first k-means
# assignment step below.
df = pd.DataFrame({'x1': X1, 'x2': X2, 'true_label': y, 'cur_label': y})
# Random initial centroids drawn from a standard normal (no k-means++ seeding).
centroids = np.random.randn(N_CLASSES, N_FEATURES)
cent_labels = np.arange(0, N_CLASSES, 1)
cen_df = pd.DataFrame({'x1': centroids[:, 0], 'x2': centroids[:, 1], 'cur_label': cent_labels})
# Run 20 fixed iterations of Lloyd's algorithm, redrawing the plot each time.
for i in range(20):
    print('iteration', i)
    # Redraw: points coloured by current assignment, centroids drawn on top.
    plt.cla()
    plt.clf()
    plt.scatter(x=df['x1'], y=df['x2'], color=customPalette[df['cur_label']])
    plt.scatter(x=cen_df['x1'], y=cen_df['x2'], color=labelPalette[cen_df['cur_label']])
    plt.pause(0.1)

    def pick_closest(row):
        # Assignment step. NOTE: `pick_closest()` is nested since it relies on
        # the rebinding of `cen_df` each iteration.
        # BUG FIX: the original applied sqrt *before* summing, which computes
        # the Manhattan (L1) distance; k-means uses the Euclidean distance.
        return np.argmin(np.sqrt(np.sum(np.square(cen_df[['x1', 'x2']] - row), axis=1)))

    df['cur_label'] = df[['x1', 'x2']].apply(pick_closest, axis=1)
    # Update step: each centroid becomes the mean of its cluster.
    # BUG FIX: `['x1', 'x2']` tuple selection on a GroupBy was removed in
    # pandas 1.0; select with a list instead.
    cen_df = df.groupby(['cur_label'])[['x1', 'x2']].mean().reset_index()
# Score the clustering against the ground truth. F-score won't work because
# cluster ids are arbitrary, but V-measure is invariant to label permutation.
from sklearn.metrics import v_measure_score
print('V-measure:', v_measure_score(labels_true=df['true_label'], labels_pred=df['cur_label']))
| [
"pandas.DataFrame",
"numpy.random.randn",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.scatter",
"sklearn.metrics.v_measure_score",
"numpy.square",
"sklearn.datasets.make_classification",
"numpy.arange",
"pandas.Series",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.pause",
"seaborn.set"
] | [((220, 260), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""', 'color_codes': '(True)'}), "(style='white', color_codes=True)\n", (227, 260), True, 'import seaborn as sns\n'), ((277, 321), 'pandas.Series', 'pd.Series', (["['#630C3A', '#39C8C6', '#D3500C']"], {}), "(['#630C3A', '#39C8C6', '#D3500C'])\n", (286, 321), True, 'import pandas as pd\n'), ((337, 381), 'pandas.Series', 'pd.Series', (["['#EEEE99', '#DDDD99', '#FFB139']"], {}), "(['#EEEE99', '#DDDD99', '#FFB139'])\n", (346, 381), True, 'import pandas as pd\n'), ((415, 618), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(500)', 'n_features': 'N_FEATURES', 'n_informative': '(2)', 'n_redundant': '(0)', 'n_repeated': '(0)', 'n_classes': 'N_CLASSES', 'n_clusters_per_class': '(1)', 'class_sep': '(2.0)', 'hypercube': '(True)', 'shuffle': '(False)'}), '(n_samples=500, n_features=N_FEATURES, n_informative=2,\n n_redundant=0, n_repeated=0, n_classes=N_CLASSES, n_clusters_per_class=\n 1, class_sep=2.0, hypercube=True, shuffle=False)\n', (434, 618), False, 'from sklearn.datasets import make_classification\n'), ((886, 953), 'pandas.DataFrame', 'pd.DataFrame', (["{'x1': X1, 'x2': X2, 'true_label': y, 'cur_label': y}"], {}), "({'x1': X1, 'x2': X2, 'true_label': y, 'cur_label': y})\n", (898, 953), True, 'import pandas as pd\n'), ((967, 1005), 'numpy.random.randn', 'np.random.randn', (['N_CLASSES', 'N_FEATURES'], {}), '(N_CLASSES, N_FEATURES)\n', (982, 1005), True, 'import numpy as np\n'), ((1020, 1046), 'numpy.arange', 'np.arange', (['(0)', 'N_CLASSES', '(1)'], {}), '(0, N_CLASSES, 1)\n', (1029, 1046), True, 'import numpy as np\n'), ((1056, 1146), 'pandas.DataFrame', 'pd.DataFrame', (["{'x1': centroids[:, 0], 'x2': centroids[:, 1], 'cur_label': cent_labels}"], {}), "({'x1': centroids[:, 0], 'x2': centroids[:, 1], 'cur_label':\n cent_labels})\n", (1068, 1146), True, 'import pandas as pd\n'), ((1195, 1204), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1202, 1204), True, 
'import matplotlib.pyplot as plt\n'), ((1209, 1218), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1216, 1218), True, 'import matplotlib.pyplot as plt\n'), ((1223, 1296), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "df['x1']", 'y': "df['x2']", 'color': "customPalette[df['cur_label']]"}), "(x=df['x1'], y=df['x2'], color=customPalette[df['cur_label']])\n", (1234, 1296), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1398), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': "cen_df['x1']", 'y': "cen_df['x2']", 'color': "labelPalette[cen_df['cur_label']]"}), "(x=cen_df['x1'], y=cen_df['x2'], color=labelPalette[cen_df[\n 'cur_label']])\n", (1320, 1398), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1412), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (1407, 1412), True, 'import matplotlib.pyplot as plt\n'), ((1970, 2044), 'sklearn.metrics.v_measure_score', 'v_measure_score', ([], {'labels_true': "df['true_label']", 'labels_pred': "df['cur_label']"}), "(labels_true=df['true_label'], labels_pred=df['cur_label'])\n", (1985, 2044), False, 'from sklearn.metrics import v_measure_score\n'), ((1584, 1621), 'numpy.square', 'np.square', (["(cen_df[['x1', 'x2']] - row)"], {}), "(cen_df[['x1', 'x2']] - row)\n", (1593, 1621), True, 'import numpy as np\n')] |
import numpy as np
from laserchicken.feature_extractor.base_feature_extractor import FeatureExtractor
from laserchicken.keys import point
class MeanStdCoeffFeatureExtractor(FeatureExtractor):
    """Calculates mean, standard deviation and the ratio between the two.

    For every neighborhood, computes the mean, standard deviation and
    coefficient of variation (std / mean) of one point attribute
    (``'z'`` by default).
    """

    def __init__(self, data_key='z'):
        # Name of the point-cloud attribute the statistics are computed over.
        self.data_key = data_key

    @classmethod
    def requires(cls):
        """No other features need to be computed before this extractor runs."""
        return []

    def provides(self):
        """Names of the produced features, e.g. ['mean_z', 'std_z', 'coeff_var_z']."""
        base_names = ['mean_', 'std_', 'coeff_var_']
        return [base + str(self.data_key) for base in base_names]

    def extract(self, point_cloud, neighborhoods, target_point_cloud, target_indices, volume_description):
        """Compute the three statistics for every neighborhood.

        Returns a (3, n_neighborhoods) array whose rows are mean, std and
        coefficient of variation, matching the order of provides().
        """
        return np.array([self._extract_one(point_cloud, neighborhood) for neighborhood in neighborhoods]).T

    def _extract_one(self, sourcepc, neighborhood):
        """Statistics for a single neighborhood; all NaN when it is empty."""
        if neighborhood:
            z = sourcepc[point][self.data_key]['data'][neighborhood]
            mean_z = np.mean(z)
            std_z = np.std(z)
            coeff_var_z = std_z / mean_z
        else:
            # np.nan instead of np.NaN: the NaN alias was removed in NumPy 2.0.
            mean_z = std_z = coeff_var_z = np.nan
        return mean_z, std_z, coeff_var_z
| [
"numpy.std",
"numpy.mean"
] | [((930, 940), 'numpy.mean', 'np.mean', (['z'], {}), '(z)\n', (937, 940), True, 'import numpy as np\n'), ((961, 970), 'numpy.std', 'np.std', (['z'], {}), '(z)\n', (967, 970), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import yaml
import pydmps
import rospy
import sys
import tf
import std_msgs
import numpy as np
from os.path import join
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import Path
from ros_dmp.srv import *
class LearnDmp:
    """ROS node exposing a service that learns a Dynamic Motion Primitive
    (DMP) from a demonstrated Cartesian trajectory.

    The learned basis-function weights are written to a YAML file, and both
    the demonstrated path and the path reproduced by the learned DMP are
    published for visualisation.
    """

    def __init__(self):
        '''Ros interface for learning DMP
        Initializes the learn DMP service
        '''
        rospy.init_node("learn_dynamic_motion_primitive_service")
        service_ = rospy.Service('learn_dynamic_motion_primitive_service',
                                LearnDMP, self.learn_dmp_handler)
        rospy.loginfo("Started learn DMP service")

        # Publishers
        self.imitated_path_pub = rospy.Publisher("~imitated_path", Path, queue_size=1)
        self.demonstrated_path_pub = rospy.Publisher("~demonstrated_path", Path, queue_size=1)

        # Parameters
        self.weights_file_path = rospy.get_param('~weights_file_path', '../../data/weights/')
        # '~loop_rate' has no default: fail fast if the launch file omits it.
        loop_rate = rospy.get_param('~loop_rate')
        self.result = ""
        r = rospy.Rate(loop_rate)  # NOTE(review): unused; rospy.spin() below blocks regardless
        rospy.spin()

    def learn_dmp_handler(self, req):
        '''Handler for client request

        Converts the demonstrated poses (position + quaternion) into a
        6 x N matrix of [x, y, z, roll, pitch, yaw] columns and learns the
        DMP weights from it.

        req: service request msg
        '''
        rospy.loginfo("Recieved request to learn a motion primitive")
        trajectory = np.zeros((6, len(req.poses)))
        rospy.loginfo("Learning motion primitive " + req.dmp_name)
        for i in range(len(req.poses)):
            # Quaternion -> Euler angles for the orientation components.
            rpy = tf.transformations.euler_from_quaternion([req.poses[i].orientation.x,
                                                       req.poses[i].orientation.y,
                                                       req.poses[i].orientation.z,
                                                       req.poses[i].orientation.w])
            trajectory[:, i] = [req.poses[i].position.x, req.poses[i].position.y,
                                req.poses[i].position.z, rpy[0], rpy[1], rpy[2]]

        self.learn_dmp(trajectory, req.output_weight_file_name, req.n_dmps, req.n_bfs)
        rospy.loginfo("Successfully learned the motion primitive")

        # Return response
        response = LearnDMPResponse()
        response.result = self.result
        return response

    def learn_dmp(self, trajectory, file_name, n_dmps=6, n_bfs=50):
        """This function learns dmp weights and stores them in desired file

        trajectory: Matrix containing trajectory (6 x N, one column per sample)
        file_name: Name of file in which weights will be stored
        n_dmps: Number of dimmensions (6 default for cartesian trajectory)
        n_bfs: Number of basis functions to be used
        """
        demonstrated_trajectory = trajectory.copy()
        demonstrated_goal_pose = demonstrated_trajectory[:, -1]
        demonstrated_initial_pose = demonstrated_trajectory[:, 0]

        # Removing bias from the data. (Start position is zero now)
        trajectory -= trajectory[:, 0][:, None]

        # Initiating DMP
        self.dmp = pydmps.dmp_discrete.DMPs_discrete(n_dmps=n_dmps, n_bfs=n_bfs, ay=None)

        # Learn weights
        weights = self.dmp.imitate_path(y_des=trajectory)

        # Save the weights, one list per Cartesian dimension, to the requested file.
        data = {'x': np.asarray(weights[0, :]).tolist(), 'y': np.asarray(weights[1, :]).tolist(),
                'z': np.asarray(weights[2, :]).tolist(), 'roll': np.asarray(weights[3, :]).tolist(),
                'pitch': np.asarray(weights[4, :]).tolist(),
                'yaw': np.asarray(weights[5, :]).tolist()}
        # Renamed from `file` to avoid shadowing the builtin.
        weights_file = join(self.weights_file_path, file_name)
        try:
            with open(weights_file, "a+") as f:
                yaml.dump(data, f)
            self.result = "success"
        # Catch only genuine failures: the previous bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit.
        except Exception:
            rospy.logerr("Cannot save weight file. Check if the directory of the weight file exists. Related parameter can be found in launch file.")
            self.result = "failed"

        # Imitate the same path as demonstrated
        pos, vel, acc = self.dmp.rollout(goal=demonstrated_goal_pose, y0=demonstrated_initial_pose)

        # Publish Imitated Path
        imitated_path = Path()
        imitated_path.header.frame_id = "/base_link"
        for itr in range(pos.shape[0]):
            pose_stamped = PoseStamped()
            pose_stamped.pose.position.x = pos[itr, 0]
            pose_stamped.pose.position.y = pos[itr, 1]
            pose_stamped.pose.position.z = pos[itr, 2]
            imitated_path.poses.append(pose_stamped)
        self.imitated_path_pub.publish(imitated_path)

        # Publish Demonstrated Path
        demonstrated_path = Path()
        demonstrated_path.header.frame_id = "/base_link"
        for itr in range(demonstrated_trajectory.shape[1]):
            pose_stamped = PoseStamped()
            pose_stamped.pose.position.x = demonstrated_trajectory[0, itr]
            pose_stamped.pose.position.y = demonstrated_trajectory[1, itr]
            pose_stamped.pose.position.z = demonstrated_trajectory[2, itr]
            demonstrated_path.poses.append(pose_stamped)
        self.demonstrated_path_pub.publish(demonstrated_path)
| [
"geometry_msgs.msg.PoseStamped",
"rospy.logerr",
"nav_msgs.msg.Path",
"numpy.asarray",
"yaml.dump",
"rospy.Publisher",
"rospy.Rate",
"pydmps.dmp_discrete.DMPs_discrete",
"rospy.loginfo",
"rospy.get_param",
"rospy.init_node",
"tf.transformations.euler_from_quaternion",
"rospy.spin",
"rospy.... | [((387, 444), 'rospy.init_node', 'rospy.init_node', (['"""learn_dynamic_motion_primitive_service"""'], {}), "('learn_dynamic_motion_primitive_service')\n", (402, 444), False, 'import rospy\n'), ((464, 558), 'rospy.Service', 'rospy.Service', (['"""learn_dynamic_motion_primitive_service"""', 'LearnDMP', 'self.learn_dmp_handler'], {}), "('learn_dynamic_motion_primitive_service', LearnDMP, self.\n learn_dmp_handler)\n", (477, 558), False, 'import rospy\n'), ((595, 637), 'rospy.loginfo', 'rospy.loginfo', (['"""Started learn DMP service"""'], {}), "('Started learn DMP service')\n", (608, 637), False, 'import rospy\n'), ((693, 746), 'rospy.Publisher', 'rospy.Publisher', (['"""~imitated_path"""', 'Path'], {'queue_size': '(1)'}), "('~imitated_path', Path, queue_size=1)\n", (708, 746), False, 'import rospy\n'), ((784, 841), 'rospy.Publisher', 'rospy.Publisher', (['"""~demonstrated_path"""', 'Path'], {'queue_size': '(1)'}), "('~demonstrated_path', Path, queue_size=1)\n", (799, 841), False, 'import rospy\n'), ((897, 957), 'rospy.get_param', 'rospy.get_param', (['"""~weights_file_path"""', '"""../../data/weights/"""'], {}), "('~weights_file_path', '../../data/weights/')\n", (912, 957), False, 'import rospy\n'), ((978, 1007), 'rospy.get_param', 'rospy.get_param', (['"""~loop_rate"""'], {}), "('~loop_rate')\n", (993, 1007), False, 'import rospy\n'), ((1046, 1067), 'rospy.Rate', 'rospy.Rate', (['loop_rate'], {}), '(loop_rate)\n', (1056, 1067), False, 'import rospy\n'), ((1076, 1088), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1086, 1088), False, 'import rospy\n'), ((1220, 1281), 'rospy.loginfo', 'rospy.loginfo', (['"""Recieved request to learn a motion primitive"""'], {}), "('Recieved request to learn a motion primitive')\n", (1233, 1281), False, 'import rospy\n'), ((1341, 1399), 'rospy.loginfo', 'rospy.loginfo', (["('Learning motion primitive ' + req.dmp_name)"], {}), "('Learning motion primitive ' + req.dmp_name)\n", (1354, 1399), False, 'import rospy\n'), 
((2051, 2109), 'rospy.loginfo', 'rospy.loginfo', (['"""Successfully learned the motion primitive"""'], {}), "('Successfully learned the motion primitive')\n", (2064, 2109), False, 'import rospy\n'), ((2979, 3049), 'pydmps.dmp_discrete.DMPs_discrete', 'pydmps.dmp_discrete.DMPs_discrete', ([], {'n_dmps': 'n_dmps', 'n_bfs': 'n_bfs', 'ay': 'None'}), '(n_dmps=n_dmps, n_bfs=n_bfs, ay=None)\n', (3012, 3049), False, 'import pydmps\n'), ((3506, 3545), 'os.path.join', 'join', (['self.weights_file_path', 'file_name'], {}), '(self.weights_file_path, file_name)\n', (3510, 3545), False, 'from os.path import join\n'), ((4077, 4083), 'nav_msgs.msg.Path', 'Path', ([], {}), '()\n', (4081, 4083), False, 'from nav_msgs.msg import Path\n'), ((4555, 4561), 'nav_msgs.msg.Path', 'Path', ([], {}), '()\n', (4559, 4561), False, 'from nav_msgs.msg import Path\n'), ((1458, 1622), 'tf.transformations.euler_from_quaternion', 'tf.transformations.euler_from_quaternion', (['[req.poses[i].orientation.x, req.poses[i].orientation.y, req.poses[i].\n orientation.z, req.poses[i].orientation.w]'], {}), '([req.poses[i].orientation.x, req.\n poses[i].orientation.y, req.poses[i].orientation.z, req.poses[i].\n orientation.w])\n', (1498, 1622), False, 'import tf\n'), ((4204, 4217), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (4215, 4217), False, 'from geometry_msgs.msg import PoseStamped\n'), ((4706, 4719), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (4717, 4719), False, 'from geometry_msgs.msg import PoseStamped\n'), ((3615, 3633), 'yaml.dump', 'yaml.dump', (['data', 'f'], {}), '(data, f)\n', (3624, 3633), False, 'import yaml\n'), ((3698, 3845), 'rospy.logerr', 'rospy.logerr', (['"""Cannot save weight file. Check if the directory of the weight file exists. Related parameter can be found in launch file."""'], {}), "(\n 'Cannot save weight file. Check if the directory of the weight file exists. 
Related parameter can be found in launch file.'\n )\n", (3710, 3845), False, 'import rospy\n'), ((3193, 3218), 'numpy.asarray', 'np.asarray', (['weights[0, :]'], {}), '(weights[0, :])\n', (3203, 3218), True, 'import numpy as np\n'), ((3234, 3259), 'numpy.asarray', 'np.asarray', (['weights[1, :]'], {}), '(weights[1, :])\n', (3244, 3259), True, 'import numpy as np\n'), ((3291, 3316), 'numpy.asarray', 'np.asarray', (['weights[2, :]'], {}), '(weights[2, :])\n', (3301, 3316), True, 'import numpy as np\n'), ((3335, 3360), 'numpy.asarray', 'np.asarray', (['weights[3, :]'], {}), '(weights[3, :])\n', (3345, 3360), True, 'import numpy as np\n'), ((3396, 3421), 'numpy.asarray', 'np.asarray', (['weights[4, :]'], {}), '(weights[4, :])\n', (3406, 3421), True, 'import numpy as np\n'), ((3455, 3480), 'numpy.asarray', 'np.asarray', (['weights[5, :]'], {}), '(weights[5, :])\n', (3465, 3480), True, 'import numpy as np\n')] |
from assembly import *
import matplotlib.pyplot as plt
from scipy.linalg import eig
import numpy as np
#
# Flags: when False, the corresponding interaction matrix is loaded from a
# previously saved .npy file instead of being re-assembled (expensive).
assemble_te = False
assemble_tn = True
#
# Compute the ground state for H2
#
N = 2 # number of electrons in molecule
halfN = 1  # number of doubly-occupied spatial orbitals (closed shell: N/2)
M = 2 # number of nuclei in molecule
# Position of nuclei
R = np.array([[0,0,0],[1.4,0,0]])
#
# Initialize computational mesh
#
level = 3
half_length = 5
L = 2*half_length
Mu, cells = mesh(half_length, level)
K = len(cells) # number of basis functions
#
# Initialize Wave functions
#
A = assemble_laplacian(Mu, L, level)
if assemble_tn:
    # Compute nucleus-electron interaction matrix
    print('Assembling nucleus-electron interaction matrix')
    Tn = assemble_ne_interaction_matrix(R, cells)
    np.save('Tn', Tn)
else:
    Tn = np.load('Tn.npy')
if assemble_te:
    # Electron-electron interaction matrix
    print('Assembling electron-electron interaction matrix')
    Te = assemble_ee_interaction_matrix(cells)
    np.save('Te', Te)
else:
    Te = np.load('Te.npy')
#
# Visualize matrices
#
fig, ax = plt.subplots(nrows=1, ncols=3)
ax[0].imshow(Te)
ax[1].imshow(Tn.todense())
ax[0].set_title('Electron-electron interactions')
ax[1].set_title('Electron-nuclear interactions')
#
# SCF iteration
#
print('starting SCF iteration')
C = np.ones((K, halfN))  # initial guess for the orbital coefficient matrix
max_iteration = 30
converged = False
tol = 1e-4
i = 0
e_old = np.zeros(K)  # NOTE(review): never read below — candidate for removal
while i < max_iteration:
    #
    # Form Fock Matrix
    #
    CCT = C.dot(C.T)  # density-like matrix built from current orbitals
    J = 2*np.diag(CCT)*np.diag(Te)  # Coulomb contribution (diagonal terms)
    # NOTE(review): this rebinds K (previously the basis-set size) to the
    # exchange matrix — confusing shadowing; consider a separate name.
    K = CCT*Te
    F = -0.5*A - Tn + J - K
    #
    # Check self-consistency F(C)C = C diag(e)
    #
    if i > 0:
        error = F.dot(C) - C.dot(np.diag(e))
        error_norm = np.linalg.norm(error)
        """
        error = C_old - C
        error_norm = np.linalg.norm(error)
        """
        converged = error_norm < tol
        # NOTE(review): `converged` is only printed — the loop never breaks
        # on it, so all max_iteration iterations always run.
        print(error_norm, converged)
    #
    # Compute eigenvalues
    #
    C_old = C.copy()
    # NOTE(review): scipy.linalg.eig may return complex e/C since F is not
    # explicitly symmetrized — presumably real in exact arithmetic; verify.
    e, C = eig(F)
    i += 1
ax[2].imshow(F)
ax[2].set_title('Fock Matrix.')
plt.show() | [
"numpy.load",
"numpy.save",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.ones",
"scipy.linalg.eig",
"numpy.array",
"numpy.linalg.norm",
"numpy.diag",
"matplotlib.pyplot.subplots"
] | [((303, 337), 'numpy.array', 'np.array', (['[[0, 0, 0], [1.4, 0, 0]]'], {}), '([[0, 0, 0], [1.4, 0, 0]])\n', (311, 337), True, 'import numpy as np\n'), ((1065, 1095), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)'}), '(nrows=1, ncols=3)\n', (1077, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1302, 1321), 'numpy.ones', 'np.ones', (['(K, halfN)'], {}), '((K, halfN))\n', (1309, 1321), True, 'import numpy as np\n'), ((1384, 1395), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (1392, 1395), True, 'import numpy as np\n'), ((2069, 2079), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2077, 2079), True, 'import matplotlib.pyplot as plt\n'), ((750, 767), 'numpy.save', 'np.save', (['"""Tn"""', 'Tn'], {}), "('Tn', Tn)\n", (757, 767), True, 'import numpy as np\n'), ((783, 800), 'numpy.load', 'np.load', (['"""Tn.npy"""'], {}), "('Tn.npy')\n", (790, 800), True, 'import numpy as np\n'), ((973, 990), 'numpy.save', 'np.save', (['"""Te"""', 'Te'], {}), "('Te', Te)\n", (980, 990), True, 'import numpy as np\n'), ((1006, 1023), 'numpy.load', 'np.load', (['"""Te.npy"""'], {}), "('Te.npy')\n", (1013, 1023), True, 'import numpy as np\n'), ((1993, 1999), 'scipy.linalg.eig', 'eig', (['F'], {}), '(F)\n', (1996, 1999), False, 'from scipy.linalg import eig\n'), ((1501, 1512), 'numpy.diag', 'np.diag', (['Te'], {}), '(Te)\n', (1508, 1512), True, 'import numpy as np\n'), ((1701, 1722), 'numpy.linalg.norm', 'np.linalg.norm', (['error'], {}), '(error)\n', (1715, 1722), True, 'import numpy as np\n'), ((1488, 1500), 'numpy.diag', 'np.diag', (['CCT'], {}), '(CCT)\n', (1495, 1500), True, 'import numpy as np\n'), ((1668, 1678), 'numpy.diag', 'np.diag', (['e'], {}), '(e)\n', (1675, 1678), True, 'import numpy as np\n')] |
import h5py
import click
import itertools as it
from pathlib import Path
from datetime import datetime
import numpy as np
from scipy import signal
def downsample_signal(
    dataset: "h5py.Dataset", ds_factor: int, block_len: int = None
):
    """Low-pass filter and decimate a 1-D signal by ``ds_factor``.

    An 8th-order Chebyshev type-I low-pass filter is applied blockwise
    (carrying the filter state across block boundaries, so the result is
    identical to filtering the whole signal at once), then every
    ``ds_factor``-th sample is kept.

    Parameters
    ----------
    dataset : h5py.Dataset or array-like
        One-dimensional signal to downsample.
    ds_factor : int
        Integer decimation factor.
    block_len : int, optional
        Samples processed per block. Defaults to roughly len(dataset)/100,
        but never below ``ds_factor`` (the previous default collapsed to 0
        for short inputs, silently returning an empty array).

    Returns
    -------
    numpy.ndarray
        The downsampled signal. Trailing samples that do not fill a whole
        block are discarded.
    """
    # 8th order chebyshev filter for downsampling
    flt = signal.iirfilter(
        N=8,
        Wn=1 / ds_factor,
        btype="lowpass",
        output="sos",
        ftype="cheby1",
        rp=3,
    )
    if block_len is None:
        # Aim for ~100 blocks.
        n_blocks = 100
        block_len = max(ds_factor, len(dataset) // n_blocks)
    # Recompute from the final block length so as little data as possible
    # is dropped at the tail.
    n_blocks = len(dataset) // block_len

    # filter state (one 2-element state per second-order section)
    z = np.zeros((flt.shape[0], 2))
    block_ds_len = block_len // ds_factor
    sig_ds = np.empty((block_ds_len * n_blocks,))
    for i in range(n_blocks):
        # Carry z into the next block so the filter sees a continuous signal.
        y, z = signal.sosfilt(
            flt, dataset[i * block_len : (i + 1) * block_len], zi=z
        )
        sig_ds[i * block_ds_len : (i + 1) * block_ds_len] = y[::ds_factor]
        print(f"block {i+1}/{n_blocks} done")
    return sig_ds
def downsample_time(time, ds_factor: int, block_len: int = 1000000):
    """Decimate a timestamp array by keeping every ``ds_factor``-th entry.

    Works blockwise; samples beyond the last full block are dropped.
    """
    n_blocks = len(time) // block_len
    ds_block = block_len // ds_factor
    out = np.empty((ds_block * n_blocks,))
    for blk in range(n_blocks):
        start = blk * block_len
        out[blk * ds_block : (blk + 1) * ds_block] = time[start : start + block_len : ds_factor]
    return out
@click.command()
@click.argument("infile", type=click.Path(exists=True))
@click.option("--outfile", "-o", type=click.Path())
@click.option("--sampling-rate", "-r", type=int, default=100)
def cli(infile, outfile, sampling_rate):
    """Convert a raw HDF5 measurement file to a downsampled CSV.

    Reads the 'time', 'voltage' and 'current' datasets from INFILE,
    downsamples them to approximately ``sampling_rate`` Hz and writes an
    ISO-timestamped CSV with one row per retained sample.
    """
    if outfile is None:
        # Default output: same directory and stem as the input, .csv suffix.
        inpath = Path(infile)
        outfile = inpath.parent / (inpath.stem + ".csv")
    with h5py.File(infile, "r") as hf, open(outfile, "w") as csv_file:
        ds_time = hf["data"]["time"]
        # Estimate the original rate from the first 10k timestamp deltas;
        # timestamps are in nanoseconds (hence the 1e9 factor).
        fs_original = 1e9 / ((ds_time[10000] - ds_time[0]) / 10000)
        print(f"original sampling rate: {int(fs_original/1000)}kHz")
        ds_factor = int(fs_original / sampling_rate)
        print(f"downsampling factor: {ds_factor}")

        # Build the csv header, listing all variables
        header = "time,voltage,current"
        csv_file.write(header + "\n")

        data_downsampled = dict()
        # First downsample the time with 'interval' method
        data_downsampled["time"] = downsample_time(
            ds_time[:].astype(float) / 1e9, ds_factor, block_len=100000
        )
        for var in ["voltage", "current"]:
            ds = hf["data"][var]
            # Apply the calibration settings (gain and offset)
            data_downsampled[var] = downsample_signal(
                ds[:] * ds.attrs["gain"] + ds.attrs["offset"],
                ds_factor,
                block_len=100000,
            )
        for i in range(len(data_downsampled["time"])):
            # NOTE(review): datetime.utcfromtimestamp is deprecated since
            # Python 3.12; consider datetime.fromtimestamp(ts, tz=timezone.utc).
            timestamp = datetime.utcfromtimestamp(data_downsampled["time"][i])
            csv_file.write(timestamp.strftime("%Y-%m-%dT%H:%M:%S.%f"))

            # Format and write to the csv file
            for var in ["voltage", "current"]:
                value = data_downsampled[var][i]
                # Write the value to csv
                csv_file.write(f",{value}")
            # Done with this block - terminate line with \n
            csv_file.write("\n")
            if i % 1000 == 0:
                # Periodic progress report (every 1000 rows).
                click.echo(
                    f"written {100 * float(i)/len(data_downsampled['time']):.2f}%"
                )


if __name__ == "__main__":
    cli()
| [
"h5py.File",
"scipy.signal.sosfilt",
"numpy.empty",
"numpy.zeros",
"click.option",
"click.command",
"datetime.datetime.utcfromtimestamp",
"scipy.signal.iirfilter",
"pathlib.Path",
"click.Path"
] | [((1468, 1483), 'click.command', 'click.command', ([], {}), '()\n', (1481, 1483), False, 'import click\n'), ((1593, 1653), 'click.option', 'click.option', (['"""--sampling-rate"""', '"""-r"""'], {'type': 'int', 'default': '(100)'}), "('--sampling-rate', '-r', type=int, default=100)\n", (1605, 1653), False, 'import click\n'), ((300, 396), 'scipy.signal.iirfilter', 'signal.iirfilter', ([], {'N': '(8)', 'Wn': '(1 / ds_factor)', 'btype': '"""lowpass"""', 'output': '"""sos"""', 'ftype': '"""cheby1"""', 'rp': '(3)'}), "(N=8, Wn=1 / ds_factor, btype='lowpass', output='sos',\n ftype='cheby1', rp=3)\n", (316, 396), False, 'from scipy import signal\n'), ((663, 690), 'numpy.zeros', 'np.zeros', (['(flt.shape[0], 2)'], {}), '((flt.shape[0], 2))\n', (671, 690), True, 'import numpy as np\n'), ((751, 787), 'numpy.empty', 'np.empty', (['(block_ds_len * n_blocks,)'], {}), '((block_ds_len * n_blocks,))\n', (759, 787), True, 'import numpy as np\n'), ((1241, 1277), 'numpy.empty', 'np.empty', (['(block_ds_len * n_blocks,)'], {}), '((block_ds_len * n_blocks,))\n', (1249, 1277), True, 'import numpy as np\n'), ((833, 902), 'scipy.signal.sosfilt', 'signal.sosfilt', (['flt', 'dataset[i * block_len:(i + 1) * block_len]'], {'zi': 'z'}), '(flt, dataset[i * block_len:(i + 1) * block_len], zi=z)\n', (847, 902), False, 'from scipy import signal\n'), ((1737, 1749), 'pathlib.Path', 'Path', (['infile'], {}), '(infile)\n', (1741, 1749), False, 'from pathlib import Path\n'), ((1817, 1839), 'h5py.File', 'h5py.File', (['infile', '"""r"""'], {}), "(infile, 'r')\n", (1826, 1839), False, 'import h5py\n'), ((1515, 1538), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1525, 1538), False, 'import click\n'), ((1578, 1590), 'click.Path', 'click.Path', ([], {}), '()\n', (1588, 1590), False, 'import click\n'), ((2931, 2985), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (["data_downsampled['time'][i]"], {}), "(data_downsampled['time'][i])\n", (2956, 2985), 
False, 'from datetime import datetime\n')] |
import numpy as np
import torch
import torch.nn as nn
import re
from torch.autograd import Variable
from collections import OrderedDict, defaultdict, Counter
# from torch.profiler import profile, record_function, ProfilerActivity
from itertools import zip_longest
import pprint
import sys
# Module-level layer counter; not referenced in this portion of the file —
# presumably incremented elsewhere when layers are enumerated (verify).
GLOBAL_LAYER_IDX = 0
def get_longest_str(l):
    """Return the length of the longest string representation in ``l``.

    Returns 0 for an empty iterable. (The previous version also tracked the
    element itself in an unused local, which has been removed.)
    """
    return max((len(str(item)) for item in l), default=0)
def raise_nesting(nested:list):
    """Flatten one level of nesting.

    Given a list of lists (the inner lists may have any lengths), returns a
    single list containing the inner items in order, e.g.
    ``[[a], [b, c]] -> [a, b, c]``.
    """
    flattened = []
    for inner in nested:
        flattened.extend(inner)
    return flattened
def format_layer_names(layer, summary):
    """Build one display name per input/output pair of ``layer``.

    A layer with several inputs/outputs occupies several summary rows: the
    first row keeps the plain layer name, every subsequent row gets a
    "(k)" prefix with k starting at 2, e.g. ["conv", "(2)conv"].
    """
    in_shapes = summary[layer]["input_shapes"]
    out_shapes = summary[layer]["output_shapes"]
    # One row per input/output pair; shorter list padded implicitly.
    n_rows = max(len(in_shapes), len(out_shapes))
    return [f"{layer}" if idx == 0 else f"({idx + 1}){layer}" for idx in range(n_rows)]
def get_col_lengths(summary, column_headers):
    """Compute the display width of each summary column.

    Each width is the longest string representation found in the column,
    the column's header text included.

    Returns (name_cols, in_cols, out_cols, np_cols).
    """
    # Layer-name column: every display name of every layer, plus the header.
    all_names = []
    for layer in summary:
        all_names.extend(format_layer_names(layer, summary))
    all_names.append(column_headers["layer_id"])
    name_cols = get_longest_str(all_names)

    # Input-shape column: flatten the per-layer shape lists, plus the header.
    flat_inputs = [shape for layer in summary for shape in summary[layer]["input_shapes"]]
    flat_inputs.append(column_headers["input_shapes"])
    in_cols = get_longest_str(flat_inputs)

    # Output-shape column, same treatment.
    flat_outputs = [shape for layer in summary for shape in summary[layer]["output_shapes"]]
    flat_outputs.append(column_headers["output_shapes"])
    out_cols = get_longest_str(flat_outputs)

    # Parameter-count column (counts stringified before measuring).
    param_strs = [f"{summary[layer]['nb_params']}" for layer in summary]
    param_strs.append(column_headers["params"])
    np_cols = get_longest_str(param_strs)

    return name_cols, in_cols, out_cols, np_cols
def print_header_line(format_str, col_lengths, model_name=""):
    """Print the table banner (centered model name + column headers).

    Returns the formatted header row so callers can reuse its length when
    printing separator lines.
    """
    name_w, in_w, out_w, np_w = col_lengths
    header = format_str.format(
        name="Layer Type",
        name_cols=name_w,
        inp="Input Shape",
        in_cols=in_w,
        out="Output Shape",
        out_cols=out_w,
        params="Param #",
        np_cols=np_w,
    )
    width = len(header)
    print("-" * width)
    print(f"{model_name:^{width}}")
    print("-" * width)
    print(header)
    print("=" * width)
    return header
def get_output_shapes(summary, layer):
    """Split a layer's input/output shape lists into the first pair and extras.

    Modules may have several inputs and/or outputs: the first pair goes on
    the layer's main summary row, the rest on continuation rows. Shorter
    lists are padded with None by zip_longest.

    Returns (first_in, first_out, extra_in, extra_out); the first two are
    None when the layer has no recorded shapes at all.
    """
    # (Removed the unused `extra_shapes` local from the previous version.)
    first_in, first_out, extra_in, extra_out = None, None, [], []
    pairs = zip_longest(summary[layer]["input_shapes"], summary[layer]["output_shapes"])
    for i, (input_shape, out_shape) in enumerate(pairs):
        if i == 0:
            first_in, first_out = input_shape, out_shape
        else:
            extra_in.append(input_shape)
            extra_out.append(out_shape)
    return first_in, first_out, extra_in, extra_out
def print_info_line(summary, layer, format_str, col_lengths):
    """Print the summary row(s) for one layer.

    The layer's statistics are shown with its first input/output pair; any
    additional input/output tensors get name-only continuation rows with a
    dash in the parameter column.
    """
    name_w, in_w, out_w, np_w = col_lengths
    first_in, first_out, extra_in, extra_out = get_output_shapes(summary, layer)
    all_names = format_layer_names(layer, summary)
    main_name, extra_names = all_names[0], all_names[1:]
    print(format_str.format(
        name=main_name,
        name_cols=name_w,
        inp=str(first_in),
        in_cols=in_w,
        out=str(first_out),
        out_cols=out_w,
        params=summary[layer]["nb_params"],
        np_cols=np_w,
    ))
    # Continuation rows for layers with more than one input/output tensor.
    print_excess_info(extra_in, extra_out, extra_names, format_str, col_lengths)
def print_excess_info(extra_ins, extra_outs, extra_names, format_str, col_lengths):
    """Print continuation rows for a layer with multiple input/output tensors.

    Each row shows the marker name and the shapes only; the parameter column
    is a dash because the count was already reported on the main row.
    """
    name_w, in_w, out_w, np_w = col_lengths
    for inp_shape, out_shape, row_name in zip_longest(extra_ins, extra_outs, extra_names):
        print(format_str.format(
            name=row_name,
            name_cols=name_w,
            inp=str(inp_shape),
            in_cols=in_w,
            out=str(out_shape),
            out_cols=out_w,
            params="-",
            np_cols=np_w,
        ))
def get_all_keys(summaries, seen=None, ignore=None):
    """Collect summary-dict keys in first-seen order, recursing into "submods".

    Parameters
    ----------
    summaries : list of dict
        Nested summary dicts; a "submods" entry holds a list of child dicts.
    seen : set, optional
        Keys already emitted, shared across the recursion. A fresh set is now
        created per call when omitted — the previous mutable default
        (``seen=set()``) leaked state between independent top-level calls.
    ignore : list, optional
        Keys to skip entirely (same mutable-default fix as ``seen``).

    Returns
    -------
    list
        Unique keys, depth-first, in order of first appearance.
    """
    if seen is None:
        seen = set()
    if ignore is None:
        ignore = []
    order = []
    for summary in summaries:
        for key in summary.keys():
            if key in ignore:
                continue
            if key != "submods":
                if key not in seen:
                    seen.add(key)
                    order.append(key)
            else:  # recurse into the child summaries
                order.extend(get_all_keys(summary["submods"], seen, ignore))
    return order
def prepend_and_stringify(nested_l, prefix, cols, prefix_cols_fmt_strs:dict, depth_col_idx=3):
    """Return a copy of the rows with selected columns rendered as strings.

    For every column whose name appears in ``prefix_cols_fmt_strs``, the cell
    becomes ``prefix`` repeated ``row[depth_col_idx]`` times followed by the
    cell value rendered with that column's format spec (e.g. "," for a
    thousands separator). All other cells are carried over unchanged.
    """
    result = []
    for row in nested_l:
        # Depth is constant per row; read it once.
        depth = int(row[depth_col_idx])
        new_row = []
        for idx, cell in enumerate(row):
            col_name = cols[idx]
            if col_name in prefix_cols_fmt_strs:
                cell = prefix * depth + format(cell, prefix_cols_fmt_strs[col_name])
            new_row.append(cell)
        result.append(new_row)
    return result
def convert_nested_summary(submods_summaries, depth=0, col_names=None):
    """Flatten a nested (submodule) summary into rows, depth-first.

    At the top level (depth == 0) the column order is derived from every key
    present anywhere in the nested structure; recursive calls reuse it so
    all rows share one layout. Missing cells become "N/A", except the
    "parameters" column which receives a fresh empty parameter dict so
    ancestors can aggregate into it.

    Returns (rows, col_names).
    """
    if col_names is None:
        # Avoid the mutable-default pitfall of the previous `col_names=[]`.
        col_names = []
    if depth == 0:
        col_names = get_all_keys(submods_summaries, seen=set(), ignore=["trainable"])
    rows = []
    for submod_summary in submods_summaries:
        row = []
        for col in col_names:
            if col not in submod_summary:
                if col == "parameters":
                    # Empty accumulator dict for later roll-up of child totals.
                    row.append(init_param_dict())
                else:
                    row.append("N/A")
            else:
                row.append(submod_summary[col])
        rows.append(row)
        if "submods" in submod_summary:
            child_rows, _ = convert_nested_summary(submod_summary["submods"], depth=depth + 1, col_names=col_names)
            rows.extend(child_rows)
    return rows, col_names
def merge_cols(summary_rows, col_names):
    """Fuse the first three columns into a single "layer_id" column.

    Each row's leading (var_name, class, layer_num) triple becomes the
    string "[num]name(class)". Note: the row lists and ``col_names`` are
    mutated in place, and the (rebuilt rows, updated col_names) pair is
    returned.
    """
    merged_rows = []
    for row in summary_rows:
        cls_name = row.pop(1)
        layer_num = row.pop(1)
        fused = "[" + str(layer_num) + "]" + str(row[0]) + "(" + str(cls_name) + ")"
        merged_rows.append([fused] + row[1:])
    # Drop the two merged column names and relabel the first column.
    del col_names[1:3]
    col_names[0] = "layer_id"
    return merged_rows, col_names
def aggregate(parameter_dict, accum):
    """Add ``parameter_dict``'s parameter counts into the accumulator.

    Missing keys count as 0. Mutates ``accum`` in place and returns it.
    """
    for field in ("total", "num_bytes", "trainable"):
        accum[field] += parameter_dict.get(field, 0)
    return accum
def add_cumulative_params(summary_rows, col_names, depth_col_idx=3):
    """Roll each row's parameter dict up into all of its ancestor rows.

    ``summary_rows`` is a depth-first flattening of the module tree, so a
    row's ancestors are exactly the rows currently on the
    ``accumulating_idxs`` stack: an index is pushed when the depth increases
    (the previous row was a container) and one is popped per level when the
    depth decreases. The last column of each ancestor row (its parameter
    dict) is mutated in place via ``aggregate``; ``summary_rows`` is
    returned.

    depth_col_idx: column holding each row's depth in the module hierarchy.
    """
    prev_depth = 0
    # depth_col_idx = 1
    param_col_idx = -1
    accumulating_idxs = []  # stack of ancestor row indices
    # stop_idx = 20
    for i in range(len(summary_rows)):
        # if i == stop_idx:
        #     break
        # print(i, summary_rows[i])
        cur_depth = int(summary_rows[i][depth_col_idx])
        if cur_depth > prev_depth:
            # Descended a level: the previous row is a new ancestor.
            accumulating_idxs.append(i-1)
        elif cur_depth < prev_depth:
            # Came back up: drop one ancestor per level we left.
            dif = prev_depth - cur_depth
            for j in range(dif):
                accumulating_idxs.pop(-1)
        # print(i, "Accumulating:",accumulating_idxs)
        for acc_idx in accumulating_idxs:
            # print(i, "Accumulating:", accumulating_idxs)
            summary_rows[acc_idx][-1] = aggregate(summary_rows[i][param_col_idx],
                                                    summary_rows[acc_idx][param_col_idx])
        prev_depth = cur_depth
    return summary_rows#[:stop_idx]
def convert_to_dict_of_dicts(summary_rows, col_names):
    """Re-key flattened rows into {layer_id: {column: value}}.

    The "layer_id" column becomes the outer key; every other column goes
    into the per-layer OrderedDict. All rows must have the same length as
    the first row (asserted).
    """
    result = OrderedDict()
    expected_len = len(summary_rows[0])
    for row in summary_rows:
        assert len(row) == expected_len
        layer_key = None
        entry = OrderedDict()
        for value, column in zip(row, col_names):
            if column == "layer_id":
                layer_key = value
            else:
                entry[column] = value
        result[layer_key] = entry
    return result
def print_final_summary(input_size, batch_size, total_output, total_params,
                        header_line, trainable_params, total_param_size,
                        total_buffer_size, device, mem_stats):
    """
    Prints aggregate statistics

    Three sections, each under a divider sized to ``header_line``:
      1. Parameter counts (total / trainable / non-trainable).
      2. Estimated sizes (MB) of input, activations, parameters, buffers.
      3. Measured peak memory during a mock training step with SGD and Adam,
         read from ``mem_stats[optimizer][stat]`` (values already in MB).

    input_size: shape of a single input sample (batch dim excluded).
    total_output: total number of activation elements across layers.
    header_line: formatted table header; only its length is used here.
    total_param_size / total_buffer_size: sizes in BYTES, converted below.
    device: unused in this function (kept for interface compatibility).
    """
    # Estimate model's size in memory
    # assumes 4 bytes/number (float on cuda).
    total_input_size = np.prod(input_size) * batch_size * 4. / (2**20)
    total_activations_size = total_output * 4. / (2**20)
    total_param_size = total_param_size /(2**20)
    total_buffer_size = total_buffer_size/(2**20)
    # NOTE(review): total_size omits total_buffer_size — confirm intentional.
    total_size = total_activations_size + total_input_size + total_param_size
    # Number of Parameter
    # longest label
    padding =len("Non-trainable params: ")
    label_fmt = "<"+str(padding)
    data_fmt = ","
    print("="*len(header_line))
    print(f'{"Total Number of Parameters":^{len(header_line)}}')
    print("-"*len(header_line))
    print(f"{'Total params: ':{label_fmt}}{total_params:{data_fmt}}")
    print(f"{'Trainable params: ':{label_fmt}}{trainable_params:{data_fmt}}")
    print(f"{'Non-trainable params: ':{label_fmt}}{total_params - trainable_params:{data_fmt}}")
    # Size and
    # longest label
    padding =len('Size of layer activations (MB): ')
    label_fmt = "<"+str(padding)
    data_fmt = "0,.2f"
    print("-"*len(header_line))
    print(f'{"Parameter and Activation Sizes":^{len(header_line)}}')
    print("-"*len(header_line))
    print(f"{'Input size (MB): ':{label_fmt}}{total_input_size:{data_fmt}}")
    print(f"{'Size of layer activations (MB): ':{label_fmt}}{total_activations_size:{data_fmt}}")
    print(f"{'Params size (MB): ':{label_fmt}}{total_param_size:{data_fmt}}")
    print(f"{'Buffer size (MB): ':{label_fmt}}{total_buffer_size:{data_fmt}}")
    print(f"{'Estimated total size (MB): ':{label_fmt}}{total_size:{data_fmt}}")
    padding =len("Max memory allocated w/ Adam (MB): ")
    label_fmt = "<"+str(padding)
    print("-"*len(header_line))
    print(f'{"Actual Memory Usage During Mock Training":^{len(header_line)}}')
    print("-"*len(header_line))
    print("----SGD")
    print(f"{'Max memory allocated w/ SGD (MB): ':{label_fmt}}{mem_stats['sgd']['max_mem_alloc']:{data_fmt}}")
    print(f"{'Max memory reserved w/ SGD (MB): ':{label_fmt}}{mem_stats['sgd']['max_mem_res']:{data_fmt}}")
    print("----Adam")
    print(f"{'Max memory allocated w/ Adam (MB): ':{label_fmt}}{mem_stats['adam']['max_mem_alloc']:{data_fmt}}")
    print(f"{'Max memory reserved w/ Adam (MB): ':{label_fmt}}{mem_stats['adam']['max_mem_res']:{data_fmt}}")
    print("-"*len(header_line), flush=True)
def prep_summary_info(summaries, depth_prefix):
    """Transform the nested per-module summaries into the flat OrderedDict
    of {layer_id: info} entries that the printing code consumes."""
    # Flatten the module tree into rows, depth-first.
    rows, columns = convert_nested_summary(summaries)
    # Roll submodule parameter counts up into their container rows.
    rows = add_cumulative_params(rows, columns)
    # Indent each variable name according to its depth in the hierarchy.
    rows = prepend_and_stringify(rows, depth_prefix, columns, prefix_cols_fmt_strs={"var_name": ""}, depth_col_idx=3)
    # Fuse name/class/number into a single "layer_id" column.
    rows, columns = merge_cols(rows, columns)
    # Derive a total parameter count column from each row's parameter dict
    # (the dict is the last element of every row).
    for row in rows:
        row.append(row[-1]["total"])
    columns.append("nb_params")
    # Stringify the new count with thousands separators; after merge_cols
    # the depth column sits at index 1.
    rows = prepend_and_stringify(rows, depth_prefix, columns, prefix_cols_fmt_strs={"nb_params": ","}, depth_col_idx=1)
    # Finally re-key the rows by layer_id as expected downstream.
    return convert_to_dict_of_dicts(rows, columns)
def print_model_info(model,
                     submods_summaries,
                     input_size,
                     batch_size,
                     num_spaces=3,
                     column_headers=None,
                     depth_prefix="--",
                     device="cpu",
                     mem_stats=None,
                     model_name=None):
    """
    Top level method in the hierarchy for controlling printouts.

    Flattens the collected per-module summaries, prints one formatted line
    per layer, then prints aggregate totals via print_final_summary.

    Parameters
    ----------
    model : nn.Module
        Model being summarized (used for its class name).
    submods_summaries : dict
        Nested summary dict with a "submods" list, as built by the hooks.
    input_size : tuple
        Shape of a single input sample (no batch dimension).
    batch_size : int
        Batch size used for the forward pass.
    num_spaces : int
        Number of spaces between printed columns.
    column_headers : dict or None
        Mapping of column keys to printed header strings.
    depth_prefix : str
        Prefix repeated per hierarchy level in front of layer names.
    model_name : str or None
        Optional human-readable name printed alongside the class name.
    """
    # Fix: the class name was computed identically in both branches of an
    # if/else; compute it once and only decorate it when model_name is given.
    model_class_name = str(model.__class__).split(".")[-1].split("'")[0]
    if model_name is not None:
        model_class_name = model_name + " (" + model_class_name + ")"
    summary = prep_summary_info(submods_summaries["submods"], depth_prefix=depth_prefix)
    # Get lengths to format columns
    col_lengths = get_col_lengths(summary, column_headers)
    spacing = " "*num_spaces
    format_str = "{name:<{name_cols}}"+spacing+"{inp:<{in_cols}}"+spacing+"{out:<{out_cols}}"+spacing+"{params:<{np_cols}}"+spacing
    header_line = print_header_line(format_str, col_lengths, model_class_name)
    total_params = 0
    total_output = 0
    trainable_params = 0
    total_param_size = 0
    total_buffer_size = 0
    for layer_name, layer_info_dict in summary.items():
        # Print info for each layer
        print_info_line(summary, layer_name, format_str, col_lengths)
        # Aggregate totals from depth-0 entries only — presumably deeper
        # entries were already rolled up by add_cumulative_params (see
        # prep_summary_info) — TODO confirm.
        if layer_info_dict["depth"] == 0:
            total_params += layer_info_dict["parameters"]["total"]
            trainable_params += layer_info_dict["parameters"]["trainable"]
            total_param_size += layer_info_dict["parameters"]["num_bytes"]
            total_buffer_size += layer_info_dict["parameters"]["num_buffer_bytes"]
            if layer_info_dict["is_standard"] and layer_info_dict["parameters"]["total"] > 0: # Don't count the container layers, they are already included implicitly
                total_output += np.prod(sum([np.prod(shape) for shape in layer_info_dict["output_shapes"]]))
    # Fix: removed dead code that built an unused `submods` list here.
    print_final_summary(input_size, batch_size, total_output, total_params,
                        header_line, trainable_params, total_param_size,
                        total_buffer_size, device, mem_stats)
def remove_layers(summary):
    """
    Called when print_major_layers_only is True.

    Removes layers with no weights, like activation layers, while retaining
    layers that manipulate shapes — otherwise reading the model summary can
    be confusing.

    Parameters
    ----------
    summary : dict
        Flat mapping of layer id -> info dict with at least "nb_params",
        "input_shapes" and "output_shapes" keys.

    Returns
    -------
    OrderedDict
        New summary containing only parametrized or shape-changing layers.
    """
    new_summary = OrderedDict()
    # Bug fix: this was `prev_out_size = None` while the loop below read
    # `prev_out_shapes`, raising NameError on the first zero-parameter layer.
    prev_out_shapes = []
    for layer in summary.keys():
        if summary[layer]["nb_params"] <= 0.0:
            input_shapes = summary[layer]["input_shapes"]
            out_shapes = summary[layer]["output_shapes"]
            # Check for different shapes btw previous output and current input,
            # then curr input w/ curr output
            should_keep = False
            for in_shape in input_shapes:
                for prev_out_shape in prev_out_shapes:
                    if in_shape != prev_out_shape:
                        should_keep = True
                for out_shape in out_shapes:
                    # Bug fix: this was `==`, which kept shape-PRESERVING
                    # layers — the opposite of the documented intent of
                    # retaining layers that change shape.
                    if in_shape != out_shape:
                        should_keep = True
            if not should_keep:
                continue
        prev_out_shapes = summary[layer]["output_shapes"]
        new_summary[layer] = summary[layer]
    return new_summary
def init_param_dict():
    """Return a fresh, zeroed parameter-statistics record for one layer."""
    return dict(total=0, num_bytes=0, num_buffer_bytes=0, trainable=0, DTs=set())
def init_summary_info(module, parent_mod_summary, input_tuple, mod_var_name, depth):
    """
    Creates and stores information about a pytorch module. This
    function stores the information that does not need specialized
    logic to handle.

    Parameters
    ----------
    module : nn.Module
        Module whose info is being recorded.
    parent_mod_summary : dict
        Summary of the enclosing module (used to detect Sequential parents).
    input_tuple : tuple
        Positional inputs passed to the module's forward().
    mod_var_name : str
        Attribute name of the module within its parent.
    depth : int
        Depth of the module in the module hierarchy.
    """
    global GLOBAL_LAYER_IDX
    classname = str(module.__class__).split(".")[-1].split("'")[0]
    summary = OrderedDict()
    # Add identifier when part of a nn.Sequential set of layers
    if "module_class" in parent_mod_summary.keys() and \
        parent_mod_summary["module_class"] == "Sequential":
        var_name = "seq_"+parent_mod_summary["var_name"]+"-"+str(int(mod_var_name)+1)
    else:
        var_name = mod_var_name
    summary["var_name"] = var_name
    summary["module_class"] = classname
    summary["layer_number"] = GLOBAL_LAYER_IDX
    summary["depth"] = depth
    # Input is stored as tuples containing possibly more than one tensor.
    # For each layer store as list of lists: [[dim1, dim2,...], [dim1,dim2,...],...]
    # Bug fix: `type(sub_input) == torch.tensor` was always False because
    # torch.tensor is a factory function, not a class, so input shapes were
    # silently recorded as empty. Use isinstance against torch.Tensor.
    summary["input_shapes"] = [list(sub_input.size())
                               for sub_input in input_tuple
                               if isinstance(sub_input, torch.Tensor)]
    GLOBAL_LAYER_IDX += 1
    return summary
def hook_wrapper(hook_type, mod_var_name="", depth=-1, parent_mod_summary=None, handles=None):
    """
    Return a forward-hook closure of the requested kind.

    The closures capture ``mod_var_name``, ``depth``, ``parent_mod_summary``
    and ``handles`` so they can record sizing and parameter information
    hierarchically while the model's forward pass runs.

    Parameters
    ----------
    hook_type : str
        One of "standard", "custom_pre", "custom_post".
    mod_var_name : str
        Attribute name of the module within its parent.
    depth : int
        Depth of the module in the module hierarchy.
    parent_mod_summary : dict
        Summary dict the hook should record into.
    handles : list
        Accumulator of registered hook handles so they can be removed later.

    Returns
    -------
    callable or None
        The requested hook function, or None for an unknown ``hook_type``.
    """
    ############### Embedded closure ###############
    def standard_module_hook(module: nn.Module, input_tuple, output_tuple):
        """
        This function should be sent to the nn.Module.register_forward_hook(...)
        function. It will then be called after the forward function of the
        registered nn.Module. We track sizing information and store it in the
        current parent module's summary (in the 'submods' field).
        """
        summary = init_summary_info(module, parent_mod_summary, input_tuple, mod_var_name, depth)
        summary["is_standard"] = True
        # Output may or may not be a tuple
        if isinstance(output_tuple, (tuple, list)):
            summary["output_shapes"] = [list(o.size()) for o in output_tuple]
        elif isinstance(output_tuple, torch.Tensor):
            summary["output_shapes"] = [list(output_tuple.size())]
        else:
            raise ValueError("Expected forward output to be either torch.Tensor, tuple, or list")
        # Gather parameter info for this module only (recurse=False); child
        # modules get their own hooks.
        summary["parameters"] = init_param_dict()
        for tens in module.parameters(recurse=False):
            num_params = tens.nelement()
            summary["parameters"]["total"] += num_params
            summary["parameters"]["DTs"].add(tens.dtype)
            if tens.requires_grad:
                summary["parameters"]["trainable"] += num_params
            summary["parameters"]["num_bytes"] += num_params*tens.element_size()
        for tens in module.buffers(recurse=False):
            summary["parameters"]["num_buffer_bytes"] += tens.nelement()*tens.element_size()
        parent_mod_summary["submods"].append(summary)
    ############### New embedded closure ###############
    def custom_module_pre_hook(module: nn.Module, input_tuple):
        """
        This is used for user defined layers. These will typically contain several
        "standard" nn.Module layers such as nn.Linear. These are treated as a container
        of the standard nn.Module layers. As such, nn.Sequential layers are also
        handled with this method. I refer to these containers as "custom" to
        differentiate them from the "standard" modules.
        We register a new set of hooks for each "child" module and record their
        information hierarchically to preserve structural information. This is
        done BEFORE the forward method is called, so this hook must be registered
        with register_forward_pre_hook.
        This function does not store parameter info. Those will be aggregated later
        from the "submods". This is to avoid counting parameters that don't actually get
        used in the forward method (either through lazy coding or specialized logic).
        """
        new_parent_mod_summary = init_summary_info(module, parent_mod_summary, input_tuple, mod_var_name, depth)
        new_parent_mod_summary["is_standard"] = False
        new_parent_mod_summary["submods"] = list()
        parent_mod_summary["submods"].append(new_parent_mod_summary)
        register_hooks(module, new_parent_mod_summary, depth+1, handles=handles)
    ############### New embedded closure ###############
    def custom_module_post_hook(module: nn.Module, input_tuple, output_tuple):
        """
        This hook merely stores the output size of the custom module that was
        summarized by custom_module_pre_hook. The output_tuple is only available
        after the forward method is called.
        """
        # Aliasing to add clarity.
        # In this hook we are not adding to the parent module's summary list;
        # parent_mod_summary IS this container's own summary.
        mod_summary = parent_mod_summary
        if isinstance(output_tuple, (tuple, list)):
            mod_summary["output_shapes"] = [list(o.size()) for o in output_tuple]
        elif isinstance(output_tuple, torch.Tensor):
            mod_summary["output_shapes"] = [list(output_tuple.size())]
        else:
            raise ValueError("Expected forward output to be either torch.Tensor, tuple, or list")
    ############### End of embedded closures ###############
    if hook_type == "standard":
        return standard_module_hook
    elif hook_type == "custom_pre":
        return custom_module_pre_hook
    elif hook_type == "custom_post":
        return custom_module_post_hook
    else:
        return None
def register_hooks(parent_module, parent_mod_summary, depth=0, handles=None):
    """
    Registers hooks with every module on a given level of the model's module hierarchy
    using the nn.Module.children() iterator.

    "Standard" torch.nn leaf modules get a forward hook that records shapes and
    parameters; every other child is treated as a container and gets a forward
    *pre* hook that recurses into it (see hook_wrapper).

    Returns the accumulated list of hook handles so callers can remove them.
    """
    if handles is None:
        handles = list()
    # Only functions calling this one after the initial call will be "custom" modules
    # Use the special hook: containers also get a post-forward hook so their own
    # output shapes are recorded after forward() runs.
    if depth > 0:
        handle = parent_module.register_forward_hook(hook_wrapper("custom_post", parent_mod_summary=parent_mod_summary))
        handles.append(handle)
    for i, (sm_name,submod) in enumerate(parent_module.named_children()):
        # Fully qualified class name, e.g. "torch.nn.modules.linear.Linear".
        submod_qualified_classname = str(submod.__class__).split("'")[1].strip()
        # Treat the child as a "standard" leaf when: it is a torch.nn module,
        # its only children (if any) are NonDynamicallyQuantizableLinear
        # wrappers, and it owns at least one parameter. Everything else is
        # handled as a container ("custom") module.
        if (re.match(r"^torch\.nn\.modules.*", submod_qualified_classname) is not None) \
            and (len([c for c in submod.children() if re.match(r"^.*NonDynamicallyQuantizableLinear", str(c.__class__)) is None]) == 0 \
            and len([p for p in submod.parameters()]) > 0):
            # NOTE(review): leftover debug print for MultiheadAttention —
            # consider removing.
            if parent_mod_summary.get("module_class", None) is not None and parent_mod_summary['module_class'] == 'MultiheadAttention':
                print("About to register standard hook")
            handle = submod.register_forward_hook(
                        hook_wrapper(hook_type="standard",
                                     mod_var_name=sm_name,
                                     depth=depth,
                                     parent_mod_summary=parent_mod_summary))
        else: #Custom module
            handle = submod.register_forward_pre_hook(
                        hook_wrapper(hook_type="custom_pre",
                                     mod_var_name=sm_name,
                                     depth=depth,
                                     parent_mod_summary=parent_mod_summary, handles=handles))
        handles.append(handle)
    return handles
def get_stats_from_training_loop(model, batch_size, input_shape, dtype, device, optimizer=None, loop_iters=5, model_name=None):
    """
    Run a short mock training loop and report peak CUDA memory usage.

    Bug fix: ``model_name`` was referenced in the body but never defined
    (NameError at runtime); it is now an explicit keyword argument defaulting
    to None, which keeps existing callers working.

    Parameters
    ----------
    model : nn.Module
        Model to run; must already be on ``device``.
    batch_size : int
        Batch size of the random input.
    input_shape : tuple
        Shape of a single input sample (no batch dimension).
    dtype : type
        Tensor type the random input is cast to (e.g. torch.cuda.FloatTensor).
    device : str or torch.device
        Device whose memory statistics are queried.
    optimizer : torch.optim.Optimizer
        Optimizer to step each iteration (expected to have zero lr/decay).
    loop_iters : int
        Number of mock iterations to run.
    model_name : str or None
        When "Audio Model", the forward pass is called with view_id="english".

    Returns
    -------
    dict
        {"max_mem_alloc": <MiB>, "max_mem_res": <MiB>}.
    """
    out, x = None, None
    for i in range(loop_iters):
        print(i+1, end="\r", flush=True)
        x = torch.rand((batch_size, *input_shape)).type(dtype)
        if model_name == "Audio Model":
            out = model(x, view_id="english")
        else:
            out = model(x)
        optimizer.zero_grad()
        # Zero-valued loss: backward() still allocates gradients, but the
        # weights are never actually changed.
        loss = torch.sum(out[0])*0
        loss.backward()
        optimizer.step()
    del out, x
    mem_stats = dict()
    mem_stats["max_mem_alloc"] = torch.cuda.max_memory_allocated(device)/(2**20)  # bytes -> MiB
    mem_stats["max_mem_res"] = torch.cuda.max_memory_reserved(device)/(2**20)
    return mem_stats
def get_memory_stats(model, batch_size, input_shape, dtype, device):
    """
    Measure peak memory for mock training with SGD and then Adam.

    A single iteration is enough for SGD, while Adam's readings stabilize
    after a few iterations. Peak-memory counters are reset before each run.

    Returns
    -------
    dict
        {"sgd": {...}, "adam": {...}} as produced by
        get_stats_from_training_loop.
    """
    mem_stats = dict()
    trainables = [p for p in model.parameters() if p.requires_grad]
    # lr and weight_decay are zero so the weights are never actually updated.
    for label, opt_cls, iters in (("sgd", torch.optim.SGD, 1),
                                  ("adam", torch.optim.Adam, 5)):
        torch.cuda.reset_peak_memory_stats()
        optimizer = opt_cls(trainables, lr=0.0, weight_decay=0.0)
        mem_stats[label] = get_stats_from_training_loop(model,
                                                       batch_size,
                                                       input_shape,
                                                       dtype,
                                                       device,
                                                       optimizer=optimizer,
                                                       loop_iters=iters)
        del optimizer
    return mem_stats
def my_model_summary(model,
                     input_shape,
                     batch_size=1,
                     device="cuda",
                     print_parametarized_layers_only=False,
                     spacing=3,
                     # NOTE(review): a mutable dict as a default argument is
                     # fragile; it is not mutated here, but a None default
                     # built inside the function would be safer.
                     column_headers = { # Change values to adjust column headers
                         "layer_id" : "Layer ID",
                         "input_shapes" : "Input Dims",
                         "output_shapes" : "Output Dims",
                         "params" : "#Params"
                     },
                     mock_train4_mem_stats=False,
                     model_name=None):
    """
    Entry point: attach summary hooks, run one forward pass with random
    input, and print a per-layer model summary plus memory statistics.

    When model_name == "Audio Model" the forward pass is called with
    view_id="english"; every other model is called with the input only.
    """
    global GLOBAL_LAYER_IDX
    GLOBAL_LAYER_IDX = 0
    device = device.lower()
    assert device in [
        "cuda",
        "cpu",
    ], "Input device is not valid, please specify 'cuda' or 'cpu'"
    assert device == "cpu" or torch.cuda.is_available()
    model = model.to(device)
    # Use CUDA tensors only if the model actually ended up on the GPU.
    if device == "cuda" and torch.cuda.is_available() and next(model.parameters()).is_cuda:
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor
    # Get memory stats for SGD and Adam optimization (Adam requires more memory)
    if mock_train4_mem_stats:
        mem_stats = get_memory_stats(model, batch_size, input_shape, dtype, device)
    else:
        mem_stats = {"sgd":{"max_mem_alloc":0.0, "max_mem_res":0.0},
                     "adam":{"max_mem_alloc":0.0, "max_mem_res":0.0}}
    # create dict to hold properties
    mod_summary = OrderedDict()
    # Top layer summary only holds submods list
    mod_summary["submods"] = list()
    # register hooks
    handles = register_hooks(model, mod_summary)
    # make a forward pass
    x = torch.rand((batch_size, *input_shape)).type(dtype)
    if model_name == "Audio Model":
        out = model(x, view_id="english")
    else:
        out = model(x)
    # print(torch.cuda.memory_summary('cuda'))
    # print()
    ### remove these hooks
    for h in handles:
        h.remove()
    if print_parametarized_layers_only:
        # NOTE(review): `summary` is undefined in this scope, so this branch
        # raises NameError when print_parametarized_layers_only=True.
        # remove_layers() expects the flat per-layer summary (it reads
        # "nb_params"), which is only built inside print_model_info via
        # prep_summary_info — TODO: fix by filtering there instead.
        mod_summary = remove_layers(summary)
    # display info
    print_model_info(model,
                     mod_summary,
                     input_shape,
                     batch_size,
                     num_spaces=spacing,
                     column_headers=column_headers,
                     device=device,
                     mem_stats=mem_stats,
                     depth_prefix="--",
                     model_name=model_name)
| [
"torch.cuda.max_memory_allocated",
"torch.cuda.reset_peak_memory_stats",
"itertools.zip_longest",
"re.match",
"torch.optim.Adam",
"torch.cuda.is_available",
"torch.cuda.max_memory_reserved",
"torch.rand",
"collections.OrderedDict",
"torch.sum",
"numpy.prod",
"torch.optim.SGD"
] | [((10831, 10844), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10842, 10844), False, 'from collections import OrderedDict, defaultdict, Counter\n'), ((17834, 17847), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17845, 17847), False, 'from collections import OrderedDict, defaultdict, Counter\n'), ((19215, 19228), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19226, 19228), False, 'from collections import OrderedDict, defaultdict, Counter\n'), ((28775, 28811), 'torch.cuda.reset_peak_memory_stats', 'torch.cuda.reset_peak_memory_stats', ([], {}), '()\n', (28809, 28811), False, 'import torch\n'), ((28828, 28881), 'torch.optim.SGD', 'torch.optim.SGD', (['trainables'], {'lr': '(0.0)', 'weight_decay': '(0.0)'}), '(trainables, lr=0.0, weight_decay=0.0)\n', (28843, 28881), False, 'import torch\n'), ((29526, 29562), 'torch.cuda.reset_peak_memory_stats', 'torch.cuda.reset_peak_memory_stats', ([], {}), '()\n', (29560, 29562), False, 'import torch\n'), ((29579, 29633), 'torch.optim.Adam', 'torch.optim.Adam', (['trainables'], {'lr': '(0.0)', 'weight_decay': '(0.0)'}), '(trainables, lr=0.0, weight_decay=0.0)\n', (29595, 29633), False, 'import torch\n'), ((31698, 31711), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (31709, 31711), False, 'from collections import OrderedDict, defaultdict, Counter\n'), ((2089, 2112), 'itertools.zip_longest', 'zip_longest', (['inps', 'outs'], {}), '(inps, outs)\n', (2100, 2112), False, 'from itertools import zip_longest\n'), ((4827, 4903), 'itertools.zip_longest', 'zip_longest', (["summary[layer]['input_shapes']", "summary[layer]['output_shapes']"], {}), "(summary[layer]['input_shapes'], summary[layer]['output_shapes'])\n", (4838, 4903), False, 'from itertools import zip_longest\n'), ((6636, 6683), 'itertools.zip_longest', 'zip_longest', (['extra_ins', 'extra_outs', 'extra_names'], {}), '(extra_ins, extra_outs, extra_names)\n', (6647, 6683), False, 'from itertools import 
zip_longest\n'), ((10983, 10996), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10994, 10996), False, 'from collections import OrderedDict, defaultdict, Counter\n'), ((28357, 28396), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', (['device'], {}), '(device)\n', (28388, 28396), False, 'import torch\n'), ((28436, 28474), 'torch.cuda.max_memory_reserved', 'torch.cuda.max_memory_reserved', (['device'], {}), '(device)\n', (28466, 28474), False, 'import torch\n'), ((31066, 31091), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (31089, 31091), False, 'import torch\n'), ((31150, 31175), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (31173, 31175), False, 'import torch\n'), ((28219, 28236), 'torch.sum', 'torch.sum', (['out[0]'], {}), '(out[0])\n', (28228, 28236), False, 'import torch\n'), ((31903, 31941), 'torch.rand', 'torch.rand', (['(batch_size, *input_shape)'], {}), '((batch_size, *input_shape))\n', (31913, 31941), False, 'import torch\n'), ((11680, 11699), 'numpy.prod', 'np.prod', (['input_size'], {}), '(input_size)\n', (11687, 11699), True, 'import numpy as np\n'), ((26284, 26347), 're.match', 're.match', (['"""^torch\\\\.nn\\\\.modules.*"""', 'submod_qualified_classname'], {}), "('^torch\\\\.nn\\\\.modules.*', submod_qualified_classname)\n", (26292, 26347), False, 'import re\n'), ((27998, 28036), 'torch.rand', 'torch.rand', (['(batch_size, *input_shape)'], {}), '((batch_size, *input_shape))\n', (28008, 28036), False, 'import torch\n'), ((17195, 17209), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (17202, 17209), True, 'import numpy as np\n')] |
# Example: build, save and reload LaGriT mesh objects with pylagrit.
import pylagrit
import numpy
# Start a LaGriT session.
lg = pylagrit.PyLaGriT()
# Cell spacing and bounding box for a unit cube meshed with tets.
dxyz = numpy.array([0.25]*3)
mins = numpy.array([0.]*3)
maxs = numpy.array([1.]*3)
# List of mesh objects, seeded with one connected tet mesh named 'testmo'.
ms = [lg.createpts_dxyz(dxyz,mins,maxs,'tet',connect=True,name='testmo')]
# Create three new mesh objects, each one directly above the other:
# copy the previous mesh and translate it up by one unit in z.
for i in range(3):
    ms.append(ms[-1].copy())
    ms[-1].trans(ms[-1].mins,ms[-1].mins+numpy.array([0.,0.,1.]))
# Dump the session to a LaGriT binary file and close it.
lg.dump('lagrit_binary.lg')
lg.close()
# Start a fresh session and read the dump back to verify round-tripping.
lg = pylagrit.PyLaGriT()
ms_read = lg.read('lagrit_binary.lg')
print('Name of mesh object read in should be testmo, is: ', ms_read.name)
| [
"pylagrit.PyLaGriT",
"numpy.array"
] | [((104, 123), 'pylagrit.PyLaGriT', 'pylagrit.PyLaGriT', ([], {}), '()\n', (121, 123), False, 'import pylagrit\n'), ((180, 203), 'numpy.array', 'numpy.array', (['([0.25] * 3)'], {}), '([0.25] * 3)\n', (191, 203), False, 'import numpy\n'), ((209, 231), 'numpy.array', 'numpy.array', (['([0.0] * 3)'], {}), '([0.0] * 3)\n', (220, 231), False, 'import numpy\n'), ((236, 258), 'numpy.array', 'numpy.array', (['([1.0] * 3)'], {}), '([1.0] * 3)\n', (247, 258), False, 'import numpy\n'), ((558, 577), 'pylagrit.PyLaGriT', 'pylagrit.PyLaGriT', ([], {}), '()\n', (575, 577), False, 'import pylagrit\n'), ((487, 515), 'numpy.array', 'numpy.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (498, 515), False, 'import numpy\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Email: <EMAIL>, <EMAIL>
github: https://github.com/viebboy
"""
import pytest
import os
import shutil
import utility
import numpy as np
import random
from GOP.utility import gop_utils, gop_operators
# Dimensions of the synthetic data produced by utility.get_generator,
# shared across the tests in this module.
INPUT_DIM = utility.INPUT_DIM
OUTPUT_DIM = utility.OUTPUT_DIM
BATCH_SIZE = 32  # samples per generator batch
STEPS = 4  # batches per epoch in the mock generators
def test_block_update(tmpdir):
    """
    End-to-end check of gop_utils.block_update_standalone: update a fixed set
    of blocks in a random model and verify that the weights of every layer
    NOT in that set are left unchanged.
    """
    # Fresh working directory for the model under test.
    model_path = os.path.join(tmpdir.dirname, 'test_model')
    if os.path.exists(model_path):
        shutil.rmtree(model_path)
    os.mkdir(model_path)
    # Random model topology/weights plus the default operator sets.
    params, model_data = utility.get_random_model_data()
    params['tmp_dir'] = tmpdir.dirname
    params['model_name'] = 'test_model'
    params['nodal_set'] = gop_operators.get_default_nodal_set()
    params['pool_set'] = gop_operators.get_default_pool_set()
    params['activation_set'] = gop_operators.get_default_activation_set()
    # Randomly measure convergence on either the train or validation metric.
    params['convergence_measure'] = random.choice(
        ['train_', 'val_']) + params['convergence_measure']
    # Blocks whose weights are allowed to change during the update.
    block_names = ['gop_0_0', 'gop_1_0', 'bn_0_0', 'bn_1_0', 'output']
    # Map each layer's operator set to its index in the full operator list.
    all_op_sets = utility.get_all_operators()
    op_set_indices = {}
    for layer_name in model_data['op_sets'].keys():
        op_set_indices[layer_name] = all_op_sets.index(model_data['op_sets'][layer_name])
    train_states = {'topology': model_data['topology'],
                    'weights': model_data['weights'],
                    'op_set_indices': op_set_indices}
    # The same synthetic generator serves as train, val and test data.
    _, _, new_weights = gop_utils.block_update_standalone(train_states,
                                                         params,
                                                         block_names,
                                                         utility.get_generator,
                                                         [INPUT_DIM, OUTPUT_DIM, BATCH_SIZE, STEPS],
                                                         utility.get_generator,
                                                         [INPUT_DIM, OUTPUT_DIM, BATCH_SIZE, STEPS],
                                                         utility.get_generator,
                                                         [INPUT_DIM, OUTPUT_DIM, BATCH_SIZE, STEPS])
    # Layers outside the updated blocks must keep their original weights.
    for layer_name in new_weights.keys():
        if layer_name not in block_names:
            assert np.allclose(new_weights[layer_name][0], model_data['weights'][layer_name][0])
    shutil.rmtree(model_path)
if __name__ == '__main__':
    # Allow running this test module directly (outside a pytest invocation).
    pytest.main([__file__])
| [
"os.mkdir",
"GOP.utility.gop_operators.get_default_nodal_set",
"GOP.utility.gop_utils.block_update_standalone",
"numpy.allclose",
"os.path.exists",
"utility.get_random_model_data",
"random.choice",
"pytest.main",
"utility.get_all_operators",
"GOP.utility.gop_operators.get_default_activation_set",
... | [((404, 446), 'os.path.join', 'os.path.join', (['tmpdir.dirname', '"""test_model"""'], {}), "(tmpdir.dirname, 'test_model')\n", (416, 446), False, 'import os\n'), ((454, 480), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (468, 480), False, 'import os\n'), ((520, 540), 'os.mkdir', 'os.mkdir', (['model_path'], {}), '(model_path)\n', (528, 540), False, 'import os\n'), ((567, 598), 'utility.get_random_model_data', 'utility.get_random_model_data', ([], {}), '()\n', (596, 598), False, 'import utility\n'), ((704, 741), 'GOP.utility.gop_operators.get_default_nodal_set', 'gop_operators.get_default_nodal_set', ([], {}), '()\n', (739, 741), False, 'from GOP.utility import gop_utils, gop_operators\n'), ((767, 803), 'GOP.utility.gop_operators.get_default_pool_set', 'gop_operators.get_default_pool_set', ([], {}), '()\n', (801, 803), False, 'from GOP.utility import gop_utils, gop_operators\n'), ((835, 877), 'GOP.utility.gop_operators.get_default_activation_set', 'gop_operators.get_default_activation_set', ([], {}), '()\n', (875, 877), False, 'from GOP.utility import gop_utils, gop_operators\n'), ((1079, 1106), 'utility.get_all_operators', 'utility.get_all_operators', ([], {}), '()\n', (1104, 1106), False, 'import utility\n'), ((1464, 1745), 'GOP.utility.gop_utils.block_update_standalone', 'gop_utils.block_update_standalone', (['train_states', 'params', 'block_names', 'utility.get_generator', '[INPUT_DIM, OUTPUT_DIM, BATCH_SIZE, STEPS]', 'utility.get_generator', '[INPUT_DIM, OUTPUT_DIM, BATCH_SIZE, STEPS]', 'utility.get_generator', '[INPUT_DIM, OUTPUT_DIM, BATCH_SIZE, STEPS]'], {}), '(train_states, params, block_names,\n utility.get_generator, [INPUT_DIM, OUTPUT_DIM, BATCH_SIZE, STEPS],\n utility.get_generator, [INPUT_DIM, OUTPUT_DIM, BATCH_SIZE, STEPS],\n utility.get_generator, [INPUT_DIM, OUTPUT_DIM, BATCH_SIZE, STEPS])\n', (1497, 1745), False, 'from GOP.utility import gop_utils, gop_operators\n'), ((2385, 2410), 'shutil.rmtree', 
'shutil.rmtree', (['model_path'], {}), '(model_path)\n', (2398, 2410), False, 'import shutil\n'), ((2444, 2467), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (2455, 2467), False, 'import pytest\n'), ((490, 515), 'shutil.rmtree', 'shutil.rmtree', (['model_path'], {}), '(model_path)\n', (503, 515), False, 'import shutil\n'), ((914, 947), 'random.choice', 'random.choice', (["['train_', 'val_']"], {}), "(['train_', 'val_'])\n", (927, 947), False, 'import random\n'), ((2302, 2379), 'numpy.allclose', 'np.allclose', (['new_weights[layer_name][0]', "model_data['weights'][layer_name][0]"], {}), "(new_weights[layer_name][0], model_data['weights'][layer_name][0])\n", (2313, 2379), True, 'import numpy as np\n')] |
import numpy as np
from math import sqrt
from sklearn.neighbors import NearestNeighbors
class Ilamp:
    """Inverse projection using the ILAMP algorithm described in "iLAMP: exploring high-dimensional spacing through
    backward multidimensional projection" (https://ieeexplore.ieee.org/document/6400489)."""

    def __init__(self, X, Y, k):
        """ Constructor.

        Parameters
        ----------
        X : array
            Mesh array with shape (N, m, 3), where N is the number of meshes, m is the number of vertices per mesh.
            Last dimension is the dimension of each vertex (3 since we are working with 3-dim meshes).
        Y : array
            Array with the projections of the meshes in `X`. Its shape must be (N, 2), where N is the number of meshes.
            Last dimension is the projection dimension (2 since we are in the plane).
        k : int
            Number of neighbors considered in the first step of the algorithm.
        """
        self.X = X
        self.Y = Y
        self.k = k
        self.NN = NearestNeighbors(n_neighbors=k)
        self.NN.fit(Y)
        print('ILAMP created. X shape: ' + str(X.shape) + ' Y shape: ' + str(Y.shape))

    def invert(self, p):
        """ Inverse project `p`.

        Parameters
        ----------
        p : array
            2d point to be inverse projected.

        Returns
        -------
        array
            Array with the mesh that is the inverse projection of `p`. Its shape is (m, 3), where m is the number of
            vertices of the mesh.
        """
        print('Inverting ' + str(p))
        k = self.k
        # Bug fix: the original mutated the caller's array in place via
        # `p.shape = (1, p.shape[0])`; reshape a local view instead.
        p = np.asarray(p).reshape(1, -1)
        # First, find the k nearest neighbors of p in projection space.
        idx = self.NN.kneighbors(p, return_distance=False)[0]
        x = self.X[idx]                          # (k, m, 3) neighbor meshes
        y = self.Y[idx].reshape(k, p.shape[1])   # (k, 2) neighbor projections
        print('Neighbors found. X shape: ' + str(x.shape) + ' Y shape: ' + str(y.shape))
        # Weights: inverse squared distance from p to each neighbor projection.
        alpha = np.array([1 / np.dot((y[i] - p)[0], (y[i] - p)[0]) for i in range(0, k)])
        sum_alpha = np.sum(alpha)
        # Weighted centroids in mesh space and projection space.
        x_til = np.sum(np.array([alpha[i] * x[i] for i in range(0, k)]), axis=0) / sum_alpha
        y_til = np.sum(np.array([alpha[i] * y[i] for i in range(0, k)]), axis=0) / sum_alpha
        x_hat = x - x_til
        y_hat = y - y_til
        print('Tildes and hats computed. x_til shape: ' + str(x_til.shape) + ' y_til shape: ' + str(y_til.shape)
              + ' x_hat shape: ' + str(x_hat.shape) + ' y_hat shape: ' + str(y_hat.shape))
        sqrt_alpha = [sqrt(alpha[i]) for i in range(0, k)]
        A = np.array([sqrt_alpha[i] * y_hat[i] for i in range(0, k)])
        B = np.array([sqrt_alpha[i] * x_hat[i] for i in range(0, k)])
        A_t = np.transpose(A)
        print('A transposed shape ' + str(A_t.shape) + ' B shape ' + str(B.shape))
        # Bug fix: the output buffer was hard-coded to 5023 vertices; use the
        # actual vertex count of the stored meshes.
        num_vertices = self.X.shape[1]
        q = np.zeros((num_vertices, 3))
        # Solve one orthogonal Procrustes problem per vertex coordinate.
        for i in range(0, 3):
            AB = np.matmul(A_t, B[:, :, i])
            U, D, V = np.linalg.svd(AB, full_matrices=False)
            M = np.matmul(U, V)
            q[:, i] = np.matmul(p - y_til, M) + x_til[:, i]
        print('Reverse projection q computed. Shape: ' + str(q.shape))
        return q
| [
"numpy.sum",
"math.sqrt",
"numpy.zeros",
"numpy.transpose",
"numpy.linalg.svd",
"sklearn.neighbors.NearestNeighbors",
"numpy.matmul",
"numpy.dot"
] | [((1060, 1091), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (1076, 1091), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((2218, 2231), 'numpy.sum', 'np.sum', (['alpha'], {}), '(alpha)\n', (2224, 2231), True, 'import numpy as np\n'), ((2890, 2905), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (2902, 2905), True, 'import numpy as np\n'), ((3002, 3021), 'numpy.zeros', 'np.zeros', (['(5023, 3)'], {}), '((5023, 3))\n', (3010, 3021), True, 'import numpy as np\n'), ((2698, 2712), 'math.sqrt', 'sqrt', (['alpha[i]'], {}), '(alpha[i])\n', (2702, 2712), False, 'from math import sqrt\n'), ((3069, 3095), 'numpy.matmul', 'np.matmul', (['A_t', 'B[:, :, i]'], {}), '(A_t, B[:, :, i])\n', (3078, 3095), True, 'import numpy as np\n'), ((3166, 3204), 'numpy.linalg.svd', 'np.linalg.svd', (['AB'], {'full_matrices': '(False)'}), '(AB, full_matrices=False)\n', (3179, 3204), True, 'import numpy as np\n'), ((3341, 3356), 'numpy.matmul', 'np.matmul', (['U', 'V'], {}), '(U, V)\n', (3350, 3356), True, 'import numpy as np\n'), ((3379, 3402), 'numpy.matmul', 'np.matmul', (['(p - y_til)', 'M'], {}), '(p - y_til, M)\n', (3388, 3402), True, 'import numpy as np\n'), ((2138, 2174), 'numpy.dot', 'np.dot', (['(y[i] - p)[0]', '(y[i] - p)[0]'], {}), '((y[i] - p)[0], (y[i] - p)[0])\n', (2144, 2174), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import MiningEnv
import argparse
import numpy as np
import tensorflow as tf
import time
import pickle
from pdb import set_trace
import os
import errno
import gym
import maddpg.common.tf_util as U
from maddpg.trainer.maddpg import MADDPGAgentTrainer
import tensorflow.contrib.layers as layers
def parse_args():
    """Build and parse the command-line options for multiagent RL experiments."""
    ap = argparse.ArgumentParser(
        "Reinforcement Learning experiments for multiagent environments")
    # --- Environment ---
    ap.add_argument("--scenario", type=str, default="simple",
                    help="name of the scenario script")
    ap.add_argument("--max-episode-len", type=int, default=100, help="maximum episode length")
    ap.add_argument("--num-episodes", type=int, default=60000, help="number of episodes")
    ap.add_argument("--num-adversaries", type=int, default=0, help="number of adversaries")
    ap.add_argument("--good-policy", type=str, default="maddpg", help="policy for good agents")
    ap.add_argument("--adv-policy", type=str, default="maddpg", help="policy of adversaries")
    # --- Core training parameters ---
    ap.add_argument("--lr", type=float, default=1e-3, help="learning rate for Adam optimizer")
    ap.add_argument("--gamma", type=float, default=0.95, help="discount factor")
    ap.add_argument("--batch-size", type=int, default=1024,
                    help="number of episodes to optimize at the same time")
    ap.add_argument("--num-units", type=int, default=64, help="number of units in the mlp")
    # --- Checkpointing ---
    ap.add_argument("--exp-name", type=str, default="None", help="name of the experiment")
    ap.add_argument("--save-dir", type=str, default="/tmp/policy/",
                    help="directory in which training state and model should be saved")
    ap.add_argument("--save-rate", type=int, default=1000,
                    help="save model once every time this many episodes are completed")
    ap.add_argument("--load-dir", type=str, default="",
                    help="directory in which training state and model are loaded")
    # --- Evaluation / benchmarking ---
    ap.add_argument("--restore", action="store_true", default=False)
    ap.add_argument("--display", action="store_true", default=False)
    ap.add_argument("--benchmark", action="store_true", default=False)
    ap.add_argument("--benchmark-iters", type=int, default=100000,
                    help="number of iterations run for benchmarking")
    ap.add_argument("--benchmark-dir", type=str, default="./benchmark_files/",
                    help="directory where benchmark data is saved")
    ap.add_argument("--plots-dir", type=str, default="./learning_curves/",
                    help="directory where plot data is saved")
    return ap.parse_args()
def mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
    """Two-hidden-layer ReLU MLP mapping an observation to `num_outputs` sigmoid values."""
    with tf.variable_scope(scope, reuse=reuse):
        hidden = input
        # Two equally sized ReLU hidden layers.
        for _ in range(2):
            hidden = layers.fully_connected(hidden, num_outputs=num_units, activation_fn=tf.nn.relu)
        # Sigmoid output head sized to the requested number of outputs.
        hidden = layers.fully_connected(hidden, num_outputs=num_outputs, activation_fn=tf.nn.sigmoid)
    return hidden
def make_env():
    """Instantiate the mining environment; change the env id here to swap environments."""
    return gym.make('MiningEnv-v0')
def get_trainers(env, obs_shape_n, arglist):
    """Create one MADDPG trainer per (good) agent in the environment."""
    # DDPG falls back to a local Q-function instead of the centralized critic.
    use_local_q = arglist.good_policy == 'ddpg'
    return [MADDPGAgentTrainer("agent_%d" % i, mlp_model, obs_shape_n,
                               env.action_space, i, arglist,
                               local_q_func=use_local_q)
            for i in range(env.n)]
def create_if_not_exist(filename):
    """
    Ensure the parent directory of ``filename`` exists, creating it if needed.

    Note: despite the name, only the directory is created — never the file
    itself (matching the original behavior). os.makedirs(..., exist_ok=True)
    replaces the exists()/makedirs()/errno dance and is immune to the
    creation race the errno check guarded against.
    """
    dirname = os.path.dirname(filename)
    # A bare filename has no directory component; nothing to create.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
def train(arglist):
    """Run the MADDPG training loop using the options parsed into `arglist`."""
    with U.single_threaded_session():
        # Create environment
        env = make_env()
        # Create agent trainers (one observation shape per agent)
        obs_shape_n = [env.observation_space[i].shape for i in range(env.n)]
        num_adversaries = 0  # this environment has no adversarial agents
        trainers = get_trainers(env, obs_shape_n, arglist)
        print('Using good policy {} and adv policy {}'.format(
            arglist.good_policy, arglist.adv_policy))
        # Initialize all TF variables
        U.initialize()
        # Load previous results, if necessary (default load dir = save dir)
        if arglist.load_dir == "":
            arglist.load_dir = arglist.save_dir
        if arglist.display or arglist.restore or arglist.benchmark:
            print('Loading previous state...')
            U.load_state(arglist.load_dir)
        # Create record savers
        episode_rewards = [0.0]  # sum of rewards for all agents for each episode
        agent_rewards = [[0.0] for _ in range(env.n)]  # individual agent reward for each episode
        final_ep_rewards = []  # sum of rewards for training curve
        final_ep_ag_rewards = []  # agent rewards for training curve
        agent_info = [[[]]]  # placeholder for benchmarking info
        saver = tf.train.Saver()
        obs_n = env.reset()  # Get initial observation
        episode_step = 0  # steps within the current episode
        train_step = 0    # global step counter across episodes
        t_start = time.time()
        update_num = 0  # NOTE(review): assigned but never read in this function
        print('Starting iterations...')
        while True:
            # get one action per agent from its current policy
            action_n = [agent.action(obs) for agent, obs in zip(trainers, obs_n)]
            truck_trgt, excvtr_trgt = action_n  # exactly two agents: truck and excavator
            # Make sure the action is within the action space
            truck_trgt = np.clip(
                truck_trgt, env.action_space[0].low, env.action_space[0].high)
            excvtr_trgt = np.clip(
                excvtr_trgt, env.action_space[1].low, env.action_space[1].high)
            action_n = [truck_trgt, excvtr_trgt]
            # environment step
            new_obs_n, rew_n, done_n, info_n = env.step(action_n)
            episode_step += 1
            # Check for episode termination: all agents done, or length cap hit
            done = all(done_n)
            terminal = (episode_step >= arglist.max_episode_len)
            # collect experience into each agent's replay buffer
            for i, agent in enumerate(trainers):
                agent.experience(obs_n[i], action_n[i], rew_n[i], new_obs_n[i], done_n[i], terminal)
            obs_n = new_obs_n
            for i, rew in enumerate(rew_n):
                episode_rewards[-1] += rew      # team reward for this episode
                agent_rewards[i][-1] += rew     # per-agent reward for this episode
            # print('Action is {}'.format(action_n))
            if done or terminal:
                # print('episode: {}, reward: {}'.format(len(episode_rewards), episode_rewards[-1]))
                # Episode over: reset env and open a fresh reward slot
                obs_n = env.reset()
                episode_step = 0
                episode_rewards.append(0)
                for a in agent_rewards:
                    a.append(0)
                agent_info.append([[]])
            # increment global step counter
            train_step += 1
            # for displaying learned policies (skips training updates below)
            if arglist.display:
                time.sleep(0.1)
                env.render()
                continue
            # update all trainers, if not in display or benchmark mode
            loss = None  # NOTE(review): holds only the last agent's result and is unused
            for agent in trainers:
                agent.preupdate()
            for agent in trainers:
                loss = agent.update(trainers, train_step)
            # save model, display training output every save_rate episodes
            if (terminal or done) and (len(episode_rewards) % arglist.save_rate == 0) and not (arglist.display or arglist.restore):
                U.save_state(arglist.save_dir, saver=saver)
                # print statement depends on whether or not there are adversaries
                if num_adversaries == 0:
                    print("steps: {}, episodes: {}, mean episode reward: {}, time: {}".format(
                        train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]), round(time.time() - t_start, 3)))
                else:
                    print("steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, time: {}".format(
                        train_step, len(episode_rewards), np.mean(
                            episode_rewards[-arglist.save_rate:]),
                        [np.mean(rew[-arglist.save_rate:]) for rew in agent_rewards], round(time.time() - t_start, 3)))
                t_start = time.time()
                # Keep track of final episode reward (mean over the last save_rate episodes)
                final_ep_rewards.append(np.mean(episode_rewards[-arglist.save_rate:]))
                for rew in agent_rewards:
                    final_ep_ag_rewards.append(np.mean(rew[-arglist.save_rate:]))
            # saves final episode reward for plotting training curve later
            if len(episode_rewards) >= arglist.num_episodes:
                print('average reward is: {}'.format(np.mean(episode_rewards)))
                rew_file_name = arglist.plots_dir + arglist.exp_name + '_rewards.pkl'
                create_if_not_exist(rew_file_name)
                with open(rew_file_name, 'wb') as fp:
                    pickle.dump(final_ep_rewards, fp)
                agrew_file_name = arglist.plots_dir + arglist.exp_name + '_agrewards.pkl'
                create_if_not_exist(agrew_file_name)
                with open(agrew_file_name, 'wb') as fp:
                    pickle.dump(final_ep_ag_rewards, fp)
                print('...Finished total of {} episodes.'.format(len(episode_rewards)))
                break
if __name__ == '__main__':
    # Script entry point: parse CLI flags into `arglist`, then run training.
    arglist = parse_args()
    train(arglist)
| [
"pickle.dump",
"gym.make",
"argparse.ArgumentParser",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.train.Saver",
"maddpg.common.tf_util.save_state",
"os.path.dirname",
"maddpg.common.tf_util.single_threaded_session",
"tensorflow.variable_scope",
"numpy.clip",
"time.time",
"time.sle... | [((345, 439), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Reinforcement Learning experiments for multiagent environments"""'], {}), "(\n 'Reinforcement Learning experiments for multiagent environments')\n", (368, 439), False, 'import argparse\n'), ((3465, 3489), 'gym.make', 'gym.make', (['"""MiningEnv-v0"""'], {}), "('MiningEnv-v0')\n", (3473, 3489), False, 'import gym\n'), ((3002, 3039), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (3019, 3039), True, 'import tensorflow as tf\n'), ((3075, 3151), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': 'num_units', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=num_units, activation_fn=tf.nn.relu)\n', (3097, 3151), True, 'import tensorflow.contrib.layers as layers\n'), ((3166, 3242), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': 'num_units', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=num_units, activation_fn=tf.nn.relu)\n', (3188, 3242), True, 'import tensorflow.contrib.layers as layers\n'), ((3257, 3343), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['out'], {'num_outputs': 'num_outputs', 'activation_fn': 'tf.nn.sigmoid'}), '(out, num_outputs=num_outputs, activation_fn=tf.nn.\n sigmoid)\n', (3279, 3343), True, 'import tensorflow.contrib.layers as layers\n'), ((4268, 4295), 'maddpg.common.tf_util.single_threaded_session', 'U.single_threaded_session', ([], {}), '()\n', (4293, 4295), True, 'import maddpg.common.tf_util as U\n'), ((4694, 4708), 'maddpg.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (4706, 4708), True, 'import maddpg.common.tf_util as U\n'), ((5426, 5442), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5440, 5442), True, 'import tensorflow as tf\n'), ((5564, 5575), 'time.time', 'time.time', ([], {}), '()\n', (5573, 5575), False, 'import time\n'), 
((4018, 4043), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (4033, 4043), False, 'import os\n'), ((4966, 4996), 'maddpg.common.tf_util.load_state', 'U.load_state', (['arglist.load_dir'], {}), '(arglist.load_dir)\n', (4978, 4996), True, 'import maddpg.common.tf_util as U\n'), ((5902, 5972), 'numpy.clip', 'np.clip', (['truck_trgt', 'env.action_space[0].low', 'env.action_space[0].high'], {}), '(truck_trgt, env.action_space[0].low, env.action_space[0].high)\n', (5909, 5972), True, 'import numpy as np\n'), ((6016, 6087), 'numpy.clip', 'np.clip', (['excvtr_trgt', 'env.action_space[1].low', 'env.action_space[1].high'], {}), '(excvtr_trgt, env.action_space[1].low, env.action_space[1].high)\n', (6023, 6087), True, 'import numpy as np\n'), ((4083, 4108), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (4098, 4108), False, 'import os\n'), ((7349, 7364), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (7359, 7364), False, 'import time\n'), ((7876, 7919), 'maddpg.common.tf_util.save_state', 'U.save_state', (['arglist.save_dir'], {'saver': 'saver'}), '(arglist.save_dir, saver=saver)\n', (7888, 7919), True, 'import maddpg.common.tf_util as U\n'), ((8700, 8711), 'time.time', 'time.time', ([], {}), '()\n', (8709, 8711), False, 'import time\n'), ((8805, 8850), 'numpy.mean', 'np.mean', (['episode_rewards[-arglist.save_rate:]'], {}), '(episode_rewards[-arglist.save_rate:])\n', (8812, 8850), True, 'import numpy as np\n'), ((9404, 9437), 'pickle.dump', 'pickle.dump', (['final_ep_rewards', 'fp'], {}), '(final_ep_rewards, fp)\n', (9415, 9437), False, 'import pickle\n'), ((9658, 9694), 'pickle.dump', 'pickle.dump', (['final_ep_ag_rewards', 'fp'], {}), '(final_ep_ag_rewards, fp)\n', (9669, 9694), False, 'import pickle\n'), ((8941, 8974), 'numpy.mean', 'np.mean', (['rew[-arglist.save_rate:]'], {}), '(rew[-arglist.save_rate:])\n', (8948, 8974), True, 'import numpy as np\n'), ((9166, 9190), 'numpy.mean', 'np.mean', 
(['episode_rewards'], {}), '(episode_rewards)\n', (9173, 9190), True, 'import numpy as np\n'), ((8196, 8241), 'numpy.mean', 'np.mean', (['episode_rewards[-arglist.save_rate:]'], {}), '(episode_rewards[-arglist.save_rate:])\n', (8203, 8241), True, 'import numpy as np\n'), ((8478, 8523), 'numpy.mean', 'np.mean', (['episode_rewards[-arglist.save_rate:]'], {}), '(episode_rewards[-arglist.save_rate:])\n', (8485, 8523), True, 'import numpy as np\n'), ((8579, 8612), 'numpy.mean', 'np.mean', (['rew[-arglist.save_rate:]'], {}), '(rew[-arglist.save_rate:])\n', (8586, 8612), True, 'import numpy as np\n'), ((8249, 8260), 'time.time', 'time.time', ([], {}), '()\n', (8258, 8260), False, 'import time\n'), ((8646, 8657), 'time.time', 'time.time', ([], {}), '()\n', (8655, 8657), False, 'import time\n')] |
#
# Copyright (C) 2021 <NAME> and GHOST contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
def optimize_threshold_from_predictions(labels, probs, thresholds,
        ThOpt_metrics = 'Kappa', N_subsets = 100,
        subsets_size = 0.2, with_replacement = False, random_seed = None):
    """ Optimize the decision threshold based on subsets of the training set.
    The threshold that maximizes the Cohen's kappa coefficient or a ROC-based criterion
    on the training subsets is chosen as optimal.
    Parameters
    ----------
    labels: sequence of ints
        True labels for the training set
    probs: sequence of floats
        predicted probabilities for minority class from the training set
        (e.g. output from cls.predict_proba(data)[:,1])
    thresholds: list of floats
        List of decision thresholds to screen for classification
    ThOpt_metrics: str
        Optimization metric. Choose between "Kappa" and "ROC"
    N_subsets: int
        Number of training subsets to use in the optimization
    subsets_size: float or int
        Size of the subsets. if float, represents the proportion of the dataset to include in the subsets.
        If integer, it represents the actual number of instances to include in the subsets.
    with_replacement: bool
        The subsets are drawn randomly. True to draw the subsets with replacement
    random_seed: int
        random number to seed the drawing of the subsets
    Returns
    ----------
    thresh: float
        Optimal decision threshold for classification
    """
    # Seed once, then derive one seed per subset so each draw is reproducible.
    np.random.seed(random_seed)
    random_seeds = np.random.randint(N_subsets*10, size=N_subsets)
    df_preds = pd.DataFrame({'labels':labels,'probs':probs})
    thresh_names = [str(x) for x in thresholds]
    # Precompute the binary predictions for every candidate threshold as
    # one column per threshold (column name = str(threshold)).
    for thresh in thresholds:
        df_preds[str(thresh)] = [1 if x>=thresh else 0 for x in probs]
    # Optimize the decision threshold based on the Cohen's Kappa coefficient
    if ThOpt_metrics == 'Kappa':
        # pick N_subsets training subsets and determine the threshold that provides the highest kappa on each
        # of the subsets
        kappa_accum = []
        for i in range(N_subsets):
            if with_replacement:
                # Bootstrap draw: translate a fractional subsets_size into a count.
                if isinstance(subsets_size, float):
                    Nsamples = int(df_preds.shape[0]*subsets_size)
                elif isinstance(subsets_size, int):
                    Nsamples = subsets_size
                df_subset = resample(df_preds, replace=True, n_samples = Nsamples, stratify=labels, random_state = random_seeds[i])
                labels_subset = df_subset['labels']
            else:
                # Stratified split without replacement; only the held-out part is used.
                df_tmp, df_subset, labels_tmp, labels_subset = train_test_split(df_preds, labels, test_size = subsets_size, stratify = labels, random_state = random_seeds[i])
            # Kappa of each candidate threshold on this subset.
            kappa_train_subset = []
            for col1 in thresh_names:
                kappa_train_subset.append(metrics.cohen_kappa_score(labels_subset, list(df_subset[col1])))
            kappa_accum.append(kappa_train_subset)
        # determine the threshold that provides the best results on the training subsets
        # (highest median kappa across the N_subsets draws)
        y_values_median, y_values_std = helper_calc_median_std(kappa_accum)
        opt_thresh = thresholds[np.argmax(y_values_median)]
    # Optimize the decision threshold based on the ROC-curve, as described here https://doi.org/10.1007/s11548-013-0913-8
    elif ThOpt_metrics == 'ROC':
        sensitivity_accum = []
        specificity_accum = []
        # Calculate sensitivity and specificity for a range of thresholds and N_subsets
        for i in range(N_subsets):
            if with_replacement:
                if isinstance(subsets_size, float):
                    Nsamples = int(df_preds.shape[0]*subsets_size)
                elif isinstance(subsets_size, int):
                    Nsamples = subsets_size
                df_subset = resample(df_preds, n_samples = Nsamples, stratify=labels, random_state = random_seeds[i])
                labels_subset = list(df_subset['labels'])
            else:
                df_tmp, df_subset, labels_tmp, labels_subset = train_test_split(df_preds, labels, test_size = subsets_size, stratify = labels, random_state = random_seeds[i])
            sensitivity = []
            specificity = []
            for thresh in thresholds:
                scores = [1 if x >= thresh else 0 for x in df_subset['probs']]
                tn, fp, fn, tp = metrics.confusion_matrix(labels_subset, scores, labels=sorted(set(labels))).ravel()
                sensitivity.append(tp/(tp+fn))
                specificity.append(tn/(tn+fp))
            sensitivity_accum.append(sensitivity)
            specificity_accum.append(specificity)
        # determine the threshold that provides the best results on the training subsets:
        # maximize the harmonic mean of median sensitivity and median specificity
        # (that is what the formula below computes, despite the "distance" name).
        median_sensitivity, std_sensitivity = helper_calc_median_std(sensitivity_accum)
        median_specificity, std_specificity = helper_calc_median_std(specificity_accum)
        roc_dist_01corner = (2*median_sensitivity*median_specificity)/(median_sensitivity+median_specificity)
        opt_thresh = thresholds[np.argmax(roc_dist_01corner)]
    return opt_thresh
def optimize_threshold_from_oob_predictions(labels_train, oob_probs, thresholds, ThOpt_metrics = 'Kappa'):
    """Optimize the decision threshold based on the prediction probabilities of the out-of-bag set of random forest.
    The threshold that maximizes the Cohen's kappa coefficient or a ROC-based criterion
    on the out-of-bag set is chosen as optimal.
    Parameters
    ----------
    labels_train: list of int
        True labels for the training set
    oob_probs : list of floats
        Majority class prediction probabilities for the out-of-bag set of a trained random forest model
    thresholds: list of floats
        List of decision thresholds to screen for classification
    ThOpt_metrics: str
        Optimization metric. Choose between "Kappa" and "ROC"
    Returns
    ----------
    thresh: float
        Optimal decision threshold for classification
    """
    if ThOpt_metrics == 'Kappa':
        # Score every candidate threshold on the out-of-bag predictions.
        scored = []
        for candidate in thresholds:
            preds = [1 if p >= candidate else 0 for p in oob_probs]
            kappa = metrics.cohen_kappa_score(labels_train, preds)
            scored.append((np.round(kappa, 3), candidate))
        # Highest kappa wins; ties resolve to the larger threshold because the
        # (kappa, threshold) tuples are sorted in descending order.
        scored.sort(reverse=True)
        thresh = scored[0][1]
    elif ThOpt_metrics == 'ROC':
        # ROC optimization with thresholds determined by sklearn's roc_curve:
        # maximize the harmonic mean of sensitivity (tpr) and specificity.
        fpr, tpr, roc_thresholds = metrics.roc_curve(labels_train, oob_probs, pos_label=1)
        tnr = 1 - fpr
        harmonic_mean = (2 * tpr * tnr) / (tpr + tnr)
        thresh = roc_thresholds[np.argmax(harmonic_mean)]
    return thresh
def helper_calc_median_std(specificity):
    """Return the column-wise median and standard deviation of a 2-D sequence.

    Each inner sequence is one row; statistics are computed down the rows
    (axis 0), yielding one median and one std per column.
    """
    matrix = np.array(specificity)
    return np.median(matrix, axis=0), np.std(matrix, axis=0)
| [
"pandas.DataFrame",
"numpy.random.seed",
"sklearn.metrics.roc_curve",
"numpy.argmax",
"numpy.median",
"numpy.std",
"sklearn.model_selection.train_test_split",
"numpy.random.randint",
"numpy.array",
"sklearn.metrics.cohen_kappa_score",
"sklearn.utils.resample",
"numpy.round"
] | [((2815, 2842), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (2829, 2842), True, 'import numpy as np\n'), ((2862, 2911), 'numpy.random.randint', 'np.random.randint', (['(N_subsets * 10)'], {'size': 'N_subsets'}), '(N_subsets * 10, size=N_subsets)\n', (2879, 2911), True, 'import numpy as np\n'), ((2932, 2980), 'pandas.DataFrame', 'pd.DataFrame', (["{'labels': labels, 'probs': probs}"], {}), "({'labels': labels, 'probs': probs})\n", (2944, 2980), True, 'import pandas as pd\n'), ((8487, 8508), 'numpy.array', 'np.array', (['specificity'], {}), '(specificity)\n', (8495, 8508), True, 'import numpy as np\n'), ((8531, 8553), 'numpy.median', 'np.median', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (8540, 8553), True, 'import numpy as np\n'), ((8572, 8591), 'numpy.std', 'np.std', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (8578, 8591), True, 'import numpy as np\n'), ((4506, 4532), 'numpy.argmax', 'np.argmax', (['y_values_median'], {}), '(y_values_median)\n', (4515, 4532), True, 'import numpy as np\n'), ((7671, 7718), 'sklearn.metrics.cohen_kappa_score', 'metrics.cohen_kappa_score', (['labels_train', 'scores'], {}), '(labels_train, scores)\n', (7696, 7718), False, 'from sklearn import metrics\n'), ((8135, 8190), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['labels_train', 'oob_probs'], {'pos_label': '(1)'}), '(labels_train, oob_probs, pos_label=1)\n', (8152, 8190), False, 'from sklearn import metrics\n'), ((3728, 3831), 'sklearn.utils.resample', 'resample', (['df_preds'], {'replace': '(True)', 'n_samples': 'Nsamples', 'stratify': 'labels', 'random_state': 'random_seeds[i]'}), '(df_preds, replace=True, n_samples=Nsamples, stratify=labels,\n random_state=random_seeds[i])\n', (3736, 3831), False, 'from sklearn.utils import resample\n'), ((3965, 4074), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df_preds', 'labels'], {'test_size': 'subsets_size', 'stratify': 'labels', 'random_state': 'random_seeds[i]'}), 
'(df_preds, labels, test_size=subsets_size, stratify=labels,\n random_state=random_seeds[i])\n', (3981, 4074), False, 'from sklearn.model_selection import train_test_split\n'), ((6403, 6431), 'numpy.argmax', 'np.argmax', (['roc_dist_01corner'], {}), '(roc_dist_01corner)\n', (6412, 6431), True, 'import numpy as np\n'), ((8317, 8345), 'numpy.argmax', 'np.argmax', (['roc_dist_01corner'], {}), '(roc_dist_01corner)\n', (8326, 8345), True, 'import numpy as np\n'), ((5169, 5259), 'sklearn.utils.resample', 'resample', (['df_preds'], {'n_samples': 'Nsamples', 'stratify': 'labels', 'random_state': 'random_seeds[i]'}), '(df_preds, n_samples=Nsamples, stratify=labels, random_state=\n random_seeds[i])\n', (5177, 5259), False, 'from sklearn.utils import resample\n'), ((5398, 5507), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df_preds', 'labels'], {'test_size': 'subsets_size', 'stratify': 'labels', 'random_state': 'random_seeds[i]'}), '(df_preds, labels, test_size=subsets_size, stratify=labels,\n random_state=random_seeds[i])\n', (5414, 5507), False, 'from sklearn.model_selection import train_test_split\n'), ((7746, 7764), 'numpy.round', 'np.round', (['kappa', '(3)'], {}), '(kappa, 3)\n', (7754, 7764), True, 'import numpy as np\n')] |
import numpy as np
class Rms_prop():
    """RMSProp optimiser.

    Keeps a running, exponentially decayed average of squared gradients
    ("cache") and divides each update by its square root, which damps the
    aggressive monotonic decay of Adagrad's learning rate. The method is
    unpublished; it is usually credited to slide 29, lecture 6 of Geoff
    Hinton's Coursera class (see http://cs231n.github.io/neural-networks-3/#sgd).
    """
    def __init__(self, learning_rate = 1e-2, decay_rate = 0.99, epsilon = 1e-8):
        """
        Configure the optimiser.

        :param learning_rate: Step size applied to each update.
        :type learning_rate: float.
        :param decay_rate: Decay factor for the squared-gradient average.
        :type decay_rate: float.
        :param epsilon: Small constant preventing division by zero.
        :type epsilon: float.
        """
        self.learning_rate = learning_rate
        self.decay_rate = decay_rate
        self.epsilon = epsilon
        # Running average of squared gradients; created lazily on first update
        # so it can match the shape of the weights.
        self.cache = None

    def update(self, w, dw):
        """
        Apply one RMSProp step to `w` in place and return it.

        :param w: The weights (modified in place).
        :type w: A numpy array.
        :param dw: The gradients of the weights.
        :type dw: A numpy array of the same shape as w.
        :return: The updated weights (same object as `w`).
        :rtype: A numpy array of the same shape as w.
        """
        if self.cache is None:
            self.cache = np.zeros_like(w)
        rho = self.decay_rate
        self.cache = rho * self.cache + (1 - rho) * np.square(dw)
        w += -self.learning_rate * dw / (np.sqrt(self.cache) + self.epsilon)
        return w
| [
"numpy.zeros_like",
"numpy.sqrt"
] | [((1622, 1638), 'numpy.zeros_like', 'np.zeros_like', (['w'], {}), '(w)\n', (1635, 1638), True, 'import numpy as np\n'), ((1765, 1784), 'numpy.sqrt', 'np.sqrt', (['self.cache'], {}), '(self.cache)\n', (1772, 1784), True, 'import numpy as np\n')] |
""" A module for creating videos that show tracks (both in world coordinates and pixel coordinates)
"""
import imageio as iio
import cv2
import numpy as np
from random import shuffle, choice
from bisect import bisect
import click
from os.path import isfile
from tracking import DetTrack
from tracking_world import WorldTrack, WorldTrackingConfig
from storage import load
from apply_mask import Masker
from world import Calibration
from folder import runs_path, datasets_path
from config import DatasetConfig
from util import print_flush, right_remove, left_remove
import os
def get_colors(n=10):
    """Generate `n` distinct BGR colour tuples by sweeping the HSV hue
    channel at fixed saturation/value, returned in shuffled order.

    NOTE(review): the hue formula 255*i/(n+1) can exceed 179, the valid
    8-bit hue range in OpenCV — confirm whether that is intentional.
    """
    colors = []
    for idx in range(n):
        hsv_pixel = np.zeros((1, 1, 3)).astype("uint8")
        hsv_pixel[0][0][0] = 255 * idx / (n + 1)  # hue
        hsv_pixel[0][0][1] = 180                  # saturation
        hsv_pixel[0][0][2] = 255                  # value
        bgr = cv2.cvtColor(hsv_pixel, cv2.COLOR_HSV2BGR)
        colors.append((int(bgr[0][0][0]), int(bgr[0][0][1]), int(bgr[0][0][2])))
    shuffle(colors)
    return colors
"""
Renders a video of a list of tracks.
- tracks: List of DetTrack objects
- vidpath: Path to a video file, to which tracks will be drawn on
- outvidname: Path to output video file
- fps: Frames per second of output video
- ncols: How many different colors should be used for different tracks
- mask: Either None or a Masker object, which is applied to all frames before drawing (with alpha=0.5)
- id_mode: "global" to show track IDs consistent with other videos from the same dataset, or "local" to make the first track in this video be ID 1
- calib: If None, tracks are assumed to be in pixel coordinates. If a Calibration object (from the world.py module) then tracks are assumed to be in world coordinates and are projected back to pixel coordinates for this visualization
"""
def render_video(tracks, vidpath, outvidname, fps=10, ncols=50, mask=None, id_mode="global", calib=None, map_data_dir=None):
    """Render `tracks` onto the video at `vidpath` and write `outvidname`.

    If `map_data_dir` contains a map.png and map.tamap pair, each frame is
    extended with a top-down map view drawn next to the camera view.
    See the comment block above for the meaning of the other parameters.
    """
    if id_mode == "global":
        pass # Keep track IDs as they are
    elif id_mode == "local":
        # Reset all track IDs, to be locally logical for this video
        i = 1
        # Sort by first appearance
        tracks = sorted(tracks, key= lambda x: x.history[0][0])
        for track in tracks:
            track.id = i
            i += 1
    colors = get_colors(ncols)
    if map_data_dir:
        map_image_file = '{d}/map.png'.format(d=map_data_dir)
        tamap_file = '{d}/map.tamap'.format(d=map_data_dir)
        if os.path.exists(map_image_file) and os.path.exists(tamap_file):
            # Load the map in RGB order (imread gives BGR; the channel flip swaps it)
            map_image = cv2.imread(map_image_file)[:,:,[2,1,0]]
            # Parse "key: value" lines from the .tamap file until the first blank line.
            # NOTE(review): assumes the file defines x0, y0, dx, dy and scale;
            # a missing key leads to a NameError further down — confirm the format.
            for l in open(tamap_file).readlines():
                if not l.strip():
                    break
                name, val = l.split(':')
                name = name.strip().lower()
                val = float(val)
                if name == 'x0':
                    x0 = val
                elif name == 'y0':
                    y0 = val
                elif name == 'dx':
                    dx = val
                elif name == 'dy':
                    dy = val
                elif name == 'scale':
                    scale = val
        else:
            # Map data incomplete: disable the map overlay entirely.
            map_data_dir=None
    with iio.get_reader(vidpath) as invid:
        with iio.get_writer(outvidname, fps=fps) as outvid:
            # Only render the frame span in which at least one track exists.
            first_frame = min([x.history[0][0] for x in tracks])
            last_frame = max([x.history[-1][0] for x in tracks])
            first_frame = max(first_frame, 1)  # never start before frame 1
            for i in range(first_frame, last_frame + 1):
                # presumably track frame numbers are 1-based while imageio
                # frames are 0-based, hence the i-1 — verify against tracker
                frame = invid.get_data(i-1)
                if not (mask is None):
                    frame = mask.mask(frame, alpha=0.5)
                if map_data_dir:
                    frame_map = map_image.copy()
                for track in tracks:
                    if calib is None:
                        # Pixel-coordinate tracks are drawn directly
                        draw(frame, track, i, colors[track.id%ncols])
                    else:
                        # World-coordinate tracks are projected to pixels first
                        hist, text = draw_world(frame, track, i, colors[track.id%ncols], calib)
                        if hist is not None and map_data_dir:
                            x, y = hist[2:4]
                            # x, y = int(scale*(x - x0)), int(scale*(y - y0))
                            # World -> map pixels: translate by (x0, y0), then apply the
                            # scaled 2-D rotation defined by the (dx, dy) basis vector.
                            nx = int((((x - x0) * dx) + ((y - y0) * dy)) * scale)
                            ny = int(((-(x - x0) * dy) + ((y - y0) * dx)) * scale)
                            _draw_world(frame_map, text, nx, ny, colors[track.id%ncols])
                # Stamp the frame number in the top-left corner
                cv2.putText(frame, 'frame: {}'.format(i), (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
                if map_data_dir:
                    # Place the map view to the right of the camera view
                    frame = np.concatenate((frame, frame_map), 1)
                outvid.append_data(frame)
def clamp(x, lower, upper):
    """Return the middle of (lower, x, upper), i.e. x restricted to [lower, upper]."""
    _, middle, _ = sorted((lower, x, upper))
    return middle
def draw_world(to_draw, track, frame_number, color, calib):
    """Draw a world-coordinate track onto `to_draw` at `frame_number`.

    The track's world position is projected to pixels via `calib`.
    Returns (history_entry, label_text) when the track is active at this
    frame, otherwise (None, None).
    """
    frame_nums = [entry[0] for entry in track.history]
    # Not active at this frame: nothing to draw.
    if frame_number < frame_nums[0] or frame_number > frame_nums[-1]:
        return None, None
    # Most recent history entry at or before frame_number.
    entry = track.history[bisect(frame_nums, frame_number) - 1]
    wx, wy = entry[2:4]
    px, py = calib.to_pixels(wx, wy, 0)
    px, py = int(px), int(py)
    label = "{} {}".format(track.cn, track.id)
    _draw_world(to_draw, label, px, py, color)
    return entry, label
def _draw_world(to_draw, text, x, y, color):
    """Draw a filled label box containing `text` just above pixel (x, y),
    and mark the point itself with a small blue square when it lies safely
    inside the image. Returns the (mutated) image.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.3
    text_size = cv2.getTextSize(text, font, font_scale, 1)
    text_top = (x, y-10)
    text_bot = (x + text_size[0][0]+10, y-5+text_size[0][1])
    text_pos = (x + 5, y-2)
    cv2.rectangle(to_draw, text_top, text_bot, color, -1)
    cv2.putText(to_draw, text, text_pos, font, font_scale, (0,0,0), 1, cv2.LINE_AA)
    # Bug fix: the second bounds check must test y against the image height
    # (shape[0]); the original tested x twice, which let out-of-range y
    # values reach the slice below.
    if 2 <= x < to_draw.shape[1]-2 and 2 <= y < to_draw.shape[0]-2:
        to_draw[y-2:y+2, x-2:x+2] = (255,0,0)
    return to_draw
def draw(to_draw, track, frame_number, color):
    """Draw a pixel-coordinate track's box, label and KLT points onto `to_draw`
    for the given frame. Does nothing if the track has no entry at that frame.
    """
    # Brightness multipliers cycled over to tell KLT point indices apart.
    brightness = [1.8, 1.5, 1.2, 0.9, 0.7]
    # All history entries for exactly this frame.
    hists = [hist for hist in track.history if hist[0] == frame_number]
    if not hists:
        return
    # Prefer a flagged entry (hist[5] truthy); the loop keeps the LAST one.
    hist = None
    for loop_hist in hists:
        if loop_hist[5]:
            hist = loop_hist
    # No flagged entry: pick one at random.
    if hist is None:
        hist = choice(hists)
    bonustext = ""  # NOTE(review): never modified below, so the label always ends with a space
    if frame_number in track.klt_checkpoints:
        klt_checkpoint = track.klt_checkpoints[frame_number]
        for i_klt, k in klt_checkpoint:
            kx = int(k[0])
            ky = int(k[1])
            # Shade each KLT point by its index, clamped to valid 0-255 BGR.
            bright = brightness[(i_klt%len(brightness))]
            klt_color = tuple([clamp(int(bright*c),0,255) for c in color])
            cv2.circle(to_draw, (kx, ky), 2, klt_color, -1)
    # Entry layout: hist[1], hist[2] = box centre; hist[3], hist[4] = width, height.
    x = hist[1]
    y = hist[2]
    w = hist[3]
    h = hist[4]
    xmin = int(x - w/2)
    ymin = int(y - h/2)
    xmax = int(x + w/2)
    ymax = int(y + h/2)
    # Flagged entries get a thicker bounding box.
    linewidth = 1
    if hist[5]:
        linewidth = 3
    cv2.rectangle(to_draw, (xmin, ymin), (xmax, ymax), color, linewidth)
    # Label: class, track id, and the (always empty) bonus text.
    text = "{} {} {}".format(track.c, track.id, bonustext)
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.3
    text_size = cv2.getTextSize(text, font, font_scale, 1)
    text_top = (xmin, ymin-10)
    text_bot = (xmin + text_size[0][0]+10, ymin-5+text_size[0][1])
    text_pos = (xmin + 5, ymin-2)
    # Filled backdrop behind the label, then the label itself in black.
    cv2.rectangle(to_draw, text_top, text_bot, color, -1)
    cv2.putText(to_draw, text, text_pos, font, font_scale, (0,0,0), 1, cv2.LINE_AA)
@click.command()
@click.option("--dataset", type=str, help="Name of dataset")
@click.option("--run", type=str, help="Name of run")
@click.option("--videos", type=str, help="Either some videos separated by commas without spaces (or a single video), or, 'all' or 'random:X' where X is a number")
def main(dataset, run, videos):
    # Note: This main function only works for world coordinate tracks!
    calib = Calibration(dataset)
    dc = DatasetConfig(dataset)
    masker = Masker(dataset)
    if videos == 'all':
        # Render every video that has a saved world-track file.
        from glob import glob
        files = glob('{rp}{ds}_{r}/tracks_world/*_tracks.pklz'.format(rp=runs_path, ds=dataset, r=run))
        video_names = [right_remove(x.split('/')[-1], '_tracks.pklz') for x in files]
    elif videos.startswith('random:'):
        # Render X distinct videos drawn at random.
        num = int(left_remove(videos, 'random:'))
        from glob import glob
        files = glob('{rp}{ds}_{r}/tracks_world/*_tracks.pklz'.format(rp=runs_path, ds=dataset, r=run))
        all_video_names = [right_remove(x.split('/')[-1], '_tracks.pklz') for x in files]
        video_names = []
        while len(video_names) < num:
            video_name = choice(all_video_names)
            if not video_name in video_names:
                video_names.append(video_name)
            # Just in case user wants more videos than there are
            if len(video_names) == len(all_video_names):
                break
    else:
        # Assumes the user types one or more videos, separated by commas with no spaces
        video_names = videos.split(',')
        # In case user includes endings
        # Bug fix: the original passed the bound method `x.rstrip` instead of
        # the string, so right_remove never saw actual text. Trailing
        # whitespace is stripped before removing the '.mkv' suffix.
        video_names = [right_remove(x.rstrip(), '.mkv') for x in video_names]
        # In case user includes spaces
        video_names = [x.strip(' ') for x in video_names]
    print_flush("Chosen videos: ")
    print_flush(str(video_names))
    for video_name in video_names:
        print_flush(video_name)
        print_flush("Loading...")
        tracks = load('{rp}{ds}_{r}/tracks_world/{v}_tracks.pklz'.format(rp=runs_path, ds=dataset, r=run, v=video_name))
        vidpath = "{dsp}{ds}/videos/{v}.mkv".format(dsp=datasets_path, ds=dataset, v=video_name)
        if not isfile(vidpath):
            raise(ValueError("Incorrect input {}".format(videos)))
        outvidpath = '{rp}{ds}_{r}/tracks_world/{v}_tracks.mp4'.format(rp=runs_path, ds=dataset, r=run, v=video_name)
        print_flush("Rendering...")
        render_video(tracks, vidpath, outvidpath, mask=masker, id_mode="global", calib=calib, fps=dc.get('video_fps'))
    print_flush("Done!")
if __name__ == '__main__':
    # Script entry point; click parses the command-line options.
    main()
| [
"random.shuffle",
"click.option",
"os.path.isfile",
"cv2.rectangle",
"cv2.cvtColor",
"util.left_remove",
"os.path.exists",
"click.command",
"util.right_remove",
"util.print_flush",
"apply_mask.Masker",
"imageio.get_reader",
"cv2.circle",
"bisect.bisect",
"imageio.get_writer",
"numpy.co... | [((7560, 7575), 'click.command', 'click.command', ([], {}), '()\n', (7573, 7575), False, 'import click\n'), ((7577, 7636), 'click.option', 'click.option', (['"""--dataset"""'], {'type': 'str', 'help': '"""Name of dataset"""'}), "('--dataset', type=str, help='Name of dataset')\n", (7589, 7636), False, 'import click\n'), ((7638, 7689), 'click.option', 'click.option', (['"""--run"""'], {'type': 'str', 'help': '"""Name of run"""'}), "('--run', type=str, help='Name of run')\n", (7650, 7689), False, 'import click\n'), ((7691, 7862), 'click.option', 'click.option', (['"""--videos"""'], {'type': 'str', 'help': '"""Either some videos separated by commas without spaces (or a single video), or, \'all\' or \'random:X\' where X is a number"""'}), '(\'--videos\', type=str, help=\n "Either some videos separated by commas without spaces (or a single video), or, \'all\' or \'random:X\' where X is a number"\n )\n', (7703, 7862), False, 'import click\n'), ((1052, 1067), 'random.shuffle', 'shuffle', (['colors'], {}), '(colors)\n', (1059, 1067), False, 'from random import shuffle, choice\n'), ((5536, 5578), 'cv2.getTextSize', 'cv2.getTextSize', (['text', 'font', 'font_scale', '(1)'], {}), '(text, font, font_scale, 1)\n', (5551, 5578), False, 'import cv2\n'), ((5697, 5750), 'cv2.rectangle', 'cv2.rectangle', (['to_draw', 'text_top', 'text_bot', 'color', '(-1)'], {}), '(to_draw, text_top, text_bot, color, -1)\n', (5710, 5750), False, 'import cv2\n'), ((5763, 5849), 'cv2.putText', 'cv2.putText', (['to_draw', 'text', 'text_pos', 'font', 'font_scale', '(0, 0, 0)', '(1)', 'cv2.LINE_AA'], {}), '(to_draw, text, text_pos, font, font_scale, (0, 0, 0), 1, cv2.\n LINE_AA)\n', (5774, 5849), False, 'import cv2\n'), ((7024, 7092), 'cv2.rectangle', 'cv2.rectangle', (['to_draw', '(xmin, ymin)', '(xmax, ymax)', 'color', 'linewidth'], {}), '(to_draw, (xmin, ymin), (xmax, ymax), color, linewidth)\n', (7037, 7092), False, 'import cv2\n'), ((7233, 7275), 'cv2.getTextSize', 'cv2.getTextSize', 
(['text', 'font', 'font_scale', '(1)'], {}), '(text, font, font_scale, 1)\n', (7248, 7275), False, 'import cv2\n'), ((7412, 7465), 'cv2.rectangle', 'cv2.rectangle', (['to_draw', 'text_top', 'text_bot', 'color', '(-1)'], {}), '(to_draw, text_top, text_bot, color, -1)\n', (7425, 7465), False, 'import cv2\n'), ((7478, 7564), 'cv2.putText', 'cv2.putText', (['to_draw', 'text', 'text_pos', 'font', 'font_scale', '(0, 0, 0)', '(1)', 'cv2.LINE_AA'], {}), '(to_draw, text, text_pos, font, font_scale, (0, 0, 0), 1, cv2.\n LINE_AA)\n', (7489, 7564), False, 'import cv2\n'), ((7973, 7993), 'world.Calibration', 'Calibration', (['dataset'], {}), '(dataset)\n', (7984, 7993), False, 'from world import Calibration\n'), ((8003, 8025), 'config.DatasetConfig', 'DatasetConfig', (['dataset'], {}), '(dataset)\n', (8016, 8025), False, 'from config import DatasetConfig\n'), ((8039, 8054), 'apply_mask.Masker', 'Masker', (['dataset'], {}), '(dataset)\n', (8045, 8054), False, 'from apply_mask import Masker\n'), ((9384, 9414), 'util.print_flush', 'print_flush', (['"""Chosen videos: """'], {}), "('Chosen videos: ')\n", (9395, 9414), False, 'from util import print_flush, right_remove, left_remove\n'), ((10176, 10196), 'util.print_flush', 'print_flush', (['"""Done!"""'], {}), "('Done!')\n", (10187, 10196), False, 'from util import print_flush, right_remove, left_remove\n'), ((897, 933), 'cv2.cvtColor', 'cv2.cvtColor', (['col', 'cv2.COLOR_HSV2BGR'], {}), '(col, cv2.COLOR_HSV2BGR)\n', (909, 933), False, 'import cv2\n'), ((3314, 3337), 'imageio.get_reader', 'iio.get_reader', (['vidpath'], {}), '(vidpath)\n', (3328, 3337), True, 'import imageio as iio\n'), ((6342, 6355), 'random.choice', 'choice', (['hists'], {}), '(hists)\n', (6348, 6355), False, 'from random import shuffle, choice\n'), ((9492, 9515), 'util.print_flush', 'print_flush', (['video_name'], {}), '(video_name)\n', (9503, 9515), False, 'from util import print_flush, right_remove, left_remove\n'), ((9524, 9549), 'util.print_flush', 
'print_flush', (['"""Loading..."""'], {}), "('Loading...')\n", (9535, 9549), False, 'from util import print_flush, right_remove, left_remove\n'), ((10020, 10047), 'util.print_flush', 'print_flush', (['"""Rendering..."""'], {}), "('Rendering...')\n", (10031, 10047), False, 'from util import print_flush, right_remove, left_remove\n'), ((2580, 2610), 'os.path.exists', 'os.path.exists', (['map_image_file'], {}), '(map_image_file)\n', (2594, 2610), False, 'import os\n'), ((2615, 2641), 'os.path.exists', 'os.path.exists', (['tamap_file'], {}), '(tamap_file)\n', (2629, 2641), False, 'import os\n'), ((3361, 3396), 'imageio.get_writer', 'iio.get_writer', (['outvidname'], {'fps': 'fps'}), '(outvidname, fps=fps)\n', (3375, 3396), True, 'import imageio as iio\n'), ((6735, 6782), 'cv2.circle', 'cv2.circle', (['to_draw', '(kx, ky)', '(2)', 'klt_color', '(-1)'], {}), '(to_draw, (kx, ky), 2, klt_color, -1)\n', (6745, 6782), False, 'import cv2\n'), ((9792, 9807), 'os.path.isfile', 'isfile', (['vidpath'], {}), '(vidpath)\n', (9798, 9807), False, 'from os.path import isfile\n'), ((745, 764), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {}), '((1, 1, 3))\n', (753, 764), True, 'import numpy as np\n'), ((2667, 2693), 'cv2.imread', 'cv2.imread', (['map_image_file'], {}), '(map_image_file)\n', (2677, 2693), False, 'import cv2\n'), ((5125, 5152), 'bisect.bisect', 'bisect', (['fnums', 'frame_number'], {}), '(fnums, frame_number)\n', (5131, 5152), False, 'from bisect import bisect\n'), ((8361, 8391), 'util.left_remove', 'left_remove', (['videos', '"""random:"""'], {}), "(videos, 'random:')\n", (8372, 8391), False, 'from util import print_flush, right_remove, left_remove\n'), ((8723, 8746), 'random.choice', 'choice', (['all_video_names'], {}), '(all_video_names)\n', (8729, 8746), False, 'from random import shuffle, choice\n'), ((9216, 9246), 'util.right_remove', 'right_remove', (['x.rstrip', '""".mkv"""'], {}), "(x.rstrip, '.mkv')\n", (9228, 9246), False, 'from util import print_flush, 
right_remove, left_remove\n'), ((4761, 4798), 'numpy.concatenate', 'np.concatenate', (['(frame, frame_map)', '(1)'], {}), '((frame, frame_map), 1)\n', (4775, 4798), True, 'import numpy as np\n')] |
import gurobipy as gp
import numpy as np
from mip.mip_nn import MIP_NN
from globals import EPSILON, CONT, GUROBI_ENV, LOG
class MAX_M(MIP_NN):
  """MIP formulation (on top of MIP_NN) that trains a binarized-activation
  network by maximizing the total classification margin.

  One non-negative margin variable is created per non-input neuron; the
  objective maximizes their sum while constraints force every training
  example to be classified correctly with at least that margin.
  """
  def __init__(self, data, architecture, bound, reg, fair):
    """Build the full Gurobi model: variables, constraints and objective."""
    model = gp.Model("Gurobi_NN", env=GUROBI_ENV)
    if not LOG:
      model.setParam("OutputFlag", 0)  # silence solver output unless logging
    self.N = len(data["train_x"])  # number of training examples
    self.architecture = architecture  # layer widths, input layer first
    self.data = data
    self.train_x = data["train_x"]
    self.oh_train_y = data["oh_train_y"]  # one-hot training labels
    self.bound = bound  # big-M bound on weight magnitudes
    self.reg = reg  # NOTE(review): stored but unused here — confirm MIP_NN consumes it
    self.fair = fair  # NOTE(review): stored but unused here — confirm MIP_NN consumes it
    self.m = model
    self.init_params()
    self.add_margins()
    self.add_examples()
    self.add_output_constraints()
    self.calc_objective()
    self.cutoff = 0
  def add_margins(self):
    """Add one continuous margin variable (lb=0) per non-input neuron."""
    self.margins = {}
    for lastLayer, neurons_out in enumerate(self.architecture[1:]):
      layer = lastLayer + 1
      # object-dtype array so it can hold Gurobi Var handles
      self.margins[layer] = np.full(neurons_out, None)
      for j in range(neurons_out):
        self.margins[layer][j] = self.add_var(CONT,"margin_%s-%s" % (layer,j), lb=0)
  def add_examples(self):
    """Encode the forward pass of every training example.

    For layers past the first, var_c[layer][k,i,j] is pinned by four big-M
    inequalities to +weight when the previous binary activation is 1 and
    -weight when it is 0 (i.e. weight times a +/-1 activation).  Indicator
    constraints then tie each hidden binary activation to the sign of its
    pre-activation, separated by that layer's margin variable.
    """
    for lastLayer, neurons_out in enumerate(self.architecture[1:]):
      layer = lastLayer + 1
      neurons_in = self.architecture[lastLayer]
      for k in range(self.N):
        for j in range(neurons_out):
          inputs = []
          for i in range(neurons_in):
            if layer == 1:
              # first layer sees the raw (constant) training features
              inputs.append(self.train_x[k,i]*self.weights[layer][i,j])
            else:
              # big-M linearization of var_c = weight * (2*act - 1)
              self.add_constraint(self.var_c[layer][k,i,j] - self.weights[layer][i,j] + 2*self.bound*self.act[lastLayer][k,i] <= 2*self.bound)
              self.add_constraint(self.var_c[layer][k,i,j] + self.weights[layer][i,j] - 2*self.bound*self.act[lastLayer][k,i] <= 0*self.bound)
              self.add_constraint(self.var_c[layer][k,i,j] - self.weights[layer][i,j] - 2*self.bound*self.act[lastLayer][k,i] >= -2*self.bound)
              self.add_constraint(self.var_c[layer][k,i,j] + self.weights[layer][i,j] + 2*self.bound*self.act[lastLayer][k,i] >= 0*self.bound)
              inputs.append(self.var_c[layer][k,i,j])
          pre_activation = sum(inputs) + self.biases[layer][j]
          if layer < len(self.architecture) - 1:
            # hidden neuron: activation is 1 iff pre-activation clears the margin
            self.add_constraint((self.act[layer][k,j] == 1) >> (pre_activation >= self.margins[layer][j]))
            self.add_constraint((self.act[layer][k,j] == 0) >> (pre_activation <= -EPSILON - self.margins[layer][j]))
  def add_output_constraints(self):
    """Force the output layer to classify each example correctly by its margin."""
    layer = len(self.architecture) - 1
    lastLayer = layer - 1
    neurons_in = self.architecture[lastLayer]
    neurons_out = self.architecture[layer]
    for k in range(self.N):
      for j in range(neurons_out):
        inputs = []
        for i in range(neurons_in):
          if layer == 1:
            # single-layer network: output sees raw features directly
            inputs.append(self.train_x[k,i]*self.weights[layer][i,j])
          else:
            inputs.append(self.var_c[layer][k,i,j])
        pre_activation = sum(inputs) + self.biases[layer][j]
        if self.oh_train_y[k,j] > 0:
          # true class: output score must exceed the margin
          self.add_constraint(pre_activation >= self.margins[layer][j])
        else:
          # wrong classes: output score must stay below minus the margin
          self.add_constraint(pre_activation <= -EPSILON - self.margins[layer][j])
  def calc_objective(self):
    """Maximize the sum of all per-neuron margin variables."""
    self.obj = 0
    for layer in self.margins:
      self.obj += self.margins[layer].sum()
    self.set_objective(sense="max")
  def extract_values(self, get_func=lambda z: z.x):
    """Extract solved values via MIP_NN, plus the per-layer margins."""
    varMatrices = MIP_NN.extract_values(self, get_func)
    for layer in self.margins:
      varMatrices["margins_%s" % layer] = self.get_val(self.margins[layer], get_func)
    return varMatrices
"numpy.full",
"mip.mip_nn.MIP_NN.extract_values",
"gurobipy.Model"
] | [((217, 254), 'gurobipy.Model', 'gp.Model', (['"""Gurobi_NN"""'], {'env': 'GUROBI_ENV'}), "('Gurobi_NN', env=GUROBI_ENV)\n", (225, 254), True, 'import gurobipy as gp\n'), ((3393, 3430), 'mip.mip_nn.MIP_NN.extract_values', 'MIP_NN.extract_values', (['self', 'get_func'], {}), '(self, get_func)\n', (3414, 3430), False, 'from mip.mip_nn import MIP_NN\n'), ((887, 913), 'numpy.full', 'np.full', (['neurons_out', 'None'], {}), '(neurons_out, None)\n', (894, 913), True, 'import numpy as np\n')] |
"""Utility functions."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import numpy as np
from .externals.mne import _validate_type
def _hammfilt(x, winsz):
"""Convolve with a hamming window."""
assert len(x) > winsz
win = np.hamming(winsz)
win /= sum(win)
return np.convolve(x, win, 'same')
# Savitzky-Golay filtering, lifted and adapted from mne-python (0.22)
def _savgol_filter(data, h_freq, sfreq):
    """Smooth ``data`` along the last axis with a Savitzky-Golay filter.

    Parameters
    ----------
    data : array-like
        The 1D data to filter.
    h_freq : float
        Approximate high cutoff frequency in Hz.  This is not an exact
        cutoff: it only determines the length of the window over which a
        5th-order polynomial smoothing is applied.
    sfreq : float
        The sampling frequency (in Hz).

    Returns
    -------
    filt_data : array-like
        The filtered data.
    """
    from scipy.signal import savgol_filter
    _validate_type(sfreq, (float, int), 'sfreq')
    assert sfreq > 0.
    _validate_type(h_freq, (float, int), 'h_freq')
    assert h_freq > 0.
    h_freq = float(h_freq)
    if h_freq >= sfreq / 2.:
        raise ValueError('h_freq must be less than half the sample rate')
    # window length must be odd for savgol_filter
    half_window = int(np.round(sfreq / h_freq)) // 2
    window_length = 2 * half_window + 1
    return savgol_filter(data, axis=-1, polyorder=5,
                         window_length=window_length)
def smooth_waveform(data, window_len, sfreq):
    """Smooth an arbitrary waveform using Hamming-windowed convolution.

    Parameters
    ----------
    data : list | np.ndarray
        The 1D data to filter.
    window_len : float
        The length (in ms) of a `~numpy.hamming` window to convolve the
        data with.
    sfreq : float
        The data sampling rate.

    Returns
    -------
    data_filt : np.ndarray
        The filtered data.

    Raises
    ------
    RuntimeError
        If ``data`` is not one-dimensional.
    ValueError
        If ``window_len`` is invalid or too long for ``data``.
    """
    if ((isinstance(data, np.ndarray) and data.ndim > 1) or
            (isinstance(data, list) and isinstance(data[0], list))):
        raise RuntimeError('smoothing currently only supported for 1D-arrays')
    if not isinstance(window_len, (float, int)) or window_len < 0:
        raise ValueError('Window length must be a non-negative number')
    elif 0 < window_len < 1:
        raise ValueError('Window length less than 1 ms is not supported')
    _validate_type(sfreq, (float, int), 'sfreq')
    assert sfreq > 0.
    # Convolution window length in samples; cast to int so the numpy window
    # constructor receives an integer size (np.round returns a float).
    winsz = int(np.round(1e-3 * window_len * sfreq))
    # _hammfilt requires len(data) > winsz, so reject equality here as well
    # (>= instead of >) to raise a clear ValueError rather than tripping the
    # helper's bare assert.
    if winsz >= len(data):
        raise ValueError(
            f'Window length too long: {winsz} samples; data length is '
            f'{len(data)} samples')
    return _hammfilt(data, winsz)
| [
"numpy.convolve",
"scipy.signal.savgol_filter",
"numpy.round",
"numpy.hamming"
] | [((277, 294), 'numpy.hamming', 'np.hamming', (['winsz'], {}), '(winsz)\n', (287, 294), True, 'import numpy as np\n'), ((326, 353), 'numpy.convolve', 'np.convolve', (['x', 'win', '"""same"""'], {}), "(x, win, 'same')\n", (337, 353), True, 'import numpy as np\n'), ((1603, 1673), 'scipy.signal.savgol_filter', 'savgol_filter', (['data'], {'axis': '(-1)', 'polyorder': '(5)', 'window_length': 'window_length'}), '(data, axis=-1, polyorder=5, window_length=window_length)\n', (1616, 1673), False, 'from scipy.signal import savgol_filter\n'), ((2770, 2806), 'numpy.round', 'np.round', (['(0.001 * window_len * sfreq)'], {}), '(0.001 * window_len * sfreq)\n', (2778, 2806), True, 'import numpy as np\n'), ((1509, 1533), 'numpy.round', 'np.round', (['(sfreq / h_freq)'], {}), '(sfreq / h_freq)\n', (1517, 1533), True, 'import numpy as np\n')] |
from .dataset_tools import ExplicitPathDataset
from .definitions import resolve_path
import os
import pandas as pd
import numpy as np
import math
import glob
import json
def list_image_paths(basedir, prefixes=("",), extensions=("jpg", "jpeg", "png")):
    """Recursively collect image files under ``basedir``.

    Searches ``basedir/<prefix>/**/*.<ext>`` for every prefix/extension
    combination and returns the matching paths *relative* to ``basedir``,
    deduplicated (order is unspecified).

    Note: defaults are tuples rather than lists — mutable default
    arguments are a Python footgun even when (as here) never mutated.
    """
    matches = []
    for prefix in prefixes:
        for ext in extensions:
            pattern = f"{basedir}/{prefix}/**/*.{ext}"
            found = glob.glob(pattern, recursive=True)
            matches.extend(found)
            print(f"found {len(found)} files with pattern {pattern}...")
    # strip the base directory plus any leftover leading slashes/dots
    relative = {path[len(basedir):].lstrip("./") for path in matches}
    return list(relative)
def infer_qgt_from_boxes(box_data, num_files):
    """Build a binary (file x category) ground-truth table from box annotations."""
    counts = box_data.groupby(["dbidx", "category"]).size()
    table = counts.unstack(level=1).fillna(0)
    # make sure every file index 0..num_files-1 has a row, even without boxes
    table = table.reindex(np.arange(num_files)).fillna(0)
    return table.clip(0, 1)
def prep_ground_truth(paths, box_data, qgt):
    """adds dbidx column to box data, sets dbidx in qgt and sorts qgt by dbidx"""
    index_of = {path: idx for idx, path in enumerate(paths)}
    to_idx = lambda p: index_of.get(p, -1)
    # attach integer dbidx and drop boxes whose file is not in `paths`
    box_data = box_data.assign(dbidx=box_data.file_path.map(to_idx).astype("int"))
    box_data = box_data[box_data.dbidx >= 0].reset_index(drop=True)
    # re-key qgt by dbidx, dropping rows for unknown paths
    mapped = qgt.index.map(to_idx)
    qgt = qgt[mapped >= 0]
    qgt = qgt.set_index(mapped[mapped >= 0]).sort_index()
    # add entries for files with no labels (values stay NaN and are ignored)
    qgt = qgt.reindex(np.arange(len(paths)))
    assert len(paths) == qgt.shape[0], "every path should be in the ground truth"
    return box_data, qgt
class SeesawDatasetManager:
    # Accessor for the on-disk dataset layout produced by ``create_dataset``:
    # <root>/file_meta.parquet, <root>/images/ and (optionally)
    # <root>/ground_truth/.
    def __init__(self, dataset_path, cache=None):
        """Assumes layout created by create_dataset"""
        dataset_path = resolve_path(dataset_path)
        self.dataset_name = os.path.basename(dataset_path)
        self.dataset_root = dataset_path
        file_meta = pd.read_parquet(f"{self.dataset_root}/file_meta.parquet")
        self.file_meta = file_meta
        # relative image paths; their position defines the canonical dbidx
        self.paths = file_meta["file_path"].values
        self.image_root = os.path.realpath(f"{self.dataset_root}/images/")
        # cache object used by load_ground_truth; must expose read_parquet
        self.cache = cache
    def get_pytorch_dataset(self):
        """Return a torch-style dataset over the image files."""
        return ExplicitPathDataset(
            root_dir=self.image_root, relative_path_list=self.paths
        )
    def __repr__(self):
        return f"{self.__class__.__name__}({self.dataset_name})"
    def save_ground_truth(self, box_data, qgt=None):
        """
        Will add qgt and box information. or overwrite it.
        """
        if qgt is None:
            # derive a binary per-file/per-category table from the boxes
            qgt = infer_qgt_from_boxes(box_data, num_files=self.paths.shape[0])
        assert qgt.shape[0] == self.paths.shape[0]
        gt_root = self.ground_truth_path()
        os.makedirs(gt_root, exist_ok=True)
        # NOTE(review): this writes "boxes.parquet" while load_ground_truth
        # reads "box_data.parquet" — confirm which filename is canonical.
        box_data.to_parquet(f"{gt_root}/boxes.parquet")
        qgt.to_parquet(f"{gt_root}/qgt.parquet")
    def ground_truth_path(self):
        """Directory where ground-truth parquet files are stored."""
        gt_root = f"{self.dataset_root}/ground_truth/"
        return gt_root
    def load_eval_categories(self):
        """Load the list of evaluation categories from ground_truth/categories.json."""
        assert os.path.exists(f"{self.dataset_root}/ground_truth")
        return json.load(open(f"{self.dataset_root}/ground_truth/categories.json"))
    def load_ground_truth(self):
        """Load (box_data, qgt) and align both to this dataset's path order."""
        assert os.path.exists(f"{self.dataset_root}/ground_truth")
        qgt = self.cache.read_parquet(f"{self.dataset_root}/ground_truth/qgt.parquet")
        box_data = self.cache.read_parquet(
            f"{self.dataset_root}/ground_truth/box_data.parquet"
        )
        box_data, qgt = prep_ground_truth(self.paths, box_data, qgt)
        return box_data, qgt
def create_dataset(image_src, output_path, paths=[]) -> SeesawDatasetManager:
    """
    if not given explicit paths, it assumes every jpg, jpeg and png is wanted
    """
    assert not os.path.exists(output_path), "output already exists"
    dirname = os.path.dirname(output_path)
    os.makedirs(dirname, exist_ok=True)
    basename = os.path.basename(output_path)
    final_dspath = f"{dirname}/{basename}"
    # build under a hidden temp name, then rename to the final name at the end
    dspath = f"{dirname}/.tmp.{basename}"
    assert not os.path.exists(final_dspath), "name already used"
    if os.path.exists(dspath):
        # NOTE(review): os.rmdir only removes an *empty* directory; a
        # partially-built leftover tmp dir would make this raise — confirm
        # whether a recursive cleanup was intended here.
        os.rmdir(dspath)
    image_src = resolve_path(image_src)
    assert os.path.isdir(image_src)
    os.mkdir(dspath)
    image_path = f"{dspath}/images"
    # images are symlinked, not copied
    os.symlink(image_src, image_path)
    if len(paths) == 0:
        paths = list_image_paths(image_src)
    df = pd.DataFrame({"file_path": paths})
    df.to_parquet(f"{dspath}/file_meta.parquet")
    os.rename(dspath, final_dspath)
    return SeesawDatasetManager(final_dspath)
| [
"pandas.DataFrame",
"os.mkdir",
"os.makedirs",
"os.path.basename",
"os.path.isdir",
"os.path.dirname",
"os.rename",
"os.path.exists",
"os.path.realpath",
"numpy.arange",
"pandas.read_parquet",
"glob.glob",
"os.rmdir",
"os.symlink"
] | [((3836, 3864), 'os.path.dirname', 'os.path.dirname', (['output_path'], {}), '(output_path)\n', (3851, 3864), False, 'import os\n'), ((3869, 3904), 'os.makedirs', 'os.makedirs', (['dirname'], {'exist_ok': '(True)'}), '(dirname, exist_ok=True)\n', (3880, 3904), False, 'import os\n'), ((3920, 3949), 'os.path.basename', 'os.path.basename', (['output_path'], {}), '(output_path)\n', (3936, 3949), False, 'import os\n'), ((4109, 4131), 'os.path.exists', 'os.path.exists', (['dspath'], {}), '(dspath)\n', (4123, 4131), False, 'import os\n'), ((4209, 4233), 'os.path.isdir', 'os.path.isdir', (['image_src'], {}), '(image_src)\n', (4222, 4233), False, 'import os\n'), ((4239, 4255), 'os.mkdir', 'os.mkdir', (['dspath'], {}), '(dspath)\n', (4247, 4255), False, 'import os\n'), ((4296, 4329), 'os.symlink', 'os.symlink', (['image_src', 'image_path'], {}), '(image_src, image_path)\n', (4306, 4329), False, 'import os\n'), ((4408, 4442), 'pandas.DataFrame', 'pd.DataFrame', (["{'file_path': paths}"], {}), "({'file_path': paths})\n", (4420, 4442), True, 'import pandas as pd\n'), ((4497, 4528), 'os.rename', 'os.rename', (['dspath', 'final_dspath'], {}), '(dspath, final_dspath)\n', (4506, 4528), False, 'import os\n'), ((1812, 1842), 'os.path.basename', 'os.path.basename', (['dataset_path'], {}), '(dataset_path)\n', (1828, 1842), False, 'import os\n'), ((1904, 1961), 'pandas.read_parquet', 'pd.read_parquet', (['f"""{self.dataset_root}/file_meta.parquet"""'], {}), "(f'{self.dataset_root}/file_meta.parquet')\n", (1919, 1961), True, 'import pandas as pd\n'), ((2074, 2122), 'os.path.realpath', 'os.path.realpath', (['f"""{self.dataset_root}/images/"""'], {}), "(f'{self.dataset_root}/images/')\n", (2090, 2122), False, 'import os\n'), ((2734, 2769), 'os.makedirs', 'os.makedirs', (['gt_root'], {'exist_ok': '(True)'}), '(gt_root, exist_ok=True)\n', (2745, 2769), False, 'import os\n'), ((3039, 3090), 'os.path.exists', 'os.path.exists', (['f"""{self.dataset_root}/ground_truth"""'], {}), 
"(f'{self.dataset_root}/ground_truth')\n", (3053, 3090), False, 'import os\n'), ((3224, 3275), 'os.path.exists', 'os.path.exists', (['f"""{self.dataset_root}/ground_truth"""'], {}), "(f'{self.dataset_root}/ground_truth')\n", (3238, 3275), False, 'import os\n'), ((3769, 3796), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (3783, 3796), False, 'import os\n'), ((4052, 4080), 'os.path.exists', 'os.path.exists', (['final_dspath'], {}), '(final_dspath)\n', (4066, 4080), False, 'import os\n'), ((4141, 4157), 'os.rmdir', 'os.rmdir', (['dspath'], {}), '(dspath)\n', (4149, 4157), False, 'import os\n'), ((399, 433), 'glob.glob', 'glob.glob', (['pattern'], {'recursive': '(True)'}), '(pattern, recursive=True)\n', (408, 433), False, 'import glob\n'), ((795, 815), 'numpy.arange', 'np.arange', (['num_files'], {}), '(num_files)\n', (804, 815), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset
import numpy as np
import scipy.io as sio
import utils
import copy
import os
import cv2
class RoomTypeDataset(Dataset):
    """Floorplan dataset yielding (boundary image, room-type count vector) pairs.

    Each sample is read from a MATLAB .mat file containing the floorplan
    boundary polygon (``Boundary``) and ground-truth room types
    (``gt_rTypes``).
    """

    def __init__(self, data_root, phase, max_room_per_type):
        self.data_split_root = os.path.join(data_root, phase)
        self.floorplans = os.listdir(self.data_split_root)
        # maximum number of rooms allowed per room type (length-13 sequence)
        self.max_room_per_type = max_room_per_type
        self.max_room_num = np.array(max_room_per_type).sum()
        self.len = self.__len__()

    def __len__(self):
        return len(self.floorplans)

    def __getitem__(self, index):
        # fixed off-by-one: the original accepted index == len (`<=`)
        assert index < self.__len__(), 'index range error'
        floorplan_name = self.floorplans[index]
        floorplan_path = os.path.join(self.data_split_root, floorplan_name)
        floorplan = copy.deepcopy(sio.loadmat(floorplan_path, squeeze_me=True, struct_as_record=False))['data']
        boundary = floorplan.Boundary
        rTypes = floorplan.gt_rTypes
        input_img = self.init_input_img(boundary)
        input_vector = self.init_input_vector(rTypes)
        input_img = torch.FloatTensor(input_img).unsqueeze(0)
        input_img = self.normalize(input_img)
        input_vector = torch.FloatTensor(input_vector)
        return input_img, input_vector

    def normalize(self, data):
        # map category ids in [0, total_num_category-1] to [-1, 1]
        data = torch.div(data, utils.total_num_category-1)
        data = torch.sub(data, 0.5)
        data = torch.div(data, 0.5)
        return data

    def init_input_vector(self, rTypes):
        """For each of the 13 room types, set the first `count` slots of its
        capacity segment to 1 (counts above the capacity are truncated)."""
        input_vector = []
        for i in range(13):  # 13 is the total room types
            temp = np.zeros(self.max_room_per_type[i])
            num = (rTypes == i).sum()
            temp[:num] = 1
            input_vector.append(temp)
        input_vector = np.concatenate(input_vector, 0)
        return input_vector

    def init_input_img(self, boundary):
        """Rasterize the boundary polygon into a 128x128 category-id image."""
        # np.int was removed in NumPy 1.24; the builtin int is equivalent
        boundary = boundary.astype(int)
        boundary = boundary[:, [1, 0, 2, 3]]
        image = np.ones((128, 128)) * 13
        image = cv2.polylines(image, boundary[:, :2].reshape(1, -1, 2), True, 14, 5)
        for w, h in boundary[:, :2]:
            image[h - 3:h + 4, w - 3:w + 4] = 14
        # paint the entrance strip (code 15) depending on its wall direction
        if boundary[0, 2] == 0:
            image[boundary[0, 1] - 3:boundary[1, 1], boundary[0, 0]: boundary[1, 0]] = 15
        elif boundary[0, 2] == 1:
            image[boundary[0, 1]:boundary[1, 1], boundary[0, 0] + 1: boundary[1, 0] + 4] = 15
        elif boundary[0, 2] == 2:
            image[boundary[0, 1] + 1:boundary[1, 1] + 4, boundary[1, 0]: boundary[0, 0]] = 15
        elif boundary[0, 2] == 3:
            image[boundary[1, 1]:boundary[0, 1], boundary[0, 0] - 3: boundary[1, 0]] = 15
        image = cv2.fillPoly(image, boundary[:, :2].reshape(1, -1, 2), 16)
        return image
| [
"os.listdir",
"scipy.io.loadmat",
"torch.FloatTensor",
"numpy.zeros",
"numpy.ones",
"torch.sub",
"numpy.array",
"os.path.join",
"torch.div",
"numpy.concatenate"
] | [((264, 294), 'os.path.join', 'os.path.join', (['data_root', 'phase'], {}), '(data_root, phase)\n', (276, 294), False, 'import os\n'), ((321, 353), 'os.listdir', 'os.listdir', (['self.data_split_root'], {}), '(self.data_split_root)\n', (331, 353), False, 'import os\n'), ((729, 779), 'os.path.join', 'os.path.join', (['self.data_split_root', 'floorplan_name'], {}), '(self.data_split_root, floorplan_name)\n', (741, 779), False, 'import os\n'), ((1205, 1236), 'torch.FloatTensor', 'torch.FloatTensor', (['input_vector'], {}), '(input_vector)\n', (1222, 1236), False, 'import torch\n'), ((1324, 1369), 'torch.div', 'torch.div', (['data', '(utils.total_num_category - 1)'], {}), '(data, utils.total_num_category - 1)\n', (1333, 1369), False, 'import torch\n'), ((1383, 1403), 'torch.sub', 'torch.sub', (['data', '(0.5)'], {}), '(data, 0.5)\n', (1392, 1403), False, 'import torch\n'), ((1419, 1439), 'torch.div', 'torch.div', (['data', '(0.5)'], {}), '(data, 0.5)\n', (1428, 1439), False, 'import torch\n'), ((1768, 1799), 'numpy.concatenate', 'np.concatenate', (['input_vector', '(0)'], {}), '(input_vector, 0)\n', (1782, 1799), True, 'import numpy as np\n'), ((1605, 1640), 'numpy.zeros', 'np.zeros', (['self.max_room_per_type[i]'], {}), '(self.max_room_per_type[i])\n', (1613, 1640), True, 'import numpy as np\n'), ((1974, 1993), 'numpy.ones', 'np.ones', (['(128, 128)'], {}), '((128, 128))\n', (1981, 1993), True, 'import numpy as np\n'), ((433, 460), 'numpy.array', 'np.array', (['max_room_per_type'], {}), '(max_room_per_type)\n', (441, 460), True, 'import numpy as np\n'), ((814, 882), 'scipy.io.loadmat', 'sio.loadmat', (['floorplan_path'], {'squeeze_me': '(True)', 'struct_as_record': '(False)'}), '(floorplan_path, squeeze_me=True, struct_as_record=False)\n', (825, 882), True, 'import scipy.io as sio\n'), ((1094, 1122), 'torch.FloatTensor', 'torch.FloatTensor', (['input_img'], {}), '(input_img)\n', (1111, 1122), False, 'import torch\n')] |
#!/usr/bin/env python3
# encoding: utf-8
import warnings
import click
import h5py
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
import pypllon as plon
from pypllon.parsers import load_simdata
from glob import glob
pl.style.use('ggplot')
# Optional progress-bar helper; fall back to an identity pass-through when
# the tools package is unavailable.
try:
    from tools.helpers import Progress
except ImportError:
    Progress = lambda iterator: iterator
@click.group()
def main():
    """Command-line entry point grouping the plotting subcommands."""
    pass
###############################################################################
# Simulation Plots #
###############################################################################
@main.command()
@click.argument('h5file')
@click.argument('key')
@click.option('--outfile', help='File to save to', default='simdata.h5')
@click.option('--append/--overwrite', help='If we should try to load existing dataframe under key',
              default=True)
def pandarize(h5file, key, outfile, append):
    """Convert raw simulation HDF5 output into a pandas DataFrame on disk."""
    with h5py.File(h5file, 'r') as infile:
        df = load_simdata(infile)
    # rows with any null column correspond to failed reconstructions
    print('Number of failed reconstructions:', len(df[df.isnull().any(axis=1)]))
    if append:
        try:
            print('Appending to {}/{}'.format(outfile, key))
            df_old = pd.read_hdf(outfile, key)
            df = pd.concat([df_old, df], verify_integrity=True,
                           ignore_index=True)
        except (FileNotFoundError, KeyError):
            # nothing to append to; fall through and write a fresh entry
            print('Cannot append to non-existing entry')
    with warnings.catch_warnings():
        # suppress warnings emitted while serializing to HDF
        warnings.simplefilter("ignore")
        df.to_hdf(outfile, key=key, mode='a')
    return 0
@np.vectorize
def recons_error(target, recov):
    """Norm of (target - recovery) after aligning phases via plon.fix_phases.

    np.vectorize lets this run elementwise over object arrays of matrices.
    """
    a, b = plon.fix_phases('rows_by_max', target, recov)
    return np.linalg.norm(a - b)
@main.command()
@click.argument('key')
@click.option('--infile', help='File to load pandas dataframe from', default='simdata.h5')
def simplot(key, infile):
    """Render the recovery-success phase diagram (measurements vs dimension)."""
    # NOTE(review): this figure is recreated below with a different figsize;
    # the first call looks redundant — confirm.
    fig = pl.figure(0, figsize=(5.5, 4))
    print('Loading {} from {}'.format(key, infile))
    df = pd.read_hdf(infile, key)
    # Preprocessing: drop simulations whose reconstruction failed entirely
    full_len = len(df)
    df = df[df['recons'].notnull()]
    print('Dropping {} failed simulations'.format(full_len - len(df)))
    # Creating error & success variables
    df['recons_err'] = recons_error(df['target'], df['recons'])
    df['recons_err'].fillna(1.0, inplace=True)
    # NOTE(review): the success threshold is 0.3 * dim here while the plot
    # title below claims 10^{-3} x n — confirm which is correct.
    df['recons_success'] = df['recons_err'] < 0.3 * df['dim']
    # create success matrix (rows: measurements, columns: dimension)
    # NOTE(review): positional DataFrame.pivot arguments are keyword-only
    # in pandas >= 2.0.
    p_success = df.groupby(['dim', 'measurements']) \
        .recons_success.mean().reset_index().pivot('measurements', 'dim')
    # fill in the gaps using recover probability from lower nr. of measurements
    p_success.fillna(method='ffill', inplace=True)
    # for too low nr. of measurements just set to 0
    p_success.fillna(value=0, inplace=True)
    p_success = p_success[:70]
    fig = pl.figure(0, figsize=(10, 8))
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title(r'$\mathbb{P}\left(\Vert M - M^\sharp \Vert_2 < 10^{-3} \times n\right)$')
    ax.set_xlabel(r'$n$ (dimension)')
    ax.set_ylabel(r'$m$ (number of measurements)')
    dims = p_success.columns.levels[1].values
    ax.set_xticks(range(0, max(dims)))
    ax.set_xticklabels(np.sort(dims))
    measurements = p_success.index.values
    yticks = list(range(0, max(measurements), 10))
    # the image is drawn flipped (row 0 at the bottom), so shift and
    # reverse the tick labels accordingly
    ax.set_yticks(yticks - min(measurements))
    ax.set_yticklabels(yticks[::-1])
    ax.grid(False)
    img = ax.imshow(p_success.values[::-1], aspect='auto', cmap=pl.cm.gray)
    fig.colorbar(img)
    fig.savefig('sim_{}.pdf'.format(key))
###############################################################################
# Experimental Plots #
###############################################################################
def get_intensities(df, key, timesteps=None):
    """Return per-preparation, time-averaged detector intensities for *key*.

    Counts are scaled by the detector efficiency and averaged over the first
    *timesteps* time bins (all of them when None).  The whole dict is then
    divided by one global constant (the largest total count) — individual
    settings are deliberately NOT normalized, otherwise the solver might
    have trouble.
    """
    deteff = df['RAWCOUNTS'][key].attrs['DETEFF']
    intensities = {}
    # use a distinct loop variable; the original shadowed the `key` argument
    for pvec_key, counts in df['RAWCOUNTS'][key].items():
        counts = counts.value * deteff
        intensities[pvec_key] = 1.0 * np.mean(counts[:timesteps], axis=0)  # take time-average
    # normalize globally with a single constant
    normalization = max(sum(x) for x in intensities.values())
    return {k: x / normalization for k, x in intensities.items()}
def recover(df, key, optim_func=plon.lr_recover_l2, mmax=-1, timesteps=None):
    """Run phase retrieval for *key* using up to *mmax* preparation vectors.

    NOTE(review): the default mmax=-1 makes the slice below [:-1], which
    drops the *last* preparation — confirm whether mmax=None ("use all")
    was intended.
    """
    pvec_keys = list(df['INVECS'][key].keys())[:mmax]
    # note that intensities are not normalized!
    intesity_dict = get_intensities(df, key, timesteps=timesteps)
    # both comprehensions below iterate the same set object, so the rows of
    # pvecs and intensities stay aligned with each other
    valid_keys = set(pvec_keys).intersection(set(intesity_dict.keys()))
    pvecs = np.asarray([df['INVECS'][key][pkey].value for pkey in valid_keys])
    intensities = np.asarray([intesity_dict[pkey] for pkey in valid_keys])
    recov, errs = plon.recover(pvecs, intensities, optim_func=optim_func, reterr=True)
    # but also force our normalization on the reconstruction
    recov /= np.sqrt(np.max(np.sum(np.abs(recov)**2, axis=1)))
    return recov, errs
def get_tmat_singles(df):
    """Transfer-matrix amplitude estimate from single-input count rates."""
    DETECTORS = df.attrs['DETECTORS']
    # Average total photon count (for normalization purposes)
    tmat_single = np.array([df['SINGLE_COUNTS'][str(i)] for i in range(len(DETECTORS))], dtype=float)
    deteff = df['SINGLE_COUNTS'].attrs['DETEFF']
    tmat_single = tmat_single * deteff
    # axis = 0 since we flip the tmat later
    tmat_single /= np.max(np.sum(tmat_single, axis=0))
    # amplitudes only — single counts carry no phase information
    tmat_single = np.sqrt(tmat_single.T)
    return tmat_single
def get_dip_reference(df):
    """Return (reference_matrix, has_phase_info) for error comparisons.

    Prefers the dip-reconstructed matrix (normalized to unit max row norm);
    falls back to the phase-less single-count estimate when absent.
    """
    try:
        dip_reconstructed = df['DIP_RECONSTRUCTED'].value
        normalization_sq = np.max(np.sum(np.abs(dip_reconstructed)**2, axis=1))
        return dip_reconstructed / np.sqrt(normalization_sq), True
    except KeyError:
        return get_tmat_singles(df), False
def square_mean(x):
    """Square root of the summed squares of ``x``, divided by its element count."""
    sum_of_squares = np.sum(x ** 2)
    return np.sqrt(sum_of_squares) / np.size(x)
# Datafile-id fragments -> human-readable descriptions.
EXDESC = {'Fou': 'Fourier', 'Id': 'Identity', 'Swap': 'Swap',
          '_01': 'Random', '_02': 'Random', '_03': 'Random'}
SIZEDESC = {'M2': '2x2', 'M3': '3x3', 'M5': '5x5'}
EXDATAFILES = ['M2Fou.h5', 'M2Id.h5', 'M2_01.h5', 'M2_02.h5', 'M2_03.h5',
               'M3Fou.h5', 'M3Id.h5', 'M3_01.h5', 'M3_02.h5', 'M3_03.h5',
               'M5Fou_rotated.h5', 'M5Id_rotated.h5', 'M5Swap_rotated.h5',
               'M5_01_rotated.h5', 'M5_02_rotated.h5', 'M5_03_rotated.h5']


def id_to_label(the_id):
    """Map a datafile id like 'M2Fou.h5' to a label like '2x2 Fourier'."""
    size_part = SIZEDESC[the_id[:2]] + ' '
    remainder = the_id[2:]
    for marker, description in EXDESC.items():
        if remainder.startswith(marker):
            return size_part + description
    raise ValueError('No label found for', the_id)
def make_explot(datadir, key, ax, dipsref, offset=0.0, color='red'):
    """Scatter mean and per-entry reconstruction errors for every datafile.

    Returns the mean-marker artist (for building a legend) and the index
    of the last datafile plotted.
    """
    for counter, datafile in enumerate(Progress(EXDATAFILES)):
        with h5py.File(datadir + datafile) as df:
            if key == 'RECR_OFFSET':
                try:
                    recov, _ = recover(df, key)
                except KeyError:
                    print(key, 'not found for', datafile, '(Using RECR instead.)')
                    recov, _ = recover(df, 'RECR')
            else:
                recov, _ = recover(df, key)
            ref, with_phases = get_dip_reference(df) if dipsref \
                else (df['TARGET'], True)
            # materialize while the file is still open: in the --targetref
            # branch `ref` was a lazy h5py dataset and became invalid once
            # the `with` block closed the file
            ref = np.asarray(ref)
        if with_phases:
            recov, _ = plon.best_tmat_phases(ref, recov)
            errors = np.abs(recov - ref)
        else:
            # no phase information available: compare magnitudes only
            errors = np.abs(np.abs(recov) - np.abs(ref))
        # diamond marker = mean error; small dashes = individual entries
        artist = ax.scatter([counter + offset], np.mean(errors), marker='D',
                            s=40, c=color)
        ax.scatter([counter + offset] * errors.size, np.ravel(errors), marker='_',
                   s=40, c=color, lw=1.5)
    return artist, counter
@main.command()
@click.argument('key')
@click.option('--datadir', help='Directory containing datafiles',
              default='/Users/dsuess/Code/pypllon/data/')
@click.option('--dipsref/--targetref', help='Use dips as reference, otherwise use target',
              default=True)
def explot(key, datadir, dipsref):
    """Plot experimental reconstruction errors for every datafile."""
    fig = pl.figure(0, figsize=(9, 5))
    ax = fig.add_subplot(1, 1, 1)
    if key != 'GAUSS':
        # compare the requested method against the Gaussian baseline
        artist_gauss, counter = make_explot(datadir, 'GAUSS', ax, dipsref,
                                            offset=-.15, color='red')
        artist_recr, counter = make_explot(datadir, key, ax, dipsref,
                                           offset=.15, color='blue')
        fig.legend([artist_gauss, artist_recr], ['Gaussian', 'RECR'])
    else:
        _, counter = make_explot(datadir, key, ax, dipsref)
    if dipsref:
        ax.set_ylabel(r'$\vert M_\mathrm{dips} - M^\sharp \vert$')
    else:
        ax.set_ylabel(r'$\vert M_\mathrm{target} - M^\sharp \vert$')
    ax.set_xlim(-.5, counter + .5)
    ax.set_xticks(range(counter + 1))
    ax.set_xticklabels([id_to_label(the_id) for the_id in EXDATAFILES],
                       rotation=80)
    ax.grid(True)
    # leave room for the rotated x tick labels
    fig.subplots_adjust(bottom=0.2)
    pl.savefig('ex_{}.pdf'.format(key))
if __name__ == '__main__':
    # dispatch to the click command group (pandarize / simplot / explot)
    main()
| [
"numpy.sum",
"tools.helpers.Progress",
"numpy.abs",
"numpy.ravel",
"click.option",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"numpy.mean",
"pandas.read_hdf",
"pypllon.recover",
"warnings.simplefilter",
"pypllon.parsers.load_simdata",
"warnings.catch_wa... | [((242, 264), 'matplotlib.pyplot.style.use', 'pl.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (254, 264), True, 'import matplotlib.pyplot as pl\n'), ((374, 387), 'click.group', 'click.group', ([], {}), '()\n', (385, 387), False, 'import click\n'), ((668, 692), 'click.argument', 'click.argument', (['"""h5file"""'], {}), "('h5file')\n", (682, 692), False, 'import click\n'), ((694, 715), 'click.argument', 'click.argument', (['"""key"""'], {}), "('key')\n", (708, 715), False, 'import click\n'), ((717, 788), 'click.option', 'click.option', (['"""--outfile"""'], {'help': '"""File to save to"""', 'default': '"""simdata.h5"""'}), "('--outfile', help='File to save to', default='simdata.h5')\n", (729, 788), False, 'import click\n'), ((790, 907), 'click.option', 'click.option', (['"""--append/--overwrite"""'], {'help': '"""If we should try to load existing dataframe under key"""', 'default': '(True)'}), "('--append/--overwrite', help=\n 'If we should try to load existing dataframe under key', default=True)\n", (802, 907), False, 'import click\n'), ((1765, 1786), 'click.argument', 'click.argument', (['"""key"""'], {}), "('key')\n", (1779, 1786), False, 'import click\n'), ((1788, 1882), 'click.option', 'click.option', (['"""--infile"""'], {'help': '"""File to load pandas dataframe from"""', 'default': '"""simdata.h5"""'}), "('--infile', help='File to load pandas dataframe from', default\n ='simdata.h5')\n", (1800, 1882), False, 'import click\n'), ((7730, 7751), 'click.argument', 'click.argument', (['"""key"""'], {}), "('key')\n", (7744, 7751), False, 'import click\n'), ((7753, 7866), 'click.option', 'click.option', (['"""--datadir"""'], {'help': '"""Directory containing datafiles"""', 'default': '"""/Users/dsuess/Code/pypllon/data/"""'}), "('--datadir', help='Directory containing datafiles', default=\n '/Users/dsuess/Code/pypllon/data/')\n", (7765, 7866), False, 'import click\n'), ((7877, 7985), 'click.option', 'click.option', 
(['"""--dipsref/--targetref"""'], {'help': '"""Use dips as reference, otherwise use target"""', 'default': '(True)'}), "('--dipsref/--targetref', help=\n 'Use dips as reference, otherwise use target', default=True)\n", (7889, 7985), False, 'import click\n'), ((1667, 1712), 'pypllon.fix_phases', 'plon.fix_phases', (['"""rows_by_max"""', 'target', 'recov'], {}), "('rows_by_max', target, recov)\n", (1682, 1712), True, 'import pypllon as plon\n'), ((1724, 1745), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (1738, 1745), True, 'import numpy as np\n'), ((1914, 1944), 'matplotlib.pyplot.figure', 'pl.figure', (['(0)'], {'figsize': '(5.5, 4)'}), '(0, figsize=(5.5, 4))\n', (1923, 1944), True, 'import matplotlib.pyplot as pl\n'), ((2006, 2030), 'pandas.read_hdf', 'pd.read_hdf', (['infile', 'key'], {}), '(infile, key)\n', (2017, 2030), True, 'import pandas as pd\n'), ((2823, 2852), 'matplotlib.pyplot.figure', 'pl.figure', (['(0)'], {'figsize': '(10, 8)'}), '(0, figsize=(10, 8))\n', (2832, 2852), True, 'import matplotlib.pyplot as pl\n'), ((4641, 4707), 'numpy.asarray', 'np.asarray', (["[df['INVECS'][key][pkey].value for pkey in valid_keys]"], {}), "([df['INVECS'][key][pkey].value for pkey in valid_keys])\n", (4651, 4707), True, 'import numpy as np\n'), ((4726, 4782), 'numpy.asarray', 'np.asarray', (['[intesity_dict[pkey] for pkey in valid_keys]'], {}), '([intesity_dict[pkey] for pkey in valid_keys])\n', (4736, 4782), True, 'import numpy as np\n'), ((4801, 4869), 'pypllon.recover', 'plon.recover', (['pvecs', 'intensities'], {'optim_func': 'optim_func', 'reterr': '(True)'}), '(pvecs, intensities, optim_func=optim_func, reterr=True)\n', (4813, 4869), True, 'import pypllon as plon\n'), ((5452, 5474), 'numpy.sqrt', 'np.sqrt', (['tmat_single.T'], {}), '(tmat_single.T)\n', (5459, 5474), True, 'import numpy as np\n'), ((8040, 8068), 'matplotlib.pyplot.figure', 'pl.figure', (['(0)'], {'figsize': '(9, 5)'}), '(0, figsize=(9, 5))\n', (8049, 8068), True, 'import 
matplotlib.pyplot as pl\n'), ((971, 993), 'h5py.File', 'h5py.File', (['h5file', '"""r"""'], {}), "(h5file, 'r')\n", (980, 993), False, 'import h5py\n'), ((1018, 1038), 'pypllon.parsers.load_simdata', 'load_simdata', (['infile'], {}), '(infile)\n', (1030, 1038), False, 'from pypllon.parsers import load_simdata\n'), ((1481, 1506), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1504, 1506), False, 'import warnings\n'), ((1516, 1547), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1537, 1547), False, 'import warnings\n'), ((3178, 3191), 'numpy.sort', 'np.sort', (['dims'], {}), '(dims)\n', (3185, 3191), True, 'import numpy as np\n'), ((5405, 5432), 'numpy.sum', 'np.sum', (['tmat_single'], {'axis': '(0)'}), '(tmat_single, axis=0)\n', (5411, 5432), True, 'import numpy as np\n'), ((5862, 5872), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (5869, 5872), True, 'import numpy as np\n'), ((6684, 6705), 'tools.helpers.Progress', 'Progress', (['EXDATAFILES'], {}), '(EXDATAFILES)\n', (6692, 6705), False, 'from tools.helpers import Progress\n'), ((1232, 1257), 'pandas.read_hdf', 'pd.read_hdf', (['outfile', 'key'], {}), '(outfile, key)\n', (1243, 1257), True, 'import pandas as pd\n'), ((1275, 1340), 'pandas.concat', 'pd.concat', (['[df_old, df]'], {'verify_integrity': '(True)', 'ignore_index': '(True)'}), '([df_old, df], verify_integrity=True, ignore_index=True)\n', (1284, 1340), True, 'import pandas as pd\n'), ((4037, 4072), 'numpy.mean', 'np.mean', (['counts[:timesteps]'], {'axis': '(0)'}), '(counts[:timesteps], axis=0)\n', (4044, 4072), True, 'import numpy as np\n'), ((5846, 5860), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (5852, 5860), True, 'import numpy as np\n'), ((6721, 6750), 'h5py.File', 'h5py.File', (['(datadir + datafile)'], {}), '(datadir + datafile)\n', (6730, 6750), False, 'import h5py\n'), ((5709, 5734), 'numpy.sqrt', 'np.sqrt', (['normalization_sq'], {}), '(normalization_sq)\n', 
(5716, 5734), True, 'import numpy as np\n'), ((7263, 7296), 'pypllon.best_tmat_phases', 'plon.best_tmat_phases', (['ref', 'recov'], {}), '(ref, recov)\n', (7284, 7296), True, 'import pypllon as plon\n'), ((7322, 7341), 'numpy.abs', 'np.abs', (['(recov - ref)'], {}), '(recov - ref)\n', (7328, 7341), True, 'import numpy as np\n'), ((7474, 7489), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (7481, 7489), True, 'import numpy as np\n'), ((7607, 7623), 'numpy.ravel', 'np.ravel', (['errors'], {}), '(errors)\n', (7615, 7623), True, 'import numpy as np\n'), ((4966, 4979), 'numpy.abs', 'np.abs', (['recov'], {}), '(recov)\n', (4972, 4979), True, 'import numpy as np\n'), ((5635, 5660), 'numpy.abs', 'np.abs', (['dip_reconstructed'], {}), '(dip_reconstructed)\n', (5641, 5660), True, 'import numpy as np\n'), ((7392, 7405), 'numpy.abs', 'np.abs', (['recov'], {}), '(recov)\n', (7398, 7405), True, 'import numpy as np\n'), ((7408, 7419), 'numpy.abs', 'np.abs', (['ref'], {}), '(ref)\n', (7414, 7419), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 9 08:05:52 2020
@author: BK
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import cmath
def par(alpha,bata):
    """
    Combine two impedances in parallel.

    Returns alpha*bata / (alpha + bata), the standard parallel
    combination of two circuit elements.
    """
    numerator = alpha * bata
    denominator = alpha + bata
    return numerator / denominator
def circuit_list2mat_diagonal(circuit_list_main, anti_diagonal=0):
    """
    Build a complex square matrix from a 1-D list of circuit elements.

    Given a list such as [A, B, C, D], create an n x n complex matrix
    (n = len(circuit_list_main)) and place the elements on its main
    diagonal (anti_diagonal == 0), or on its anti-diagonal filled from
    the bottom row upward (anti_diagonal == 1).

    Parameters
    ----------
    circuit_list_main : sequence of numbers
        Circuit element values to place in the matrix.
    anti_diagonal : int, optional
        0 -> main diagonal (default); 1 -> anti-diagonal.
        Any other value prints an error and returns the zero matrix,
        matching the original behaviour.

    Returns
    -------
    numpy.ndarray
        Complex square matrix of shape (n, n).
    """
    n = len(circuit_list_main)
    circuit_mat = np.zeros([n, n], dtype=complex)
    if anti_diagonal == 0:
        for i in range(n):
            circuit_mat[i, i] = circuit_list_main[i]
    elif anti_diagonal == 1:
        # Fix: the original used np.int(len(...)), but the np.int alias was
        # removed in NumPy 1.24 (AttributeError).  len() already returns a
        # plain Python int, so no cast is needed.
        j = n - 1
        for i in range(n):
            circuit_mat[j, i] = circuit_list_main[i]
            j = j - 1
    else:
        print("input index/diagonal error,please check!")
    return circuit_mat
def circuit_mat_subelement(circuit_mat,circuit_list_sub):
    """
    Merge off-diagonal coupling elements into a diagonal impedance matrix.

    Takes the diagonal matrix produced by circuit_list2mat_diagonal plus an
    (n-1) x (n-1) array of off-diagonal entries, enforces the matrix
    symmetry relation z(2,1) = z(1,2), and then adds the diagonal part
    back on top.

    NOTE(review): for n >= 3 the fill loop writes circuit_list_sub[i, j]
    into column j+1 of every row i < n-1, which also touches diagonal
    positions (e.g. [1, 1]); those sub-entries end up summed into the
    diagonal of circuit_mat.  Presumably only the strict upper triangle
    was intended -- confirm against the circuit model before changing.
    """
    # Fresh complex matrix the same size as the diagonal input.
    circuit_mat_whole=np.zeros([circuit_mat.shape[0],circuit_mat.shape[0]],dtype=complex)
    # Copy the coupling terms: row i, columns 1..n-1 receive
    # circuit_list_sub[i, :].
    for i in range(len(circuit_mat)-1):
        for j in range(len(circuit_mat)-1):
            circuit_mat_whole[i,j+1]=circuit_list_sub[i,j]
    # Symmetrize in place.  The copy order matters: entries written by an
    # earlier (i, j) pass are re-read by later passes; the net effect is a
    # symmetric matrix mirroring the populated upper part.
    for i in range(len(circuit_mat)):
        for j in range(len(circuit_mat)):
            circuit_mat_whole[j,i]=circuit_mat_whole[i,j]
    # Add the diagonal elements back on top of the symmetric coupling part.
    circuit_mat_whole=circuit_mat_whole+circuit_mat
    return circuit_mat_whole
def circuit_impedence_cal(imp_matrix, vol_mat_number, force_mat_number, curve_type):
    """
    Compute a velocity / volume-velocity response from an impedance matrix.

    Evaluates the selector product, e.g. [1 0 0 0] * inv(Z) * [1;0;0;0]:
    a row selector picks the target loop (velocity), a column selector
    picks the driving force, and the product with the inverted impedance
    matrix yields the target response in the user's chosen domain.

    Parameters
    ----------
    imp_matrix : array_like
        Square (n x n) impedance matrix of the equivalent circuit.
    vol_mat_number : int
        1-based index of the target velocity/loop (row selector).
    force_mat_number : int
        1-based index of the driving force (column selector).
    curve_type : int
        0 for frequency response, 1 for impedance curve.  Both branches
        of the original code were byte-identical, so they are collapsed
        into one code path here; the distinction is kept only for
        interface compatibility.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        The 1x1 response array and the inverted impedance matrix.

    Raises
    ------
    ValueError
        If curve_type is neither 0 nor 1.  (Fix: the original left
        imp_matrix_inv unbound on this path and crashed with
        UnboundLocalError at the return statement.)
    """
    if curve_type not in (0, 1):
        raise ValueError("curve_type must be 0 or 1")
    # Build the 1-based row and column selector vectors.
    vol_mat = np.zeros([1, len(imp_matrix)])
    vol_mat[0, vol_mat_number - 1] = 1
    force_mat = np.zeros([len(imp_matrix), 1])
    force_mat[force_mat_number - 1, 0] = 1
    # Single computation path (the original duplicated this in both
    # if/elif branches); the dead impedance = np.zeros(...) initializer
    # was removed as well.
    imp_matrix_inv = np.linalg.inv(imp_matrix)
    impedance = np.dot(np.dot(vol_mat, imp_matrix_inv), force_mat)
    return impedance, imp_matrix_inv
def complex_value_calculate(complex_number):
    """Return sqrt(Re^2 + Im^2) of *complex_number* as a complex value."""
    re_part = np.real(complex_number)
    im_part = np.imag(complex_number)
    return cmath.sqrt(re_part ** 2 + im_part ** 2)
| [
"numpy.zeros",
"numpy.imag",
"numpy.linalg.inv",
"numpy.real",
"numpy.dot"
] | [((1223, 1292), 'numpy.zeros', 'np.zeros', (['[circuit_mat.shape[0], circuit_mat.shape[0]]'], {'dtype': 'complex'}), '([circuit_mat.shape[0], circuit_mat.shape[0]], dtype=complex)\n', (1231, 1292), True, 'import numpy as np\n'), ((2050, 2076), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'complex'}), '(1, dtype=complex)\n', (2058, 2076), True, 'import numpy as np\n'), ((2281, 2306), 'numpy.linalg.inv', 'np.linalg.inv', (['imp_matrix'], {}), '(imp_matrix)\n', (2294, 2306), True, 'import numpy as np\n'), ((2333, 2364), 'numpy.dot', 'np.dot', (['vol_mat', 'imp_matrix_inv'], {}), '(vol_mat, imp_matrix_inv)\n', (2339, 2364), True, 'import numpy as np\n'), ((2426, 2451), 'numpy.linalg.inv', 'np.linalg.inv', (['imp_matrix'], {}), '(imp_matrix)\n', (2439, 2451), True, 'import numpy as np\n'), ((2478, 2509), 'numpy.dot', 'np.dot', (['vol_mat', 'imp_matrix_inv'], {}), '(vol_mat, imp_matrix_inv)\n', (2484, 2509), True, 'import numpy as np\n'), ((2627, 2650), 'numpy.real', 'np.real', (['complex_number'], {}), '(complex_number)\n', (2634, 2650), True, 'import numpy as np\n'), ((2654, 2677), 'numpy.imag', 'np.imag', (['complex_number'], {}), '(complex_number)\n', (2661, 2677), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.