code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Importing the Required libraries
import cv2
import numpy as np
# Private Required functions
def defineTheCoOrdinates(image, lane_paramters):
    """Convert a fitted (slope, intercept) pair into pixel end-points of a lane.

    The line runs from the bottom edge of the image (Y1 = image height) up to
    3/5 of the image height (Y2), with X solved from y = m*x + c as
    x = (y - intercept) / slope.

    Args:
        image: array-like with a ``.shape`` of (height, width, ...); only the
            height is used.
        lane_paramters: 2-sequence ``(slope, intercept)`` of the lane line.

    Returns:
        np.ndarray of 4 ints: [X1, Y1, X2, Y2].
    """
    # BUGFIX: removed the leftover debug print(lane_paramters).
    slope, intercept = lane_paramters
    Y1 = image.shape[0]            # start at the bottom row of the image
    Y2 = int(Y1 * (3 / 5))         # extend up to 3/5 of the image height
    X1 = int((Y1 - intercept) / slope)
    X2 = int((Y2 - intercept) / slope)
    return np.array([X1, Y1, X2, Y2])
def drawOptimizedLanes(passedImage, lanes):
    """Return a copy of *passedImage* with each lane drawn as a thick white line.

    Args:
        passedImage: BGR image to draw on (left unmodified; a copy is used).
        lanes: iterable of (X1, Y1, X2, Y2) rows, or None for no lanes.
    """
    canvas = passedImage.copy()
    if lanes is not None:  # None can arrive when no lines were detected
        for start_x, start_y, end_x, end_y in lanes:
            # White (255,255,255) so the averaged lanes stand out from the
            # blue raw segments drawn elsewhere.
            cv2.line(canvas, (start_x, start_y), (end_x, end_y), (255, 255, 255), 5)
    return canvas
def findCannyEdges(image):
    """Smooth *image* with a Gaussian blur and return its Canny edge map."""
    # 5x5 kernel; the very large sigmaX is a tuning knob — vary it per input.
    smoothed = cv2.GaussianBlur(image, (5, 5), 10000)
    # Hysteresis thresholds 50/150 for the edge detector.
    return cv2.Canny(smoothed, 50, 150)
def findRegionOfInterest(cannyImage):
    """Mask the Canny edge map down to a trapezoidal road region.

    A trapezoid spanning the full bottom edge and narrowing to 45%-55% of the
    width at 60% of the height is filled with 255 in a black mask; AND-ing the
    mask with the edge map keeps edges only inside that region (255 is all 1s
    in binary, so bitwise-AND preserves the pixels under the white area).

    Args:
        cannyImage: single-channel edge image.

    Returns:
        Edge image with everything outside the road trapezoid blacked out.
    """
    # BUGFIX: removed a dead hard-coded triangle assignment that was
    # immediately overwritten, plus a leftover debug print of the height.
    height = cannyImage.shape[0]
    width = cannyImage.shape[1]
    # Wrapped in an extra dimension because cv2.fillPoly expects a list of polygons.
    roadAsPolygon = np.array([[(0, height), (width, height), (int(0.55*width), int(0.6*height)), (int(0.45*width), int(0.6*height))]], dtype=np.int32)
    maskImage = np.zeros_like(cannyImage)
    cv2.fillPoly(maskImage, roadAsPolygon, 255)
    maskedImage = cv2.bitwise_and(cannyImage, maskImage)
    return maskedImage
def drawTheLinesOnTheImage(orgImage, lines):
    """Render the raw Hough segments in blue on a black canvas.

    Returns a black image of orgImage's shape with only the detected segments
    drawn — intended to be blended over the original image later.
    """
    canvas = np.zeros_like(orgImage)
    # lines may be None when the Hough transform finds nothing.
    if lines is None:
        return canvas
    for segment in lines:
        # Each Hough segment is a 1x4 array; flatten it and unpack the ends.
        start_x, start_y, end_x, end_y = segment.reshape(4)
        cv2.line(canvas, (start_x, start_y), (end_x, end_y), (255, 0, 0), 3)
    return canvas
def blendTheImages(lanedImage, colorImage):
    """Blend the two images as 0.8*lanedImage + 1.0*colorImage + 1.

    Darkening the base image (alpha=0.8 — lower pixel values look darker)
    keeps the full-weight overlay (beta=1) clearly visible; gamma=1 is just a
    small constant offset for rounding.
    """
    return cv2.addWeighted(src1=lanedImage, alpha=0.8, src2=colorImage, beta=1, gamma=1)
def optimizeTheLanesOnImage(alreadyLanedImage, multipleLines):
    """Average the many Hough segments into one left and one right lane line.

    In image coordinates (y grows downward), the right road lane has a
    positive slope and the left road lane a negative one, so each segment's
    fitted (slope, intercept) is bucketed by the sign of its slope.  Each
    bucket is averaged into a single line and drawn in white.

    Args:
        alreadyLanedImage: image the optimized lanes are drawn onto (its
            height also anchors the lane end-points).
        multipleLines: cv2.HoughLinesP output (Nx1x4) or None.

    Returns:
        A copy of *alreadyLanedImage* with the averaged lanes drawn.
    """
    left_fit = []   # (slope, intercept) of segments with negative slope
    right_fit = []  # (slope, intercept) of segments with positive slope
    if multipleLines is not None:  # HoughLinesP returns None when nothing is found
        for line in multipleLines:
            X1, Y1, X2, Y2 = line.reshape(4)
            # Fit a degree-1 polynomial (y = slope*x + intercept) through the
            # two end-points; index 0 is the slope, index 1 the intercept.
            slope, intercept = np.polyfit(x=(X1, X2), y=(Y1, Y2), deg=1)
            if slope < 0:
                left_fit.append((slope, intercept))
            else:
                right_fit.append((slope, intercept))
    lanes = []
    # BUGFIX 1: use the passed-in image (only its shape is needed) instead of
    # the module-level global `image` the original relied on.
    # BUGFIX 2: skip an empty side — np.average([]) yields NaN, which then
    # crashed defineTheCoOrdinates when unpacking (slope, intercept).
    for fits in (left_fit, right_fit):
        if fits:
            # axis=0 averages slope and intercept independently across segments.
            lanes.append(defineTheCoOrdinates(alreadyLanedImage, np.average(fits, axis=0)))
    return drawOptimizedLanes(alreadyLanedImage, np.array(lanes))
# ---- Script entry: lane-detection pipeline on a single still image ----
# Loading the image
original_image = cv2.imread("road_image.jpg")
image = original_image.copy() # Work on a copy; NOTE: `image` is also read as a module-level global inside optimizeTheLanesOnImage
# Get the edges on the image
cannyEdgedImage = findCannyEdges(image)
# Define the Region of Interest (keep only edges inside the road trapezoid)
croppedImage = findRegionOfInterest(cannyEdgedImage)
# Detect line segments among the remaining edges
lines = cv2.HoughLinesP(croppedImage,rho=1,theta=np.pi/180,threshold= 100, minLineLength=40, maxLineGap=50)# rho/theta: accumulator resolution (1 px / 1 degree); threshold: minimum votes; minLineLength: segments shorter than 40 px are ignored; maxLineGap: segments separated by up to 50 px are merged into one line
lanedImage = drawTheLinesOnTheImage(image, lines) # NOTE: this is a black image with only the raw segments drawn
# Many raw segments cover each lane; average them down to one precise line
# per side before using them for any driving decision.
optimizedLanesImage = optimizeTheLanesOnImage(lanedImage, lines)
# Blending the original image and the lines-detected image
blendedImage = blendTheImages(image, optimizedLanesImage)
# Displaying the image
cv2.imshow("Final Road", blendedImage)
cv2.waitKey()
cv2.destroyAllWindows() | [
"cv2.line",
"cv2.GaussianBlur",
"cv2.Canny",
"numpy.zeros_like",
"numpy.average",
"cv2.bitwise_and",
"numpy.polyfit",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.fillPoly",
"cv2.addWeighted",
"cv2.imread",
"numpy.array",
"cv2.HoughLinesP",
"cv2.imshow"
] | [((8540, 8568), 'cv2.imread', 'cv2.imread', (['"""road_image.jpg"""'], {}), "('road_image.jpg')\n", (8550, 8568), False, 'import cv2\n'), ((8854, 8961), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['croppedImage'], {'rho': '(1)', 'theta': '(np.pi / 180)', 'threshold': '(100)', 'minLineLength': '(40)', 'maxLineGap': '(50)'}), '(croppedImage, rho=1, theta=np.pi / 180, threshold=100,\n minLineLength=40, maxLineGap=50)\n', (8869, 8961), False, 'import cv2\n'), ((9818, 9856), 'cv2.imshow', 'cv2.imshow', (['"""Final Road"""', 'blendedImage'], {}), "('Final Road', blendedImage)\n", (9828, 9856), False, 'import cv2\n'), ((9858, 9871), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (9869, 9871), False, 'import cv2\n'), ((9873, 9896), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9894, 9896), False, 'import cv2\n'), ((661, 687), 'numpy.array', 'np.array', (['[X1, Y1, X2, Y2]'], {}), '([X1, Y1, X2, Y2])\n', (669, 687), True, 'import numpy as np\n'), ((1133, 1171), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(5, 5)', '(10000)'], {}), '(image, (5, 5), 10000)\n', (1149, 1171), False, 'import cv2\n'), ((1280, 1310), 'cv2.Canny', 'cv2.Canny', (['blur_image', '(50)', '(150)'], {}), '(blur_image, 50, 150)\n', (1289, 1310), False, 'import cv2\n'), ((1477, 1542), 'numpy.array', 'np.array', (['[[(200, height), (1100, height), (550, 250)]]', 'np.int32'], {}), '([[(200, height), (1100, height), (550, 250)]], np.int32)\n', (1485, 1542), True, 'import numpy as np\n'), ((1811, 1836), 'numpy.zeros_like', 'np.zeros_like', (['cannyImage'], {}), '(cannyImage)\n', (1824, 1836), True, 'import numpy as np\n'), ((1839, 1882), 'cv2.fillPoly', 'cv2.fillPoly', (['maskImage', 'roadAsPolygon', '(255)'], {}), '(maskImage, roadAsPolygon, 255)\n', (1851, 1882), False, 'import cv2\n'), ((2090, 2128), 'cv2.bitwise_and', 'cv2.bitwise_and', (['cannyImage', 'maskImage'], {}), '(cannyImage, maskImage)\n', (2105, 2128), False, 'import cv2\n'), ((2495, 2518), 'numpy.zeros_like', 
'np.zeros_like', (['orgImage'], {}), '(orgImage)\n', (2508, 2518), True, 'import numpy as np\n'), ((3151, 3228), 'cv2.addWeighted', 'cv2.addWeighted', ([], {'src1': 'lanedImage', 'alpha': '(0.8)', 'src2': 'colorImage', 'beta': '(1)', 'gamma': '(1)'}), '(src1=lanedImage, alpha=0.8, src2=colorImage, beta=1, gamma=1)\n', (3166, 3228), False, 'import cv2\n'), ((7752, 7780), 'numpy.average', 'np.average', (['left_fit'], {'axis': '(0)'}), '(left_fit, axis=0)\n', (7762, 7780), True, 'import numpy as np\n'), ((7885, 7914), 'numpy.average', 'np.average', (['right_fit'], {'axis': '(0)'}), '(right_fit, axis=0)\n', (7895, 7914), True, 'import numpy as np\n'), ((891, 951), 'cv2.line', 'cv2.line', (['finalImage', '(X1, Y1)', '(X2, Y2)', '(255, 255, 255)', '(5)'], {}), '(finalImage, (X1, Y1), (X2, Y2), (255, 255, 255), 5)\n', (899, 951), False, 'import cv2\n'), ((2938, 2994), 'cv2.line', 'cv2.line', (['lane_image', '(X1, Y1)', '(X2, Y2)', '(255, 0, 0)', '(3)'], {}), '(lane_image, (X1, Y1), (X2, Y2), (255, 0, 0), 3)\n', (2946, 2994), False, 'import cv2\n'), ((6613, 6654), 'numpy.polyfit', 'np.polyfit', ([], {'x': '(X1, X2)', 'y': '(Y1, Y2)', 'deg': '(1)'}), '(x=(X1, X2), y=(Y1, Y2), deg=1)\n', (6623, 6654), True, 'import numpy as np\n'), ((8436, 8469), 'numpy.array', 'np.array', (['[left_lane, right_lane]'], {}), '([left_lane, right_lane])\n', (8444, 8469), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from matplotlib.pyplot import rc
from jamo import h2j, j2hcj
from text import PAD, EOS
from text.korean import normalize
FONT_NAME = "NanumBarunGothic"


def check_font():
    """Rebuild matplotlib's font cache when FONT_NAME is not yet registered."""
    installed_names = []
    for font_path in font_manager.findSystemFonts():
        installed_names.append(font_manager.FontProperties(fname=font_path).get_name())
    if FONT_NAME not in installed_names:
        # _rebuild() refreshes matplotlib's font cache so a freshly installed
        # font becomes visible (private matplotlib API).
        font_manager._rebuild()
# Make sure the Korean-capable font is available (rebuilding the cache if
# needed) before registering it as matplotlib's default font family.
check_font()
rc('font', family=FONT_NAME)
def plot(alignment, info, text, isKorean=True):
    """Draw an attention-alignment heat map on a new matplotlib figure.

    Args:
        alignment: 2-D array of shape (char_len, audio_len); plotted
            transposed with encoder steps on x and decoder steps on y.
        info: optional extra string appended to the x-axis label.
        text: the input text; used for per-character x tick labels and,
            stripped of trailing EOS/PAD markers, as the figure title.
        isKorean: decompose *text* into jamo before labelling the ticks.
    """
    char_len, audio_len = alignment.shape  # e.g. 145, 200
    fig, ax = plt.subplots(figsize=(char_len / 5, 5))
    im = ax.imshow(
        alignment.T,
        aspect='auto',
        origin='lower',
        interpolation='none')
    # fig.colorbar(im, ax=ax)

    xlabel = 'Encoder timestep'
    ylabel = 'Decoder timestep'
    if info is not None:
        xlabel += '\n{}'.format(info)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)

    if text:
        if isKorean:
            jamo_text = j2hcj(h2j(normalize(text)))
        else:
            jamo_text = text
        # Pad the tick labels out to char_len (last slot is EOS).
        pad = [PAD] * (char_len - len(jamo_text) - 1)
        plt.xticks(range(char_len),
                   [tok for tok in jamo_text] + [EOS] + pad)

    if text is not None:
        # Strip trailing EOS/PAD markers from the title.
        # BUGFIX: the original `while True` loop raised IndexError (text[-1]
        # on '') once the text had been stripped empty.
        while text and text[-1] in [EOS, PAD]:
            text = text[:-1]
        plt.title(text)

    plt.tight_layout()
def plot_alignment(
        alignment, path, info=None, text=None, isKorean=True):
    """Plot *alignment* via plot() and save the figure to *path* as PNG.

    When *text* is given, the alignment is first trimmed to the jamo length
    of the text (plus 2 for the EOS/padding slots).
    """
    if text:
        trimmed = alignment[:len(h2j(text)) + 2]
        plot(trimmed, info, text, isKorean)
    else:
        plot(alignment, info, text, isKorean)
    plt.savefig(path, format='png')
    print(" [*] Plot saved: {}".format(path))
if __name__ == '__main__':
    # guided alignment test: visualize the weight matrix, an idealized
    # alignment, and their product side by side.
    import numpy as np
    max_N = 90   # number of encoder (text) steps
    max_T = 200  # number of decoder (audio) steps
    g = 0.2      # width of the diagonal band in the weight matrix
    # W penalizes positions far from the diagonal:
    # W[n,t] = 1 - exp(-((t/T - n/N)^2) / (2 g^2))
    W = np.zeros((max_N, max_T), dtype=np.float32)
    for n_pos in range(W.shape[0]):
        for t_pos in range(W.shape[1]):
            W[n_pos, t_pos] = 1 - np.exp(-(t_pos / float(max_T) - n_pos / float(max_N)) ** 2 / (2 * g * g))
    # plot(W, None, None, False)
    # Synthetic alignment that peaks on the rescaled diagonal.
    alignment = np.zeros((max_N, max_T), dtype=np.float32)
    for n_pos in range(alignment.shape[0]):
        for t_pos in range(alignment.shape[1]):
            alignment[n_pos, t_pos] = 1 / (1 + abs(n_pos * (max_T / max_N) - t_pos))
    # plot(alignment, None, None, False)
    attention = alignment * W  # element-wise product: penalized alignment
    # plot(attention, None, None, False)
    plt.subplot(1, 3, 1), plt.imshow(W, origin='lower'), plt.title('weight'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 2), plt.imshow(alignment, origin='lower'), plt.title('alignment'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 3), plt.imshow(attention, origin='lower'), plt.title('attention'), plt.xticks([]), plt.yticks([])
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot",
"jamo.h2j",
"matplotlib.font_manager.findSystemFonts",
"matplotlib.pyplot.show",
"matplotlib.font_manager.FontProperties",
"text.korean.normalize",
"matplotlib.font_manager._rebuild",
"matplotlib.pyplot.imsho... | [((460, 488), 'matplotlib.pyplot.rc', 'rc', (['"""font"""'], {'family': 'FONT_NAME'}), "('font', family=FONT_NAME)\n", (462, 488), False, 'from matplotlib.pyplot import rc\n'), ((264, 294), 'matplotlib.font_manager.findSystemFonts', 'font_manager.findSystemFonts', ([], {}), '()\n', (292, 294), True, 'import matplotlib.font_manager as font_manager\n'), ((608, 647), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(char_len / 5, 5)'}), '(figsize=(char_len / 5, 5))\n', (620, 647), True, 'import matplotlib.pyplot as plt\n'), ((930, 948), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (940, 948), True, 'import matplotlib.pyplot as plt\n'), ((953, 971), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (963, 971), True, 'import matplotlib.pyplot as plt\n'), ((1441, 1459), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1457, 1459), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2020), 'numpy.zeros', 'np.zeros', (['(max_N, max_T)'], {'dtype': 'np.float32'}), '((max_N, max_T), dtype=np.float32)\n', (1986, 2020), True, 'import numpy as np\n'), ((2255, 2297), 'numpy.zeros', 'np.zeros', (['(max_N, max_T)'], {'dtype': 'np.float32'}), '((max_N, max_T), dtype=np.float32)\n', (2263, 2297), True, 'import numpy as np\n'), ((2942, 2952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2950, 2952), True, 'import matplotlib.pyplot as plt\n'), ((421, 444), 'matplotlib.font_manager._rebuild', 'font_manager._rebuild', ([], {}), '()\n', (442, 444), True, 'import matplotlib.font_manager as font_manager\n'), ((1420, 1435), 'matplotlib.pyplot.title', 'plt.title', (['text'], {}), '(text)\n', (1429, 1435), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1703), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'format': '"""png"""'}), "(path, format='png')\n", (1683, 1703), True, 'import matplotlib.pyplot as plt\n'), ((1768, 1799), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'format': '"""png"""'}), "(path, format='png')\n", (1779, 1799), True, 'import matplotlib.pyplot as plt\n'), ((2593, 2613), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (2604, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2615, 2644), 'matplotlib.pyplot.imshow', 'plt.imshow', (['W'], {'origin': '"""lower"""'}), "(W, origin='lower')\n", (2625, 2644), True, 'import matplotlib.pyplot as plt\n'), ((2646, 2665), 'matplotlib.pyplot.title', 'plt.title', (['"""weight"""'], {}), "('weight')\n", (2655, 2665), True, 'import matplotlib.pyplot as plt\n'), ((2667, 2681), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2677, 2681), True, 'import matplotlib.pyplot as plt\n'), ((2683, 2697), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2693, 2697), True, 'import matplotlib.pyplot as plt\n'), ((2702, 2722), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (2713, 2722), True, 'import matplotlib.pyplot as plt\n'), ((2724, 2761), 'matplotlib.pyplot.imshow', 'plt.imshow', (['alignment'], {'origin': '"""lower"""'}), "(alignment, origin='lower')\n", (2734, 2761), True, 'import matplotlib.pyplot as plt\n'), ((2763, 2785), 'matplotlib.pyplot.title', 'plt.title', (['"""alignment"""'], {}), "('alignment')\n", (2772, 2785), True, 'import matplotlib.pyplot as plt\n'), ((2787, 2801), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2797, 2801), True, 'import matplotlib.pyplot as plt\n'), ((2803, 2817), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2813, 2817), True, 'import matplotlib.pyplot as plt\n'), ((2822, 2842), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (2833, 2842), True, 'import matplotlib.pyplot as plt\n'), ((2844, 2881), 'matplotlib.pyplot.imshow', 'plt.imshow', (['attention'], {'origin': '"""lower"""'}), "(attention, 
origin='lower')\n", (2854, 2881), True, 'import matplotlib.pyplot as plt\n'), ((2883, 2905), 'matplotlib.pyplot.title', 'plt.title', (['"""attention"""'], {}), "('attention')\n", (2892, 2905), True, 'import matplotlib.pyplot as plt\n'), ((2907, 2921), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2917, 2921), True, 'import matplotlib.pyplot as plt\n'), ((2923, 2937), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2933, 2937), True, 'import matplotlib.pyplot as plt\n'), ((308, 348), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', ([], {'fname': 'fname'}), '(fname=fname)\n', (335, 348), True, 'import matplotlib.font_manager as font_manager\n'), ((1041, 1056), 'text.korean.normalize', 'normalize', (['text'], {}), '(text)\n', (1050, 1056), False, 'from text.korean import normalize\n'), ((1597, 1606), 'jamo.h2j', 'h2j', (['text'], {}), '(text)\n', (1600, 1606), False, 'from jamo import h2j, j2hcj\n')] |
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmdet.core import get_classes
from mmdet.datasets import to_tensor
from mmdet.datasets.transforms import ImageTransform
from PIL import Image
import cv2
def _prepare_data(img, img_transform, cfg, device):
    """Build the single-image input dict expected by the detector.

    Applies *img_transform* at the test scale, moves the tensor to *device*
    with a leading batch dimension, and packages the shape metadata.
    """
    ori_shape = img.shape
    img, img_shape, pad_shape, scale_factor = img_transform(
        img,
        scale=cfg.data.test.img_scale,
        keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
    batch = to_tensor(img).to(device).unsqueeze(0)
    meta = dict(
        ori_shape=ori_shape,
        img_shape=img_shape,
        pad_shape=pad_shape,
        scale_factor=scale_factor,
        flip=False)
    return dict(img=[batch], img_meta=[[meta]])
def _prepare_data_3d(img_np, img_transform, cfg, device):
    """Turn an (H, W, S) volume into the detector's 5-D batched input dict.

    Each slice is expanded to a 3-channel BGR image, transformed at the test
    scale, and the stack is permuted so channels precede slices.
    """
    ori_shape = (img_np.shape[0], img_np.shape[1], 3)
    num_slices = img_np.shape[2]
    transformed = []
    for slice_idx in range(num_slices):
        # Replicate the slice into 3 RGB channels, then convert to BGR
        # (cv2-style channel order) before the transform.
        rgb = np.array(Image.fromarray(img_np[:, :, slice_idx]).convert('RGB'))
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        bgr, img_shape, pad_shape, scale_factor = img_transform(
            bgr,
            scale=cfg.data.test.img_scale,
            keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
        transformed.append(bgr)
    meta = dict(
        ori_shape=ori_shape,
        img_shape=(*img_shape, num_slices),
        pad_shape=(*pad_shape, num_slices),
        scale_factor=scale_factor,
        flip=False)
    # Swap the slice and channel axes so the channel axis comes first.
    batch = to_tensor(np.array(transformed)).to(device).unsqueeze(0)
    batch = batch.permute(0, 2, 1, 3, 4)
    assert batch.shape[1] == 3  # make sure channel size is 3
    return dict(imgs=batch, img_meta=[[meta]])
def _prepare_data_3d_2scales(img_np, img_np_2, img_transform, cfg, device):
    """Build the two-scale 3-D input dict: one batch per input volume.

    Each (H, W, S) volume is converted slice-by-slice to 3-channel BGR,
    transformed, stacked, and permuted so channels precede slices.  The first
    volume uses cfg.data.test.img_scale, the second cfg.data2_2scales.test.img_scale.
    NOTE(review): the scale_factor values are hard-coded (1.0 / 1.5) with the
    computed alternatives left commented out — presumably tuned by hand;
    confirm against the training configuration before changing.
    """
    ori_shape = (img_np.shape[0], img_np.shape[1], 3)
    ori_shape_2 = (img_np_2.shape[0], img_np_2.shape[1], 3)
    total_num_slices = img_np.shape[2]
    total_num_slices_2 = img_np_2.shape[2]
    # first image: slice -> RGB -> BGR -> transform at the test scale
    imgs = []
    for cur_slice in range(total_num_slices):
        img = img_np[:,:,cur_slice]
        img = Image.fromarray(img).convert('RGB')
        img = np.array(img)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img, img_shape, pad_shape, scale_factor = img_transform(
            img,
            scale=cfg.data.test.img_scale,
            keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
        imgs.append(img)
    imgs = to_tensor(np.array(imgs)).to(device).unsqueeze(0)
    # img_shape/pad_shape come from the last transformed slice.
    img_meta = [
        dict(
            ori_shape=ori_shape,
            img_shape=(*img_shape, total_num_slices),
            pad_shape=(*pad_shape, total_num_slices),
            # scale_factor=1.0 / (img_np_2.shape[0] / img_np.shape[0]), # scale up to 1.5x
            scale_factor=1.0, # scale down 1.0x
            flip=False)
    ]
    imgs = imgs.permute(0, 2, 1, 3, 4)
    # second image: same pipeline at the second configured scale
    imgs_2 = []
    for cur_slice in range(total_num_slices_2):
        img = img_np_2[:,:,cur_slice]
        img = Image.fromarray(img).convert('RGB')
        img = np.array(img)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img, img_shape, pad_shape, scale_factor = img_transform(
            img,
            scale=cfg.data2_2scales.test.img_scale,
            keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
        imgs_2.append(img)
    imgs_2 = to_tensor(np.array(imgs_2)).to(device).unsqueeze(0)
    img_meta_2 = [
        dict(
            ori_shape=ori_shape_2,
            img_shape=(*img_shape, total_num_slices_2),
            pad_shape=(*pad_shape, total_num_slices_2),
            # scale_factor=scale_factor, # scale up to 1.5x
            scale_factor=1.5, # scale down 1.0x
            flip=False)
    ]
    imgs_2 = imgs_2.permute(0, 2, 1, 3, 4)
    assert imgs.shape[1] == 3 # make sure channel size is 3
    assert imgs_2.shape[1] == 3
    return dict(imgs=imgs, img_meta=[img_meta], imgs_2=imgs_2, img_meta_2=[img_meta_2])
def _inference_single(model, img, img_transform, cfg, device):
    """Run the detector on one 2-D image (path or array) without gradients."""
    loaded = mmcv.imread(img)
    data = _prepare_data(loaded, img_transform, cfg, device)
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **data)
def _inference_single_3d(model, img, img_transform, cfg, device):
    """Run the detector on one 3-D volume stored as a .npy file, without gradients."""
    volume = np.load(img)
    data = _prepare_data_3d(volume, img_transform, cfg, device)
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **data)
def _inference_single_3d_2scales(model, img, img_2, img_transform, cfg, device):
    """Run the detector on two .npy volumes of the same case at two scales."""
    volume = np.load(img)
    volume_2 = np.load(img_2)
    data = _prepare_data_3d_2scales(volume, volume_2, img_transform, cfg, device)
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **data)
def _inference_generator(model, imgs, img_transform, cfg, device):
    """Lazily yield _inference_single results for every image in *imgs*."""
    for single_img in imgs:
        yield _inference_single(model, single_img, img_transform, cfg, device)
def _inference_generator_3d(model, imgs, img_transform, cfg, device):
    """Lazily yield _inference_single_3d results for every volume path in *imgs*."""
    for volume_path in imgs:
        yield _inference_single_3d(model, volume_path, img_transform, cfg, device)
def _inference_generator_3d_2scales(model, imgs, imgs_2, img_transform, cfg, device):
    """Lazily yield two-scale results, pairing volumes from both path lists.

    Asserts the paired paths share a basename so mismatched cases fail fast.
    """
    for path_a, path_b in zip(imgs, imgs_2):
        assert path_a.split('/')[-1] == path_b.split('/')[-1]
        yield _inference_single_3d_2scales(model, path_a, path_b, img_transform, cfg, device)
def inference_detector(model, imgs, cfg, device='cuda:0'):
    """Run 2-D detection on one image (returns the result) or a list (returns a generator)."""
    transform = ImageTransform(
        size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
    model = model.to(device)
    model.eval()
    if isinstance(imgs, list):
        return _inference_generator(model, imgs, transform, cfg, device)
    return _inference_single(model, imgs, transform, cfg, device)
def inference_detector_3d(model, imgs, cfg, device='cuda:0'):
    """3-D variant of inference_detector: one .npy path or a list of paths."""
    transform = ImageTransform(
        size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
    model = model.to(device)
    model.eval()
    if isinstance(imgs, list):
        return _inference_generator_3d(model, imgs, transform, cfg, device)
    return _inference_single_3d(model, imgs, transform, cfg, device)
def inference_detector_3d_2scales(model, imgs, imgs_2, cfg, device='cuda:0'):
    """Two-scale 3-D detection: single paths or parallel lists of .npy paths."""
    transform = ImageTransform(
        size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
    model = model.to(device)
    model.eval()
    if isinstance(imgs, list):
        return _inference_generator_3d_2scales(model, imgs, imgs_2, transform, cfg, device)
    return _inference_single_3d_2scales(model, imgs, imgs_2, transform, cfg, device)
def show_result(img, result, dataset='coco', score_thr=0.3, out_file=None, font_scale=0.5):
    """Draw detection (and optional segmentation) results on a 2-D image.

    Args:
        img: image path or array readable by mmcv.imread.
        result: either bbox results or a (bbox, segm) tuple from the detector;
            each bbox row ends with a confidence score.
        dataset: dataset name used to look up class names.
        score_thr: results below this score are not drawn.
        out_file: if given, the rendered image is written there (and the raw
            bboxes are saved alongside as .npy); otherwise the image is shown.
        font_scale: label text size.
    """
    img = mmcv.imread(img)
    class_names = get_classes(dataset)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks, each blended in with a random color
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(
                0, 256, (1, 3), dtype=np.uint8)
            # BUGFIX: np.bool was deprecated and removed in NumPy 1.24;
            # the builtin bool is the documented replacement.
            mask = maskUtils.decode(segms[i]).astype(bool)
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # per-class integer labels, one entry per bbox row
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    write_bboxes_to_npy(bboxes, out_file)
    mmcv.imshow_det_bboxes(
        img.copy(),
        bboxes,
        labels,
        class_names=class_names,
        score_thr=score_thr,
        show=out_file is None,
        out_file=out_file,
        font_scale=font_scale)
def show_result_3d(img, result, dataset='coco', score_thr=0.3, out_file=None, font_scale=0.5):
    """Render 3-D detections slice by slice, writing one PNG per occupied slice.

    Each bbox row is [x1, y1, x2, y2, z1, z2, score]; the box is drawn on
    every slice between floor(z1) and ceil(z2) inclusive.

    Args:
        img: path to a .npy (H, W, S) volume.
        result: bbox results or a (bbox, segm) tuple from the detector.
        dataset: dataset name used to look up class names.
        score_thr: masks below this score are skipped.
        out_file: base path; per-slice files get a '-<index>.png' suffix.
        font_scale: unused here (per-slice rendering uses font_scale=0).
    """
    img_np = np.load(img)
    class_names = get_classes(dataset)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(
                0, 256, (1, 3), dtype=np.uint8)
            # BUGFIX: np.bool was removed in NumPy 1.24; use the builtin bool.
            mask = maskUtils.decode(segms[i]).astype(bool)
            # NOTE(review): `img` is still the input *path* here, so this
            # masking looks broken for the 3-D case — presumably segm_result
            # is always None for volumes; confirm before relying on it.
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # Bucket every box onto each slice it spans.
    # TODO(review): slice count is hard-coded at 160; volumes deeper than
    # that would raise IndexError.
    bboxes_placeholders = [[] for i in range(0, 160)]
    for bbox in bboxes:
        for z_index in range(int(np.floor(bbox[4])), int(np.ceil(bbox[5])+ 1)):
            bboxes_placeholders[z_index].append([bbox[0], bbox[1], bbox[2], bbox[3], bbox[6]])
    for index, boxes in enumerate(bboxes_placeholders):
        if len(boxes) > 0:
            img = img_np[:,:,index]
            img = Image.fromarray(img).convert('RGB')
            img = np.array(img)
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            labels = np.array([0 for i in range(len(boxes))])  # single-class labels
            mmcv.imshow_det_bboxes(
                img.copy(),
                np.array(boxes),
                labels,
                class_names=class_names,
                score_thr=score_thr,
                show=out_file is None,
                out_file=out_file.split('.')[-2] + '-{}.png'.format(index),
                font_scale=0)
def display_result_3d(img, result, dataset='coco', score_thr=0.3):
    """Print the slice index and score of every 3-D detection above score_thr.

    Each bbox row is [x1, y1, x2, y2, z1, z2, score]; a box is reported on
    every slice between floor(z1) and ceil(z2) inclusive.

    Args:
        img: path to a .npy volume (loaded only to validate it exists).
        result: bbox results from the detector.
        dataset: dataset name (class names looked up for parity with
            show_result_3d; they are not printed).
        score_thr: only boxes scoring above this are printed.
    """
    img_np = np.load(img)
    class_names = get_classes(dataset)
    bbox_result = result
    bboxes = np.vstack(bbox_result)
    # BUGFIX: size the per-slice bucket list from the boxes themselves instead
    # of a hard-coded 160, which raised IndexError for boxes ending beyond
    # slice 159.  At least 160 buckets are kept for backward compatibility.
    depth = 160
    if bboxes.shape[0] > 0:
        depth = max(depth, int(np.ceil(bboxes[:, 5].max())) + 1)
    bboxes_placeholders = [[] for _ in range(depth)]
    for bbox in bboxes:
        for z_index in range(int(np.floor(bbox[4])), int(np.ceil(bbox[5])) + 1):
            bboxes_placeholders[z_index].append([bbox[0], bbox[1], bbox[2], bbox[3], bbox[6]])
    for index, boxes in enumerate(bboxes_placeholders):
        for box in boxes:
            if box[4] > score_thr:
                print('slice {} score {}'.format(index, box[4]))
def write_bboxes_to_npy(bboxes, out_file):
    """Save the raw bounding boxes next to *out_file* with a .npy extension.

    E.g. out_file 'A001-2342.jpeg' produces 'A001-2342.npy'.  Does nothing
    when *out_file* is None (results are being shown, not saved).
    """
    if out_file is not None:
        # BUGFIX: split('.')[0] truncated at the FIRST dot, mangling paths
        # like './out/a.b.jpeg'; rsplit drops only the final extension.
        bboxes_filename = out_file.rsplit('.', 1)[0] + '.npy'
        np.save(bboxes_filename, bboxes)
"numpy.load",
"pycocotools.mask.decode",
"numpy.floor",
"numpy.random.randint",
"mmdet.datasets.transforms.ImageTransform",
"torch.no_grad",
"mmcv.imread",
"numpy.full",
"cv2.cvtColor",
"mmdet.core.get_classes",
"numpy.save",
"numpy.ceil",
"mmcv.concat_list",
"mmdet.datasets.to_tensor",
... | [((4181, 4197), 'mmcv.imread', 'mmcv.imread', (['img'], {}), '(img)\n', (4192, 4197), False, 'import mmcv\n'), ((4444, 4456), 'numpy.load', 'np.load', (['img'], {}), '(img)\n', (4451, 4456), True, 'import numpy as np\n'), ((4724, 4736), 'numpy.load', 'np.load', (['img'], {}), '(img)\n', (4731, 4736), True, 'import numpy as np\n'), ((4752, 4766), 'numpy.load', 'np.load', (['img_2'], {}), '(img_2)\n', (4759, 4766), True, 'import numpy as np\n'), ((5641, 5716), 'mmdet.datasets.transforms.ImageTransform', 'ImageTransform', ([], {'size_divisor': 'cfg.data.test.size_divisor'}), '(size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)\n', (5655, 5716), False, 'from mmdet.datasets.transforms import ImageTransform\n'), ((6052, 6127), 'mmdet.datasets.transforms.ImageTransform', 'ImageTransform', ([], {'size_divisor': 'cfg.data.test.size_divisor'}), '(size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)\n', (6066, 6127), False, 'from mmdet.datasets.transforms import ImageTransform\n'), ((6485, 6560), 'mmdet.datasets.transforms.ImageTransform', 'ImageTransform', ([], {'size_divisor': 'cfg.data.test.size_divisor'}), '(size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)\n', (6499, 6560), False, 'from mmdet.datasets.transforms import ImageTransform\n'), ((6954, 6970), 'mmcv.imread', 'mmcv.imread', (['img'], {}), '(img)\n', (6965, 6970), False, 'import mmcv\n'), ((6989, 7009), 'mmdet.core.get_classes', 'get_classes', (['dataset'], {}), '(dataset)\n', (7000, 7009), False, 'from mmdet.core import get_classes\n'), ((7157, 7179), 'numpy.vstack', 'np.vstack', (['bbox_result'], {}), '(bbox_result)\n', (7166, 7179), True, 'import numpy as np\n'), ((7734, 7756), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (7748, 7756), True, 'import numpy as np\n'), ((8141, 8153), 'numpy.load', 'np.load', (['img'], {}), '(img)\n', (8148, 8153), True, 'import numpy as np\n'), ((8176, 8196), 'mmdet.core.get_classes', 'get_classes', (['dataset'], {}), 
'(dataset)\n', (8187, 8196), False, 'from mmdet.core import get_classes\n'), ((8344, 8366), 'numpy.vstack', 'np.vstack', (['bbox_result'], {}), '(bbox_result)\n', (8353, 8366), True, 'import numpy as np\n'), ((9742, 9754), 'numpy.load', 'np.load', (['img'], {}), '(img)\n', (9749, 9754), True, 'import numpy as np\n'), ((9777, 9797), 'mmdet.core.get_classes', 'get_classes', (['dataset'], {}), '(dataset)\n', (9788, 9797), False, 'from mmdet.core import get_classes\n'), ((9836, 9858), 'numpy.vstack', 'np.vstack', (['bbox_result'], {}), '(bbox_result)\n', (9845, 9858), True, 'import numpy as np\n'), ((1106, 1119), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1114, 1119), True, 'import numpy as np\n'), ((1134, 1170), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (1146, 1170), False, 'import cv2\n'), ((2292, 2305), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2300, 2305), True, 'import numpy as np\n'), ((2320, 2356), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (2332, 2356), False, 'import cv2\n'), ((3204, 3217), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3212, 3217), True, 'import numpy as np\n'), ((3232, 3268), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (3244, 3268), False, 'import cv2\n'), ((4265, 4280), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4278, 4280), False, 'import torch\n'), ((4530, 4545), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4543, 4545), False, 'import torch\n'), ((4858, 4873), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4871, 4873), False, 'import torch\n'), ((7258, 7287), 'mmcv.concat_list', 'mmcv.concat_list', (['segm_result'], {}), '(segm_result)\n', (7274, 7287), False, 'import mmcv\n'), ((7627, 7668), 'numpy.full', 'np.full', (['bbox.shape[0]', 'i'], {'dtype': 'np.int32'}), '(bbox.shape[0], i, dtype=np.int32)\n', (7634, 7668), True, 
'import numpy as np\n'), ((8445, 8474), 'mmcv.concat_list', 'mmcv.concat_list', (['segm_result'], {}), '(segm_result)\n', (8461, 8474), False, 'import mmcv\n'), ((10612, 10644), 'numpy.save', 'np.save', (['bboxes_filename', 'bboxes'], {}), '(bboxes_filename, bboxes)\n', (10619, 10644), True, 'import numpy as np\n'), ((7303, 7338), 'numpy.where', 'np.where', (['(bboxes[:, -1] > score_thr)'], {}), '(bboxes[:, -1] > score_thr)\n', (7311, 7338), True, 'import numpy as np\n'), ((7390, 7439), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(1, 3)'], {'dtype': 'np.uint8'}), '(0, 256, (1, 3), dtype=np.uint8)\n', (7407, 7439), True, 'import numpy as np\n'), ((8490, 8525), 'numpy.where', 'np.where', (['(bboxes[:, -1] > score_thr)'], {}), '(bboxes[:, -1] > score_thr)\n', (8498, 8525), True, 'import numpy as np\n'), ((8577, 8626), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '(1, 3)'], {'dtype': 'np.uint8'}), '(0, 256, (1, 3), dtype=np.uint8)\n', (8594, 8626), True, 'import numpy as np\n'), ((9215, 9228), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9223, 9228), True, 'import numpy as np\n'), ((9247, 9283), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (9259, 9283), False, 'import cv2\n'), ((1056, 1076), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (1071, 1076), False, 'from PIL import Image\n'), ((2242, 2262), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2257, 2262), False, 'from PIL import Image\n'), ((3154, 3174), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3169, 3174), False, 'from PIL import Image\n'), ((8877, 8894), 'numpy.floor', 'np.floor', (['bbox[4]'], {}), '(bbox[4])\n', (8885, 8894), True, 'import numpy as np\n'), ((9420, 9435), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (9428, 9435), True, 'import numpy as np\n'), ((9974, 9991), 'numpy.floor', 'np.floor', (['bbox[4]'], {}), 
'(bbox[4])\n', (9982, 9991), True, 'import numpy as np\n'), ((508, 522), 'mmdet.datasets.to_tensor', 'to_tensor', (['img'], {}), '(img)\n', (517, 522), False, 'from mmdet.datasets import to_tensor\n'), ((7476, 7502), 'pycocotools.mask.decode', 'maskUtils.decode', (['segms[i]'], {}), '(segms[i])\n', (7492, 7502), True, 'import pycocotools.mask as maskUtils\n'), ((8663, 8689), 'pycocotools.mask.decode', 'maskUtils.decode', (['segms[i]'], {}), '(segms[i])\n', (8679, 8689), True, 'import pycocotools.mask as maskUtils\n'), ((8901, 8917), 'numpy.ceil', 'np.ceil', (['bbox[5]'], {}), '(bbox[5])\n', (8908, 8917), True, 'import numpy as np\n'), ((9161, 9181), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (9176, 9181), False, 'from PIL import Image\n'), ((9998, 10014), 'numpy.ceil', 'np.ceil', (['bbox[5]'], {}), '(bbox[5])\n', (10005, 10014), True, 'import numpy as np\n'), ((1412, 1426), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (1420, 1426), True, 'import numpy as np\n'), ((2598, 2612), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (2606, 2612), True, 'import numpy as np\n'), ((3523, 3539), 'numpy.array', 'np.array', (['imgs_2'], {}), '(imgs_2)\n', (3531, 3539), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from datetime import date
import numpy as np
import csv
import requests
import random
import os.path
import pickle
import math
import tensorflow as tf
download_path = '../pickle'  # directory used to cache downloaded quote data as pickles
import matplotlib.pyplot as plt
class Stock:
    """Toy stock-trading environment with a gym-like step/reset interface.

    Quotes are fetched per random symbol (Yahoo Finance CSV, cached as
    pickles under ``download_path``) and replayed; the agent buys/sells
    and is rewarded with the percentage gain on a sell.
    """
    # 0 = hold, 1 = buy (when flat) / sell (when holding), -1 = exit when flat
    VALID_ACTIONS = [0, 1, -1]
    def __init__(self):
        """Reset bookkeeping and load the symbol universe from symbols.csv."""
        self.profit = 0          # cumulative realized reward (for averaging)
        self.count = 0           # number of completed sells
        self.bought = False      # True while a position is held
        self.my_list = None      # current symbol's quote rows
        self.boughtStock = None  # close price at which the position was opened
        self.boughtPrice = None  # notional stake (set to 1 on buy)
        # uncle_bob = db.Person.create(name='Bob', birthday=date(1960, 1, 15), is_relative=True)
        # uncle_bob.save()
        #
        #
        # uncle_bob.update(name = "test")
        # query = db.Person.select(db.Person.name == 'Bob')
        #
        # for user in query:
        #     test = user.name
        #
        # pass
        # Minimal anonymous object carrying an ``id`` attribute (environment spec).
        self.spec = type('',(object,),{'id':"Stock" })()
        with open('symbols.csv') as csvfile:
            self.symbols = list(csv.reader(csvfile))
    def getState(self):
        """Encode the current market situation as a one-hot (2,2,2,2,400) array.

        Axes: (position held?, sign of daily change, sign of margin change,
        feature selector 0/1, discretised magnitude bucket).  The daily change
        is cube-rooted and clipped at 10, the margin clipped at 50; both are
        quantised into 400 buckets (max index 399).
        """
        x = self.getX()
        index2 = int(x < 0)        # sign bit of the change
        x = abs(x)
        margin = self.getX(margin=True)
        index3 = int(margin < 0)   # sign bit of the margin change
        margin = abs(margin)
        result = math.pow(x, float(1)/3)  # cube root compresses large moves
        if result >= 10:
            result = 10
        max = 399  # NOTE(review): shadows the builtin ``max``
        steps = float(10) / max
        result = round(result / steps)
        if margin >=50:
            margin = 50
        marginsteps = float(50) / max
        marginresult = round(margin/marginsteps)
        environment = np.zeros(shape=(2, 2, 2, 2, 400), dtype=np.uint8)
        environment[int(self.bought),index2 ,index3 , 0, result] = 1
        environment[int(self.bought), index2, index3, 1, marginresult] = 1
        reshape = np.reshape(environment, [80,80])  # 2*2*2*2*400 == 6400 == 80*80
        # plt.imshow(reshape)
        # plt.show()
        return environment
    def getQuotes(self):
        """Pick a random symbol and return its historical quote rows.

        Loads from the local pickle cache when present, otherwise downloads
        the Yahoo Finance daily CSV (header stripped) and caches it.  Retries
        with a new random symbol until the data looks usable: HTTP 200, at
        least 2000 rows, and the first two rows differing in column 1.

        NOTE(review): ``pickle.dump`` returns None, so ``my_list`` is
        clobbered right after a fresh download; the loop condition then
        fails and the next iteration re-reads the just-written cache.
        Looks unintentional but is harmless.
        """
        statuscode = None
        my_list =None
        while statuscode != 200 or (not my_list or (len(my_list) < 2000 or my_list[0][1] == my_list[1][1])):
            symbol = random.choice(self.symbols)[0]
            download_file = download_path + "/" + symbol + ".pickle"
            if os.path.exists(download_file) and os.path.getsize(download_file) > 0:
                with open(download_file, 'rb') as f:
                    my_list = pickle.load(f)
                    statuscode = 200
            else:
                CSV_URL = "http://chart.finance.yahoo.com/table.csv?s=" + symbol + "&a=10&b=13&c=1800&d=10&e=13&f=2016&g=d&ignore=.csv"
                with requests.Session() as s:
                    download = s.get(CSV_URL)
                    statuscode = download.status_code
                    decoded_content = download.content.decode('utf-8')
                    cr = csv.reader(decoded_content.splitlines(), delimiter=',')
                    next(cr, None)  # drop the CSV header row
                    my_list = list(cr)
                    with open(download_file, 'wb') as f:
                        my_list = pickle.dump(my_list,f)
        return my_list
    def step(self, action, profit = False):
        """Advance one row given an action; return [next_state, reward, done, info].

        Reward is only paid on a sell: the percentage gain of the current
        close over the buy price.  The row index decreases each step
        (rows appear to be newest-first from the Yahoo CSV -- TODO confirm);
        reaching index 0 ends the episode.
        """
        reward = 0.0
        done = False
        state = self.my_list[self.index]
        close = state[6]  # column 6: presumably 'Adj Close' in the Yahoo CSV -- verify
        #Stock is not profitable, so exit
        if not self.bought and action == -1:
            done = True
        #Sell Stock
        if self.bought and action == 1:
            done = True
            # +0.01 guards the division against a zero buy price
            reward = ((((float(close) / (float(self.boughtStock) + 0.01))) * self.boughtPrice) - self.boughtPrice) * 100
            if profit:
                self.profit += reward
                self.count += 1
                print("Profit:", self.profit/self.count)
        #Buy Stock
        if not self.bought and action == 1:
            self.boughtStock = float(close)
            self.bought = True
            self.boughtPrice = 1
        if self.index >= 1:
            self.index -= 1
        else:
            self.index = 0
            done = True
        next_state = self.getState()
        _ = None
        return [next_state, reward, done, _]
    def getX(self, margin = False):
        """Percentage change of today's close versus the buy price (when
        holding and ``margin`` is False) or versus today's open otherwise.

        +0.01 guards against division by zero.
        NOTE(review): local ``open`` shadows the builtin.
        """
        state = self.my_list[self.index]
        open = state[1]
        close = state[6]
        if self.bought and not margin:
            x = ((float(close) / (float(self.boughtStock) + 0.01)) * 100) - 100
        else:
            x = ((float(close) / (float(open) + 0.01)) * 100) - 100
        return x
    def reset(self):
        """Start a new episode: clear the position, fetch a fresh symbol's
        quotes, jump to a random row, and return the encoded state."""
        self.bought = False
        self.boughtPrice = None
        self.boughtStock = None
        self.my_list = self.getQuotes()
        self.index = random.choice(range(len(self.my_list)))
        environment = self.getState()
        return environment
| [
"pickle.dump",
"csv.reader",
"requests.Session",
"numpy.zeros",
"random.choice",
"pickle.load",
"numpy.reshape"
] | [((1514, 1563), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, 400)', 'dtype': 'np.uint8'}), '(shape=(2, 2, 2, 2, 400), dtype=np.uint8)\n', (1522, 1563), True, 'import numpy as np\n'), ((1726, 1759), 'numpy.reshape', 'np.reshape', (['environment', '[80, 80]'], {}), '(environment, [80, 80])\n', (1736, 1759), True, 'import numpy as np\n'), ((949, 968), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (959, 968), False, 'import csv\n'), ((2043, 2070), 'random.choice', 'random.choice', (['self.symbols'], {}), '(self.symbols)\n', (2056, 2070), False, 'import random\n'), ((2312, 2326), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2323, 2326), False, 'import pickle\n'), ((2539, 2557), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2555, 2557), False, 'import requests\n'), ((2981, 3004), 'pickle.dump', 'pickle.dump', (['my_list', 'f'], {}), '(my_list, f)\n', (2992, 3004), False, 'import pickle\n')] |
"""Taylor Green vortex flow (5 minutes).
"""
import numpy as np
import os
from pysph.base.nnps import DomainManager
from pysph.base.utils import get_particle_array
from pysph.base.kernels import QuinticSpline
from pysph.solver.application import Application
from pysph.sph.equation import Group, Equation
from pysph.sph.scheme import TVFScheme, WCSPHScheme, SchemeChooser
from pysph.sph.wc.edac import ComputeAveragePressure, EDACScheme
from pysph.sph.wc.kernel_correction import (GradientCorrectionPreStep,
GradientCorrection,
MixedKernelCorrectionPreStep)
from pysph.sph.wc.crksph import CRKSPHPreStep, CRKSPH
# domain and constants
L = 1.0            # edge length of the (doubly periodic) square domain
U = 1.0            # reference velocity scale
rho0 = 1.0         # reference density
c0 = 10 * U        # artificial speed of sound (10x the velocity scale)
p0 = c0**2 * rho0  # reference (background) pressure
def exact_solution(U, b, t, x, y):
    """Analytical Taylor-Green vortex solution at time ``t``.

    Parameters
    ----------
    U : reference velocity magnitude.
    b : exponential decay rate of the velocity field.
    t : time at which the solution is evaluated.
    x, y : positions (scalars or numpy arrays).

    Returns
    -------
    (u, v, p) : velocity components and pressure; the velocities decay
        with ``exp(b*t)`` and the pressure with its square.
    """
    decay = U * np.exp(b * t)
    kx = 2 * np.pi * x
    ky = 2 * np.pi * y
    u = -np.cos(kx) * np.sin(ky)
    v = np.sin(kx) * np.cos(ky)
    p = -0.25 * (np.cos(4 * np.pi * x) + np.cos(4 * np.pi * y))
    return decay * u, decay * v, decay * decay * p
class TaylorGreen(Application):
    """Taylor-Green vortex benchmark as a pysph ``Application``.

    Supports the TVF (default), WCSPH and EDAC schemes plus several
    optional kernel-correction variants, and post-processes the decay,
    L_inf and L_1 errors against the analytical solution.
    """
    def add_user_options(self, group):
        """Register the example-specific command-line options."""
        corrections = ['', 'mixed-corr', 'grad-corr', 'kernel-corr', 'crksph']
        group.add_argument(
            "--init", action="store", type=str, default=None,
            help="Initialize particle positions from given file."
        )
        group.add_argument(
            "--perturb", action="store", type=float, dest="perturb", default=0,
            help="Random perturbation of initial particles as a fraction "
            "of dx (setting it to zero disables it, the default)."
        )
        group.add_argument(
            "--nx", action="store", type=int, dest="nx", default=50,
            help="Number of points along x direction. (default 50)"
        )
        group.add_argument(
            "--re", action="store", type=float, dest="re", default=100,
            help="Reynolds number (defaults to 100)."
        )
        group.add_argument(
            "--hdx", action="store", type=float, dest="hdx", default=1.0,
            help="Ratio h/dx."
        )
        group.add_argument(
            "--pb-factor", action="store", type=float, dest="pb_factor",
            default=1.0,
            help="Use fraction of the background pressure (default: 1.0)."
        )
        group.add_argument(
            "--kernel-corr", action="store", type=str, dest='kernel_corr',
            default='', help="Type of Kernel Correction", choices=corrections
        )
    def consume_user_options(self):
        """Derive resolution, viscosity and the stable time-step from options."""
        nx = self.options.nx
        re = self.options.re
        self.nu = nu = U * L / re   # kinematic viscosity from the Reynolds number
        self.dx = dx = L / nx
        self.volume = dx * dx
        self.hdx = self.options.hdx
        h0 = self.hdx * self.dx
        # dt is the most restrictive of the CFL, viscous and force criteria.
        dt_cfl = 0.25 * h0 / (c0 + U)
        dt_viscous = 0.125 * h0**2 / nu
        dt_force = 0.25 * 1.0
        self.tf = 5.0
        self.dt = min(dt_cfl, dt_viscous, dt_force)
        self.kernel_corr = self.options.kernel_corr
    def configure_scheme(self):
        """Pass resolution-dependent parameters to whichever scheme was chosen."""
        scheme = self.scheme
        h0 = self.hdx * self.dx
        if self.options.scheme == 'tvf':
            scheme.configure(pb=self.options.pb_factor * p0, nu=self.nu, h0=h0)
        elif self.options.scheme == 'wcsph':
            scheme.configure(hdx=self.hdx, nu=self.nu, h0=h0)
        elif self.options.scheme == 'edac':
            scheme.configure(h=h0, nu=self.nu, pb=self.options.pb_factor * p0)
        kernel = QuinticSpline(dim=2)
        scheme.configure_solver(kernel=kernel, tf=self.tf, dt=self.dt)
    def create_scheme(self):
        """Build the available schemes; resolution-dependent values (h0, hdx,
        nu, pb) are filled in later by ``configure_scheme``."""
        h0 = None
        hdx = None
        wcsph = WCSPHScheme(
            ['fluid'], [], dim=2, rho0=rho0, c0=c0, h0=h0,
            hdx=hdx, nu=None, gamma=7.0, alpha=0.0, beta=0.0
        )
        tvf = TVFScheme(
            ['fluid'], [], dim=2, rho0=rho0, c0=c0, nu=None,
            p0=p0, pb=None, h0=h0
        )
        edac = EDACScheme(
            ['fluid'], [], dim=2, rho0=rho0, c0=c0, nu=None,
            pb=p0, h=h0
        )
        s = SchemeChooser(default='tvf', wcsph=wcsph, tvf=tvf, edac=edac)
        return s
    def create_equations(self):
        """Return the scheme's equations, optionally wrapped with a kernel
        correction: a pre-step group is prepended and the correction equation
        is inserted at the front of every existing group."""
        eqns = self.scheme.get_equations()
        n = len(eqns)
        tol = 1.0
        if self.kernel_corr == 'grad-corr':
            eqn1 = Group(equations=[
                GradientCorrectionPreStep('fluid', ['fluid'])
            ], real=False)
            for i in range(n):
                eqn2 = GradientCorrection('fluid', ['fluid'], 2, tol)
                eqns[i].equations.insert(0, eqn2)
            eqns.insert(0, eqn1)
        elif self.kernel_corr == 'mixed-corr':
            eqn1 = Group(equations=[
                MixedKernelCorrectionPreStep('fluid', ['fluid'])
            ], real=False)
            for i in range(n):
                eqn2 = GradientCorrection('fluid', ['fluid'], 2, tol)
                eqns[i].equations.insert(0, eqn2)
            eqns.insert(0, eqn1)
        elif self.kernel_corr == 'crksph':
            eqn1 = Group(equations=[
                CRKSPHPreStep('fluid', ['fluid'])
            ], real=False)
            for i in range(n):
                eqn2 = CRKSPH('fluid', ['fluid'], 2, tol)
                eqns[i].equations.insert(0, eqn2)
            eqns.insert(0, eqn1)
        return eqns
    def create_domain(self):
        """Doubly-periodic unit box [0, L] x [0, L]."""
        return DomainManager(
            xmin=0, xmax=L, ymin=0, ymax=L, periodic_in_x=True,
            periodic_in_y=True
        )
    def create_particles(self):
        """Create the fluid particle array with the initial vortex field.

        Positions come from a uniform grid, an ``--init`` restart file, or
        a randomly perturbed grid; velocities/pressure are set from the
        analytical Taylor-Green solution at t = 0.
        """
        # create the particles
        dx = self.dx
        _x = np.arange(dx / 2, L, dx)
        x, y = np.meshgrid(_x, _x)
        x = x.ravel()
        y = y.ravel()
        if self.options.init is not None:
            # re-use particle positions from a previous output file
            fname = self.options.init
            from pysph.solver.utils import load
            data = load(fname)
            _f = data['arrays']['fluid']
            x, y = _f.x.copy(), _f.y.copy()
        if self.options.perturb > 0:
            # reproducible random jitter of the lattice
            np.random.seed(1)
            factor = dx * self.options.perturb
            x += np.random.random(x.shape) * factor
            y += np.random.random(x.shape) * factor
        h = np.ones_like(x) * dx
        # create the arrays
        fluid = get_particle_array(name='fluid', x=x, y=y, h=h)
        self.scheme.setup_properties([fluid])
        # add the requisite arrays
        fluid.add_property('color')
        fluid.add_output_arrays(['color'])
        print("Taylor green vortex problem :: nfluid = %d, dt = %g" % (
            fluid.get_number_of_particles(), self.dt))
        # setup the particle properties
        pi = np.pi
        cos = np.cos
        sin = np.sin
        # color
        fluid.color[:] = cos(2 * pi * x) * cos(4 * pi * y)
        # velocities
        fluid.u[:] = -U * cos(2 * pi * x) * sin(2 * pi * y)
        fluid.v[:] = +U * sin(2 * pi * x) * cos(2 * pi * y)
        fluid.p[:] = -U * U * (np.cos(4 * np.pi * x) +
                               np.cos(4 * np.pi * y)) * 0.25
        # mass is set to get the reference density of each phase
        fluid.rho[:] = rho0
        fluid.m[:] = self.volume * fluid.rho
        # volume is set as dx^2
        if self.options.scheme == 'tvf':
            fluid.V[:] = 1. / self.volume
        # smoothing lengths
        fluid.h[:] = self.hdx * dx
        # extra properties required by the selected kernel correction
        corr = self.kernel_corr
        if corr == 'kernel-corr' or corr == 'mixed-corr':
            fluid.add_property('cwij')
        if corr == 'mixed-corr' or corr == 'grad-corr':
            fluid.add_property('m_mat', stride=9)
        elif corr == 'crksph':
            fluid.add_property('ai')
            fluid.add_property('gradbi', stride=9)
            for prop in ['gradai', 'bi']:
                fluid.add_property(prop, stride=2)
        # return the particle list
        return [fluid]
    # The following are all related to post-processing.
    def _get_post_process_props(self, array):
        """Return x, y, m, u, v, p with the average pressure subtracted.
        """
        if 'pavg' not in array.properties or \
           'pavg' not in array.output_property_arrays:
            self._add_extra_props(array)
        sph_eval = self._get_sph_evaluator(array)
        sph_eval.update_particle_arrays([array])
        sph_eval.evaluate()
        x, y, m, u, v, p, pavg = array.get(
            'x', 'y', 'm', 'u', 'v', 'p', 'pavg'
        )
        return x, y, m, u, v, p - pavg
    def _add_extra_props(self, array):
        """Ensure the post-processing properties exist and are output."""
        extra = ['pavg', 'nnbr']
        for prop in extra:
            if prop not in array.properties:
                array.add_property(prop)
        array.add_output_arrays(extra)
    def _get_sph_evaluator(self, array):
        """Lazily build (and cache) the evaluator computing the average pressure."""
        if not hasattr(self, '_sph_eval'):
            from pysph.tools.sph_evaluator import SPHEvaluator
            equations = [
                ComputeAveragePressure(dest='fluid', sources=['fluid'])
            ]
            dm = self.create_domain()
            sph_eval = SPHEvaluator(
                arrays=[array], equations=equations, dim=2,
                kernel=QuinticSpline(dim=2), domain_manager=dm
            )
            self._sph_eval = sph_eval
        return self._sph_eval
    def post_process(self, info_fname):
        """Compare each output file with the exact solution.

        Saves ``results.npz`` (kinetic energy, decay, L_inf/L_1 velocity
        errors and L_1 pressure error versus time) plus the corresponding
        plots into the output directory.
        """
        info = self.read_info(info_fname)
        if len(self.output_files) == 0:
            return
        from pysph.solver.utils import iter_output
        decay_rate = -8.0 * np.pi**2 / self.options.re
        files = self.output_files
        t, ke, ke_ex, decay, linf, l1, p_l1 = [], [], [], [], [], [], []
        for sd, array in iter_output(files, 'fluid'):
            _t = sd['t']
            t.append(_t)
            x, y, m, u, v, p = self._get_post_process_props(array)
            u_e, v_e, p_e = exact_solution(U, decay_rate, _t, x, y)
            vmag2 = u**2 + v**2
            vmag = np.sqrt(vmag2)
            ke.append(0.5 * np.sum(m * vmag2))
            vmag2_e = u_e**2 + v_e**2
            vmag_e = np.sqrt(vmag2_e)
            ke_ex.append(0.5 * np.sum(m * vmag2_e))
            vmag_max = vmag.max()
            decay.append(vmag_max)
            theoretical_max = U * np.exp(decay_rate * _t)
            linf.append(abs((vmag_max - theoretical_max) / theoretical_max))
            l1_err = np.average(np.abs(vmag - vmag_e))
            avg_vmag_e = np.average(np.abs(vmag_e))
            # scale the error by the maximum velocity.
            l1.append(l1_err / avg_vmag_e)
            p_e_max = np.abs(p_e).max()
            p_error = np.average(np.abs(p - p_e)) / p_e_max
            p_l1.append(p_error)
        t, ke, ke_ex, decay, l1, linf, p_l1 = list(map(
            np.asarray, (t, ke, ke_ex, decay, l1, linf, p_l1))
        )
        decay_ex = U * np.exp(decay_rate * t)
        fname = os.path.join(self.output_dir, 'results.npz')
        np.savez(
            fname, t=t, ke=ke, ke_ex=ke_ex, decay=decay, linf=linf, l1=l1,
            p_l1=p_l1, decay_ex=decay_ex
        )
        import matplotlib
        matplotlib.use('Agg')
        from matplotlib import pyplot as plt
        plt.clf()
        plt.semilogy(t, decay_ex, label="exact")
        plt.semilogy(t, decay, label="computed")
        plt.xlabel('t')
        plt.ylabel('max velocity')
        plt.legend()
        fig = os.path.join(self.output_dir, "decay.png")
        plt.savefig(fig, dpi=300)
        plt.clf()
        plt.plot(t, linf)
        plt.xlabel('t')
        plt.ylabel(r'$L_\infty$ error')
        fig = os.path.join(self.output_dir, "linf_error.png")
        plt.savefig(fig, dpi=300)
        plt.clf()
        plt.plot(t, l1, label="error")
        plt.xlabel('t')
        plt.ylabel(r'$L_1$ error')
        fig = os.path.join(self.output_dir, "l1_error.png")
        plt.savefig(fig, dpi=300)
        plt.clf()
        plt.plot(t, p_l1, label="error")
        plt.xlabel('t')
        plt.ylabel(r'$L_1$ error for $p$')
        fig = os.path.join(self.output_dir, "p_l1_error.png")
        plt.savefig(fig, dpi=300)
if __name__ == '__main__':
    # Standard pysph entry point: run the simulation, then produce the plots.
    app = TaylorGreen()
    app.run()
    app.post_process(app.info_filename)
| [
"numpy.random.seed",
"numpy.abs",
"numpy.sum",
"matplotlib.pyplot.clf",
"pysph.sph.wc.crksph.CRKSPH",
"pysph.sph.scheme.SchemeChooser",
"pysph.sph.wc.edac.ComputeAveragePressure",
"numpy.arange",
"numpy.exp",
"os.path.join",
"pysph.sph.wc.crksph.CRKSPHPreStep",
"numpy.meshgrid",
"pysph.solve... | [((889, 902), 'numpy.exp', 'np.exp', (['(b * t)'], {}), '(b * t)\n', (895, 902), True, 'import numpy as np\n'), ((3517, 3537), 'pysph.base.kernels.QuinticSpline', 'QuinticSpline', ([], {'dim': '(2)'}), '(dim=2)\n', (3530, 3537), False, 'from pysph.base.kernels import QuinticSpline\n'), ((3692, 3804), 'pysph.sph.scheme.WCSPHScheme', 'WCSPHScheme', (["['fluid']", '[]'], {'dim': '(2)', 'rho0': 'rho0', 'c0': 'c0', 'h0': 'h0', 'hdx': 'hdx', 'nu': 'None', 'gamma': '(7.0)', 'alpha': '(0.0)', 'beta': '(0.0)'}), "(['fluid'], [], dim=2, rho0=rho0, c0=c0, h0=h0, hdx=hdx, nu=None,\n gamma=7.0, alpha=0.0, beta=0.0)\n", (3703, 3804), False, 'from pysph.sph.scheme import TVFScheme, WCSPHScheme, SchemeChooser\n'), ((3849, 3934), 'pysph.sph.scheme.TVFScheme', 'TVFScheme', (["['fluid']", '[]'], {'dim': '(2)', 'rho0': 'rho0', 'c0': 'c0', 'nu': 'None', 'p0': 'p0', 'pb': 'None', 'h0': 'h0'}), "(['fluid'], [], dim=2, rho0=rho0, c0=c0, nu=None, p0=p0, pb=None,\n h0=h0)\n", (3858, 3934), False, 'from pysph.sph.scheme import TVFScheme, WCSPHScheme, SchemeChooser\n'), ((3980, 4052), 'pysph.sph.wc.edac.EDACScheme', 'EDACScheme', (["['fluid']", '[]'], {'dim': '(2)', 'rho0': 'rho0', 'c0': 'c0', 'nu': 'None', 'pb': 'p0', 'h': 'h0'}), "(['fluid'], [], dim=2, rho0=rho0, c0=c0, nu=None, pb=p0, h=h0)\n", (3990, 4052), False, 'from pysph.sph.wc.edac import ComputeAveragePressure, EDACScheme\n'), ((4099, 4160), 'pysph.sph.scheme.SchemeChooser', 'SchemeChooser', ([], {'default': '"""tvf"""', 'wcsph': 'wcsph', 'tvf': 'tvf', 'edac': 'edac'}), "(default='tvf', wcsph=wcsph, tvf=tvf, edac=edac)\n", (4112, 4160), False, 'from pysph.sph.scheme import TVFScheme, WCSPHScheme, SchemeChooser\n'), ((5402, 5491), 'pysph.base.nnps.DomainManager', 'DomainManager', ([], {'xmin': '(0)', 'xmax': 'L', 'ymin': '(0)', 'ymax': 'L', 'periodic_in_x': '(True)', 'periodic_in_y': '(True)'}), '(xmin=0, xmax=L, ymin=0, ymax=L, periodic_in_x=True,\n periodic_in_y=True)\n', (5415, 5491), False, 'from 
pysph.base.nnps import DomainManager\n'), ((5620, 5644), 'numpy.arange', 'np.arange', (['(dx / 2)', 'L', 'dx'], {}), '(dx / 2, L, dx)\n', (5629, 5644), True, 'import numpy as np\n'), ((5660, 5679), 'numpy.meshgrid', 'np.meshgrid', (['_x', '_x'], {}), '(_x, _x)\n', (5671, 5679), True, 'import numpy as np\n'), ((6266, 6313), 'pysph.base.utils.get_particle_array', 'get_particle_array', ([], {'name': '"""fluid"""', 'x': 'x', 'y': 'y', 'h': 'h'}), "(name='fluid', x=x, y=y, h=h)\n", (6284, 6313), False, 'from pysph.base.utils import get_particle_array\n'), ((9589, 9616), 'pysph.solver.utils.iter_output', 'iter_output', (['files', '"""fluid"""'], {}), "(files, 'fluid')\n", (9600, 9616), False, 'from pysph.solver.utils import iter_output\n'), ((10781, 10825), 'os.path.join', 'os.path.join', (['self.output_dir', '"""results.npz"""'], {}), "(self.output_dir, 'results.npz')\n", (10793, 10825), False, 'import os\n'), ((10834, 10939), 'numpy.savez', 'np.savez', (['fname'], {'t': 't', 'ke': 'ke', 'ke_ex': 'ke_ex', 'decay': 'decay', 'linf': 'linf', 'l1': 'l1', 'p_l1': 'p_l1', 'decay_ex': 'decay_ex'}), '(fname, t=t, ke=ke, ke_ex=ke_ex, decay=decay, linf=linf, l1=l1,\n p_l1=p_l1, decay_ex=decay_ex)\n', (10842, 10939), True, 'import numpy as np\n'), ((11005, 11026), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (11019, 11026), False, 'import matplotlib\n'), ((11081, 11090), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11088, 11090), True, 'from matplotlib import pyplot as plt\n'), ((11099, 11139), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['t', 'decay_ex'], {'label': '"""exact"""'}), "(t, decay_ex, label='exact')\n", (11111, 11139), True, 'from matplotlib import pyplot as plt\n'), ((11148, 11188), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['t', 'decay'], {'label': '"""computed"""'}), "(t, decay, label='computed')\n", (11160, 11188), True, 'from matplotlib import pyplot as plt\n'), ((11197, 11212), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""t"""'], {}), "('t')\n", (11207, 11212), True, 'from matplotlib import pyplot as plt\n'), ((11221, 11247), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""max velocity"""'], {}), "('max velocity')\n", (11231, 11247), True, 'from matplotlib import pyplot as plt\n'), ((11256, 11268), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11266, 11268), True, 'from matplotlib import pyplot as plt\n'), ((11283, 11325), 'os.path.join', 'os.path.join', (['self.output_dir', '"""decay.png"""'], {}), "(self.output_dir, 'decay.png')\n", (11295, 11325), False, 'import os\n'), ((11334, 11359), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig'], {'dpi': '(300)'}), '(fig, dpi=300)\n', (11345, 11359), True, 'from matplotlib import pyplot as plt\n'), ((11369, 11378), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11376, 11378), True, 'from matplotlib import pyplot as plt\n'), ((11387, 11404), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'linf'], {}), '(t, linf)\n', (11395, 11404), True, 'from matplotlib import pyplot as plt\n'), ((11413, 11428), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (11423, 11428), True, 'from matplotlib import pyplot as plt\n'), ((11437, 11468), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$L_\\\\infty$ error"""'], {}), "('$L_\\\\infty$ error')\n", (11447, 11468), True, 'from matplotlib import pyplot as plt\n'), ((11483, 11530), 'os.path.join', 'os.path.join', (['self.output_dir', '"""linf_error.png"""'], {}), "(self.output_dir, 'linf_error.png')\n", (11495, 11530), False, 'import os\n'), ((11539, 11564), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig'], {'dpi': '(300)'}), '(fig, dpi=300)\n', (11550, 11564), True, 'from matplotlib import pyplot as plt\n'), ((11574, 11583), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11581, 11583), True, 'from matplotlib import pyplot as plt\n'), ((11592, 11622), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'l1'], {'label': '"""error"""'}), "(t, l1, 
label='error')\n", (11600, 11622), True, 'from matplotlib import pyplot as plt\n'), ((11631, 11646), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (11641, 11646), True, 'from matplotlib import pyplot as plt\n'), ((11655, 11680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$L_1$ error"""'], {}), "('$L_1$ error')\n", (11665, 11680), True, 'from matplotlib import pyplot as plt\n'), ((11696, 11741), 'os.path.join', 'os.path.join', (['self.output_dir', '"""l1_error.png"""'], {}), "(self.output_dir, 'l1_error.png')\n", (11708, 11741), False, 'import os\n'), ((11750, 11775), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig'], {'dpi': '(300)'}), '(fig, dpi=300)\n', (11761, 11775), True, 'from matplotlib import pyplot as plt\n'), ((11785, 11794), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11792, 11794), True, 'from matplotlib import pyplot as plt\n'), ((11803, 11835), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'p_l1'], {'label': '"""error"""'}), "(t, p_l1, label='error')\n", (11811, 11835), True, 'from matplotlib import pyplot as plt\n'), ((11844, 11859), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (11854, 11859), True, 'from matplotlib import pyplot as plt\n'), ((11868, 11901), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$L_1$ error for $p$"""'], {}), "('$L_1$ error for $p$')\n", (11878, 11901), True, 'from matplotlib import pyplot as plt\n'), ((11917, 11964), 'os.path.join', 'os.path.join', (['self.output_dir', '"""p_l1_error.png"""'], {}), "(self.output_dir, 'p_l1_error.png')\n", (11929, 11964), False, 'import os\n'), ((11973, 11998), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig'], {'dpi': '(300)'}), '(fig, dpi=300)\n', (11984, 11998), True, 'from matplotlib import pyplot as plt\n'), ((5871, 5882), 'pysph.solver.utils.load', 'load', (['fname'], {}), '(fname)\n', (5875, 5882), False, 'from pysph.solver.utils import load\n'), ((6018, 6035), 'numpy.random.seed', 'np.random.seed', (['(1)'], 
{}), '(1)\n', (6032, 6035), True, 'import numpy as np\n'), ((6199, 6214), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (6211, 6214), True, 'import numpy as np\n'), ((9854, 9868), 'numpy.sqrt', 'np.sqrt', (['vmag2'], {}), '(vmag2)\n', (9861, 9868), True, 'import numpy as np\n'), ((9975, 9991), 'numpy.sqrt', 'np.sqrt', (['vmag2_e'], {}), '(vmag2_e)\n', (9982, 9991), True, 'import numpy as np\n'), ((10742, 10764), 'numpy.exp', 'np.exp', (['(decay_rate * t)'], {}), '(decay_rate * t)\n', (10748, 10764), True, 'import numpy as np\n'), ((4518, 4564), 'pysph.sph.wc.kernel_correction.GradientCorrection', 'GradientCorrection', (['"""fluid"""', "['fluid']", '(2)', 'tol'], {}), "('fluid', ['fluid'], 2, tol)\n", (4536, 4564), False, 'from pysph.sph.wc.kernel_correction import GradientCorrectionPreStep, GradientCorrection, MixedKernelCorrectionPreStep\n'), ((6100, 6125), 'numpy.random.random', 'np.random.random', (['x.shape'], {}), '(x.shape)\n', (6116, 6125), True, 'import numpy as np\n'), ((6152, 6177), 'numpy.random.random', 'np.random.random', (['x.shape'], {}), '(x.shape)\n', (6168, 6177), True, 'import numpy as np\n'), ((8857, 8912), 'pysph.sph.wc.edac.ComputeAveragePressure', 'ComputeAveragePressure', ([], {'dest': '"""fluid"""', 'sources': "['fluid']"}), "(dest='fluid', sources=['fluid'])\n", (8879, 8912), False, 'from pysph.sph.wc.edac import ComputeAveragePressure, EDACScheme\n'), ((10148, 10171), 'numpy.exp', 'np.exp', (['(decay_rate * _t)'], {}), '(decay_rate * _t)\n', (10154, 10171), True, 'import numpy as np\n'), ((10282, 10303), 'numpy.abs', 'np.abs', (['(vmag - vmag_e)'], {}), '(vmag - vmag_e)\n', (10288, 10303), True, 'import numpy as np\n'), ((10341, 10355), 'numpy.abs', 'np.abs', (['vmag_e'], {}), '(vmag_e)\n', (10347, 10355), True, 'import numpy as np\n'), ((4878, 4924), 'pysph.sph.wc.kernel_correction.GradientCorrection', 'GradientCorrection', (['"""fluid"""', "['fluid']", '(2)', 'tol'], {}), "('fluid', ['fluid'], 2, tol)\n", (4896, 4924), False, 
'from pysph.sph.wc.kernel_correction import GradientCorrectionPreStep, GradientCorrection, MixedKernelCorrectionPreStep\n'), ((6955, 6976), 'numpy.cos', 'np.cos', (['(4 * np.pi * x)'], {}), '(4 * np.pi * x)\n', (6961, 6976), True, 'import numpy as np\n'), ((7010, 7031), 'numpy.cos', 'np.cos', (['(4 * np.pi * y)'], {}), '(4 * np.pi * y)\n', (7016, 7031), True, 'import numpy as np\n'), ((9085, 9105), 'pysph.base.kernels.QuinticSpline', 'QuinticSpline', ([], {'dim': '(2)'}), '(dim=2)\n', (9098, 9105), False, 'from pysph.base.kernels import QuinticSpline\n'), ((9897, 9914), 'numpy.sum', 'np.sum', (['(m * vmag2)'], {}), '(m * vmag2)\n', (9903, 9914), True, 'import numpy as np\n'), ((10023, 10042), 'numpy.sum', 'np.sum', (['(m * vmag2_e)'], {}), '(m * vmag2_e)\n', (10029, 10042), True, 'import numpy as np\n'), ((10478, 10489), 'numpy.abs', 'np.abs', (['p_e'], {}), '(p_e)\n', (10484, 10489), True, 'import numpy as np\n'), ((10529, 10544), 'numpy.abs', 'np.abs', (['(p - p_e)'], {}), '(p - p_e)\n', (10535, 10544), True, 'import numpy as np\n'), ((4391, 4436), 'pysph.sph.wc.kernel_correction.GradientCorrectionPreStep', 'GradientCorrectionPreStep', (['"""fluid"""', "['fluid']"], {}), "('fluid', ['fluid'])\n", (4416, 4436), False, 'from pysph.sph.wc.kernel_correction import GradientCorrectionPreStep, GradientCorrection, MixedKernelCorrectionPreStep\n'), ((5219, 5253), 'pysph.sph.wc.crksph.CRKSPH', 'CRKSPH', (['"""fluid"""', "['fluid']", '(2)', 'tol'], {}), "('fluid', ['fluid'], 2, tol)\n", (5225, 5253), False, 'from pysph.sph.wc.crksph import CRKSPHPreStep, CRKSPH\n'), ((4748, 4796), 'pysph.sph.wc.kernel_correction.MixedKernelCorrectionPreStep', 'MixedKernelCorrectionPreStep', (['"""fluid"""', "['fluid']"], {}), "('fluid', ['fluid'])\n", (4776, 4796), False, 'from pysph.sph.wc.kernel_correction import GradientCorrectionPreStep, GradientCorrection, MixedKernelCorrectionPreStep\n'), ((5104, 5137), 'pysph.sph.wc.crksph.CRKSPHPreStep', 'CRKSPHPreStep', (['"""fluid"""', 
"['fluid']"], {}), "('fluid', ['fluid'])\n", (5117, 5137), False, 'from pysph.sph.wc.crksph import CRKSPHPreStep, CRKSPH\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import os, logging
import itertools
try:
    from hashlib import md5  # Python >= 2.5 / Python 3
except ImportError:
    from md5 import md5      # ancient Python 2 fallback
log = logging.getLogger(__name__)
try:
    from scipy.stats import chi2 as _chi2
except ImportError:
    # scipy is optional: _chi2 stays None when it is not installed
    _chi2 = None
def np_tostring(a, scale=1e6):
    """Serialize a 2-d array to a whitespace-separated string.

    The first column is divided by *scale* before flattening; the input
    array itself is left untouched (a copy is taken first).
    """
    scaled = a.copy()
    scaled[:, 0] /= scale
    return " ".join(str(value) for value in scaled.ravel())
def np_fromstring(values, coldim=2, scale=1e6):
    """Inverse of :func:`np_tostring`: parse a whitespace-separated string.

    ``np.fromstring`` (text mode) is deprecated, so the string is split and
    converted explicitly -- the result is identical.

    :param values: whitespace-separated numbers as a single string
    :param coldim: number of columns to reshape into
    :param scale: factor multiplied back onto column 0 after parsing
    :return: float32 array of shape (-1, coldim)
    """
    a = np.array(values.split(), dtype=np.float32).reshape(-1, coldim)
    a[:, 0] *= scale
    return a
class Buf(np.ndarray):
    """Marker subclass of ``np.ndarray``; adds no behaviour of its own."""
    pass
def ahash(a):
    """Return a short hex string hashing the array's raw data.

    The original ``hash(a.data)`` only works in Python 3 when the
    memoryview's format is ``'B'``, ``'b'`` or ``'c'``; for any other
    dtype it raises ``ValueError``.  Hashing ``a.tobytes()`` works for
    every dtype and produces a hash of the same underlying bytes.

    Side effect (preserved from the original): the array is marked
    read-only, since a hashed array should not mutate afterwards.

    * http://stackoverflow.com/questions/16589791/most-efficient-property-to-hash-for-numpy-array
    """
    a.flags.writeable = False
    return "%x" % hash(a.tobytes())
def find_ranges(i):
    """Yield (start, end) pairs for the consecutive runs in a sorted list.

    :param i: sorted list of integers
    E.g. given {0, 1, 2, 3, 4, 7, 8, 9, 11} this yields (0, 4), (7, 9), (11, 11).

    Consecutive values share a constant ``value - index`` difference, which
    is used as the ``groupby`` key.  Bug fix: the original key was a
    two-argument lambda, but ``itertools.groupby`` calls the key with a
    single ``(index, value)`` tuple, so it always raised ``TypeError``.

    http://stackoverflow.com/questions/4628333/converting-a-list-of-integers-into-range-in-python
    """
    key = lambda pair: pair[1] - pair[0]
    for _, grp in itertools.groupby(enumerate(i), key):
        run = list(grp)
        yield run[0][1], run[-1][1]
def count_unique_truncating(vals):
    """Frequency table of *vals* as a 2-column array ``[[value, count], ...]``.

    http://stackoverflow.com/questions/10741346/numpy-frequency-counts-for-unique-values-in-an-array
    """
    unique_vals = np.unique(vals)
    positions = unique_vals.searchsorted(vals)
    counts = np.bincount(positions)
    return np.vstack((unique_vals, counts)).T
def count_unique_old(vals):
    """Frequency table for numpy versions predating ``return_counts``.

    Returns a 2-column array pairing each unique value with its count;
    counts are cast to uint64 before stacking.
    """
    unique_vals = np.unique(vals)
    counts = np.bincount(unique_vals.searchsorted(vals)).astype(np.uint64)
    return np.vstack((unique_vals, counts)).T
def count_unique_new(vals):
    """Frequency table via ``np.unique(..., return_counts=True)``.

    Requires numpy >= 1.9; returns a 2-column array ``[[value, count], ...]``
    with counts cast to uint64 before stacking.
    """
    unique_vals, counts = np.unique(vals, return_counts=True)
    return np.vstack((unique_vals, counts.astype(np.uint64))).T
def count_unique(vals):
    """
    Dispatch to count_unique_new, falling back to count_unique_old when
    this numpy lacks return_counts (which raises TypeError).
    """
    try:
        return count_unique_new(vals)
    except TypeError:
        return count_unique_old(vals)
def unique2D_subarray(a):
    """
    Unique leading-axis subarrays of *a* (first occurrences, ordered by
    their raw-byte value).

    https://stackoverflow.com/questions/40674696/numpy-unique-2d-sub-array
    """
    # view each trailing subarray as one opaque void scalar so np.unique
    # can compare whole rows at once
    row_bytes = a.dtype.itemsize * np.prod(a.shape[1:])
    as_void = np.ascontiguousarray(a.reshape(a.shape[0], -1)).view(np.dtype((np.void, row_bytes)))
    first_idx = np.unique(as_void, return_index=1)[1]
    return a[first_idx]
def np_digest(a):
    """
    md5 hexdigest over the raw array data only.

    https://stackoverflow.com/questions/5386694/fast-way-to-hash-numpy-objects-for-caching
    A digest of a saved file includes the numpy header as well, so it
    will not match this.
    """
    h = md5()
    h.update(np.ascontiguousarray(a.view(np.uint8)))
    return h.hexdigest()
def array_digest(a):
    """
    Alias for np_digest: md5 hexdigest over the raw array data only.

    https://stackoverflow.com/questions/5386694/fast-way-to-hash-numpy-objects-for-caching
    file digest includes the header, not just the data : so will not match this
    """
    return np_digest(a)
def count_unique_sorted(vals):
    """
    (value, count) pairs in descending-count order, returned as uint64.

    Older numpy has trouble argsorting an empty result, hence the length
    guard. (Input is not pre-cast to uint64: that would break on
    negative pdgcodes.)
    """
    cu = count_unique(vals)
    if len(cu):
        order = np.argsort(cu[:, 1])[::-1]  # descending frequency
        cu = cu[order]
    return cu.astype(np.uint64)
def vnorm(a):
    """
    Row-wise Euclidean norm of a 2D array.

    Replacement for ``np.linalg.norm(a, 2, 1)`` -- older numpy lacks the
    third (axis) argument form.
    """
    return np.sqrt((a * a).sum(axis=1))
# row-wise Euclidean norm (lambda form of vnorm)
vnorm_ = lambda _:np.sqrt(np.sum(_*_,1))
# row-wise cosine of the angle between corresponding vectors of a and b
costheta_ = lambda a,b:np.sum(a * b, axis = 1)/(vnorm(a)*vnorm(b))
def chi2_pvalue( c2obs, ndf ):
    """
    p-value of an observed chi2.

    :param c2obs: observed chi2 value
    :param ndf: degrees of freedom
    :return pvalue: 1 - _chi2.cdf(c2obs, ndf), the probability of a chi2
        at least as extreme as c2obs; returns 1 when scipy is unavailable.

    A p-value close to 1 means nothing significant was observed; a low
    p-value (0.05 is a common cutoff) flags a significant deviation from
    the null hypothesis. For Opticks-CFG4 comparisons we want it near 1.

    * https://en.wikipedia.org/wiki/Chi-squared_distribution
    * https://onlinecourses.science.psu.edu/stat414/node/147
    * ~/opticks_refs/PoissonConsistency.pdf
    * http://www.hep.caltech.edu/~fcp/statistics/hypothesisTest/PoissonConsistency/PoissonConsistency.pdf

    ::

        In [54]: 1 - _chi2.cdf( range(10,21), 10 )   ## probability of getting chi2 > the value
        Out[54]: array([ 0.4405, 0.3575, 0.2851, 0.2237, 0.173 , 0.1321, 0.0996, 0.0744, 0.055 , 0.0403, 0.0293])
    """
    if _chi2 is None:
        return 1
    return 1 - _chi2.cdf(c2obs, ndf)
def chi2one(a, b):
    """Elementwise chi2 contribution: (a-b)^2 / (a+b)."""
    diff = a - b
    return diff * diff / (a + b)
def chi2(a_, b_, cut=30):
    """
    Elementwise chi2 between two count arrays, excluding low-count bins.

    :param a_: array of counts
    :param b_: array of counts
    :param cut: bins whose summed counts a+b do not exceed this are
        excluded from the chi2 (left as zero)
    :return c2,c2n,c2c: per-bin (a-b)^2/(a+b) masked by the cut,
        number of bins passing the cut, number of bins excluded

    ::

        c2, c2n, c2c = chi2(a, b)
        c2ndf = c2.sum()/c2n

    # http://www.itl.nist.gov/div898/handbook/eda/section3/eda35f.htm
    # http://stats.stackexchange.com/questions/7400/how-to-assess-the-similarity-of-two-histograms
    # http://www.hep.caltech.edu/~fcp/statistics/hypothesisTest/PoissonConsistency/PoissonConsistency.pdf
    """
    a = a_.astype(np.float32)
    b = b_.astype(np.float32)
    include = (a + b) > cut
    c2 = np.zeros_like(a)
    diff = a - b
    c2[include] = (diff * diff)[include] / (a + b)[include]
    n_in = len(a[include])
    n_out = len(a[~include])
    assert n_in + n_out == len(a)
    return c2, n_in, n_out
def ratio(a, b):
m = np.logical_and(a > 0, b > 0)
ab = np.zeros((len(a),2))
ba = np.zeros((len(a),2))
ab[m,0] = a[m]/b[m]
ab[m,1] = np.sqrt(a[m])/b[m]
ba[m,0] = b[m]/a[m]
ba[m,1] = np.sqrt(b[m])/a[m]
return ab, ba
def test_count_unique_(fn, a):
    """
    Check that counting function *fn* round-trips the large integer *a*.

    count_unique appears to go via floats, which loses precision for
    large numbers -- this logs OK/FAIL for each value.
    """
    triple = np.array([a, a, a], dtype=np.uint64)
    cu = fn(triple)
    assert cu[0, 1] == 3
    recovered = np.uint64(cu[0, 0])
    msg = "OK" if recovered == a else "FAIL"
    log.info("test_count_unique_ %16x %16x %s %s %s " % (a, recovered, msg, a, recovered))
def test_count_unique():
    """
    Exercise each counting implementation over two large sentinels plus
    bit masks of increasing width.

    Bug fix: under Python 3 ``map(...)`` is lazy, so the original call
    built an iterator that was never consumed and the checks never ran;
    an explicit loop forces execution.
    """
    log.info("test_count_unique")
    vals = [0xfedcba9876543210, 0xffffffffffffffff]
    msk_ = lambda n: (1 << 4 * n) - 1
    for n in range(16):
        vals.append(msk_(n))
    for fn in [count_unique_new, count_unique_old, count_unique_truncating]:
        log.info(fn.__name__)
        for v in vals:
            test_count_unique_(fn, v)
def test_chi2():
    """Smoke test: chi2 on small count arrays with an explicit cut."""
    counts_a = np.array([0, 100, 500, 500], dtype=np.int32)
    counts_b = np.array([0, 100, 500, 0], dtype=np.int32)
    c2, c2n, c2c = chi2(counts_a, counts_b, cut=30)
def test_count_unique_2():
    """count_unique on a small sorted sample: ascending (value, count) pairs."""
    log.info("test_count_unique_2")
    sample = np.array([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5], dtype=np.uint32)
    expected = np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]], dtype=np.uint64)
    assert np.all(count_unique(sample) == expected)
def test_count_unique_sorted():
    """count_unique_sorted: same pairs but in descending-count order."""
    log.info("test_count_unique_sorted")
    sample = np.array([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5], dtype=np.uint32)
    expected = np.array([[5, 5], [4, 4], [3, 3], [2, 2], [1, 1]], dtype=np.uint64)
    assert np.all(count_unique_sorted(sample) == expected)
def test_count_unique_sorted_empty():
    """count_unique_sorted must cope with empty input (older numpy argsort issue)."""
    log.info("test_count_unique_sorted_empty np.__version__ %s " % np.__version__)
    empty = np.array([], dtype=np.uint32)
    result = count_unique_sorted(empty)
    u = np.array([], dtype=np.uint64)
    c = np.array([], dtype=np.uint64)
    assert np.all(result == np.vstack((u, c)).T)
if __name__ == '__main__':
    # ad-hoc test runner: configure logging, report numpy version, then
    # run the currently-enabled checks (two are commented out)
    logging.basicConfig(level=logging.INFO)
    log.info("np.__version__ %s " % np.__version__ )
    #test_count_unique()
    #test_chi2()
    test_count_unique_2()
    test_count_unique_sorted()
    test_count_unique_sorted_empty()
| [
"numpy.uint64",
"numpy.zeros_like",
"numpy.sum",
"numpy.logical_and",
"logging.basicConfig",
"numpy.power",
"numpy.all",
"logging.getLogger",
"numpy.prod",
"numpy.argsort",
"md5.md5",
"numpy.fromstring",
"numpy.array",
"numpy.vstack",
"numpy.bincount",
"scipy.stats.chi2.cdf",
"numpy.... | [((865, 892), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (882, 892), False, 'import os, logging\n'), ((2214, 2229), 'numpy.unique', 'np.unique', (['vals'], {}), '(vals)\n', (2223, 2229), True, 'import numpy as np\n'), ((2372, 2387), 'numpy.unique', 'np.unique', (['vals'], {}), '(vals)\n', (2381, 2387), True, 'import numpy as np\n'), ((2434, 2451), 'numpy.bincount', 'np.bincount', (['bins'], {}), '(bins)\n', (2445, 2451), True, 'import numpy as np\n'), ((2633, 2668), 'numpy.unique', 'np.unique', (['vals'], {'return_counts': '(True)'}), '(vals, return_counts=True)\n', (2642, 2668), True, 'import numpy as np\n'), ((3451, 3456), 'md5.md5', 'md5', ([], {}), '()\n', (3454, 3456), False, 'from md5 import md5\n'), ((8091, 8107), 'numpy.zeros_like', 'np.zeros_like', (['a'], {}), '(a)\n', (8104, 8107), True, 'import numpy as np\n'), ((8284, 8312), 'numpy.logical_and', 'np.logical_and', (['(a > 0)', '(b > 0)'], {}), '(a > 0, b > 0)\n', (8298, 8312), True, 'import numpy as np\n'), ((8661, 8697), 'numpy.array', 'np.array', (['[a, a, a]'], {'dtype': 'np.uint64'}), '([a, a, a], dtype=np.uint64)\n', (8669, 8697), True, 'import numpy as np\n'), ((8760, 8779), 'numpy.uint64', 'np.uint64', (['cu[0, 0]'], {}), '(cu[0, 0])\n', (8769, 8779), True, 'import numpy as np\n'), ((9363, 9407), 'numpy.array', 'np.array', (['[0, 100, 500, 500]'], {'dtype': 'np.int32'}), '([0, 100, 500, 500], dtype=np.int32)\n', (9371, 9407), True, 'import numpy as np\n'), ((9412, 9454), 'numpy.array', 'np.array', (['[0, 100, 500, 0]'], {'dtype': 'np.int32'}), '([0, 100, 500, 0], dtype=np.int32)\n', (9420, 9454), True, 'import numpy as np\n'), ((9562, 9634), 'numpy.array', 'np.array', (['[1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5]'], {'dtype': 'np.uint32'}), '([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5], dtype=np.uint32)\n', (9570, 9634), True, 'import numpy as np\n'), ((9631, 9698), 'numpy.array', 'np.array', (['[[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]'], 
{'dtype': 'np.uint64'}), '([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]], dtype=np.uint64)\n', (9639, 9698), True, 'import numpy as np\n'), ((9732, 9747), 'numpy.all', 'np.all', (['(r1 == x)'], {}), '(r1 == x)\n', (9738, 9747), True, 'import numpy as np\n'), ((9833, 9905), 'numpy.array', 'np.array', (['[1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5]'], {'dtype': 'np.uint32'}), '([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5], dtype=np.uint32)\n', (9841, 9905), True, 'import numpy as np\n'), ((9902, 9969), 'numpy.array', 'np.array', (['[[5, 5], [4, 4], [3, 3], [2, 2], [1, 1]]'], {'dtype': 'np.uint64'}), '([[5, 5], [4, 4], [3, 3], [2, 2], [1, 1]], dtype=np.uint64)\n', (9910, 9969), True, 'import numpy as np\n'), ((10009, 10023), 'numpy.all', 'np.all', (['(r == y)'], {}), '(r == y)\n', (10015, 10023), True, 'import numpy as np\n'), ((10158, 10187), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.uint32'}), '([], dtype=np.uint32)\n', (10166, 10187), True, 'import numpy as np\n'), ((10232, 10261), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.uint64'}), '([], dtype=np.uint64)\n', (10240, 10261), True, 'import numpy as np\n'), ((10272, 10301), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.uint64'}), '([], dtype=np.uint64)\n', (10280, 10301), True, 'import numpy as np\n'), ((10346, 10360), 'numpy.all', 'np.all', (['(r == x)'], {}), '(r == x)\n', (10352, 10360), True, 'import numpy as np\n'), ((10398, 10437), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (10417, 10437), False, 'import os, logging\n'), ((4364, 4380), 'numpy.sum', 'np.sum', (['(a * a)', '(1)'], {}), '(a * a, 1)\n', (4370, 4380), True, 'import numpy as np\n'), ((4406, 4422), 'numpy.sum', 'np.sum', (['(_ * _)', '(1)'], {}), '(_ * _, 1)\n', (4412, 4422), True, 'import numpy as np\n'), ((4445, 4466), 'numpy.sum', 'np.sum', (['(a * b)'], {'axis': '(1)'}), '(a * b, axis=1)\n', (4451, 4466), True, 'import numpy as np\n'), ((6999, 7020), 
'scipy.stats.chi2.cdf', '_chi2.cdf', (['c2obs', 'ndf'], {}), '(c2obs, ndf)\n', (7008, 7020), True, 'from scipy.stats import chi2 as _chi2\n'), ((8414, 8427), 'numpy.sqrt', 'np.sqrt', (['a[m]'], {}), '(a[m])\n', (8421, 8427), True, 'import numpy as np\n'), ((8473, 8486), 'numpy.sqrt', 'np.sqrt', (['b[m]'], {}), '(b[m])\n', (8480, 8486), True, 'import numpy as np\n'), ((10312, 10329), 'numpy.vstack', 'np.vstack', (['(u, c)'], {}), '((u, c))\n', (10321, 10329), True, 'import numpy as np\n'), ((1157, 1205), 'numpy.fromstring', 'np.fromstring', (['values'], {'dtype': 'np.float32', 'sep': '""" """'}), "(values, dtype=np.float32, sep=' ')\n", (1170, 1205), True, 'import numpy as np\n'), ((3196, 3224), 'numpy.unique', 'np.unique', (['b'], {'return_index': '(1)'}), '(b, return_index=1)\n', (3205, 3224), True, 'import numpy as np\n'), ((8122, 8140), 'numpy.power', 'np.power', (['(a - b)', '(2)'], {}), '(a - b, 2)\n', (8130, 8140), True, 'import numpy as np\n'), ((2293, 2310), 'numpy.bincount', 'np.bincount', (['bins'], {}), '(bins)\n', (2304, 2310), True, 'import numpy as np\n'), ((3048, 3068), 'numpy.prod', 'np.prod', (['a.shape[1:]'], {}), '(a.shape[1:])\n', (3055, 3068), True, 'import numpy as np\n'), ((4041, 4061), 'numpy.argsort', 'np.argsort', (['cu[:, 1]'], {}), '(cu[:, 1])\n', (4051, 4061), True, 'import numpy as np\n')] |
import math
import numpy as np
from ..utils.darray import DependArray
from .distance import *
from .elec_near import ElecNear
try:
from .pme import Ewald
except ImportError:
from .ewald import Ewald
from .ewald import Ewald as EwaldQMQM
class Elec(object):
    """
    Electrostatics driver for a QM/MM system.

    Builds a lazy dependency graph of DependArray nodes: pairwise
    QM-MM geometry (rij, dij and derivatives), near-field electrostatics
    (ElecNear) and a full-system solver (Ewald under PBC, otherwise a
    dynamically imported NonPBC backend), then derived MM charge sets.

    NOTE(review): qm_total_charge is accepted but not used in this
    constructor -- confirm whether callers rely on it elsewhere.
    """
    def __init__(
        self,
        qm_positions,
        positions,
        qm_charges,
        charges,
        qm_total_charge,
        cell_basis,
        switching_type=None,
        cutoff=None,
        swdist=None,
        pbc=False,
    ):
        # QM->MM displacement vectors; every array below recomputes
        # lazily when its dependencies change
        self.rij = DependArray(
            name="rij",
            func=get_rij,
            dependencies=[qm_positions, positions],
        )
        # pairwise distances and their gradients
        self.dij = DependArray(
            name="dij",
            func=get_dij,
            dependencies=[self.rij],
        )
        self.dij_gradient = DependArray(
            name="dij_gradient",
            func=get_dij_gradient,
            dependencies=[self.rij, self.dij],
        )
        # inverse distances (Coulomb kernel) and gradients
        self.dij_inverse = DependArray(
            name="dij_inverse",
            func=get_dij_inverse,
            dependencies=[self.dij],
        )
        self.dij_inverse_gradient = DependArray(
            name="dij_inverse_gradient",
            func=get_dij_inverse_gradient,
            dependencies=[self.dij_inverse, self.dij_gradient],
        )
        # per-MM-atom minimum distance to any QM atom, with gradient
        self.dij_min = DependArray(
            name="dij_min",
            func=get_dij_min,
            dependencies=[self.dij_inverse],
        )
        self.dij_min_gradient = DependArray(
            name="dij_min_gradient",
            func=get_dij_min_gradient,
            dependencies=[self.dij_min, self.dij_inverse, self.dij_inverse_gradient],
        )
        # atoms closer than 0.8 to the QM region are excluded from the
        # full Coulomb sum (handled by the near-field machinery instead)
        self.coulomb_exclusion = DependArray(
            name="coulomb_exclusion",
            func=(lambda x: np.nonzero(x < .8)[0]),
            dependencies=[self.dij_min],
        )
        # MM1: within 0.8 but strictly nonzero distance
        self.mm1_index = DependArray(
            name="mm1_index",
            func=(lambda x: np.nonzero(np.logical_and(x > 0., x < .8))[0]),
            dependencies=[self.dij_min],
        )
        self.mm2_index = DependArray(
            name="mm2_index",
            func=get_mm2_index,
            dependencies=[
                positions,
                self.mm1_index,
                self.dij,
            ]
        )
        if pbc:
            # periodic system: Ewald summation for both MM and QM-QM
            self.full = Ewald(
                qm_positions=qm_positions,
                positions=positions,
                charges=charges,
                cell_basis=cell_basis,
                exclusion=self.coulomb_exclusion,
                cutoff=cutoff,
            )
            self.qmqm = EwaldQMQM(
                qm_positions=qm_positions,
                positions=qm_positions,
                charges=qm_charges,
                cell_basis=cell_basis,
                exclusion=np.arange(len(qm_charges)),
            )
        else:
            import importlib
            # deferred import avoids a hard dependency when pbc is used
            NonPBC = importlib.import_module(".nonpbc", package='qmhub.electools').__getattribute__('NonPBC')
            self.full = NonPBC(
                rij=self.rij,
                charges=charges,
                cell_basis=cell_basis,
                dij_inverse=self.dij_inverse,
                dij_inverse_gradient=self.dij_inverse_gradient,
                exclusion=self.coulomb_exclusion,
            )
            self.qmqm = None
        # switched/scaled near-field electrostatics
        self.near_field = ElecNear(
            dij_min=self.dij_min,
            dij_min_gradient=self.dij_min_gradient,
            dij_inverse=self.dij_inverse,
            dij_inverse_gradient=self.dij_inverse_gradient,
            charges=charges,
            switching_type=switching_type,
            cutoff=cutoff,
            swdist=swdist,
        )
        # residual ESP on QM atoms = full-system ESP minus near-field part
        self.qm_residual_esp = DependArray(
            name="qm_residual_esp",
            func=Elec._get_qm_residual_esp,
            dependencies=[
                self.full.qm_total_esp,
                self.near_field.qm_scaled_esp,
            ],
        )
        # derived MM charge sets used by the embedding scheme
        self.scaled_mm_charges = DependArray(
            name="scaled_mm_charges",
            func=Elec._get_scaled_mm_charges,
            dependencies=[
                self.near_field.charges,
                self.near_field.scaling_factor,
            ],
        )
        self.projected_mm_charges = DependArray(
            name="projected_mm_charges",
            func=Elec._get_projected_mm_charges,
            dependencies=[
                self.near_field.weighted_qmmm_coulomb_tensor_inv,
                self.qm_residual_esp,
                self.near_field.scaling_factor,
            ],
        )
        self.embedding_mm_charges = DependArray(
            name="embedding_mm_charges",
            func=Elec._get_embedding_mm_charges,
            dependencies=[
                self.near_field.scaling_factor,
                self.near_field.weighted_qmmm_coulomb_tensor_inv,
                self.qm_residual_esp,
                self.near_field.charges,
            ],
        )
        self.embedding_mm_positions = DependArray(
            name="embedding_mm_positions",
            func=Elec._get_embedding_mm_positions,
            dependencies=[
                positions,
                self.near_field.near_field_mask,
            ],
        )
    @staticmethod
    def _get_qm_residual_esp(qm_total_esp, qm_scaled_esp):
        # full-system ESP minus the (scaled) near-field contribution
        return qm_total_esp - qm_scaled_esp
    @staticmethod
    def _get_scaled_mm_charges(charges, scaling_factor):
        # near-field charges attenuated by the switching function
        return charges * scaling_factor
    @staticmethod
    def _get_projected_mm_charges(wt_inv, qm_esp, w):
        # project the residual ESP (row 0) back onto MM charges, then scale
        return (wt_inv @ qm_esp[0]) * w
    @staticmethod
    def _get_embedding_mm_charges(w, wt_inv, qm_esp, charges):
        # projected residual charges added to the physical charges, scaled
        return (wt_inv @ qm_esp[0] + charges) * w
    @staticmethod
    def _get_embedding_mm_positions(positions, near_field_mask):
        # positions of the MM atoms selected by the near-field mask
        return positions[:, near_field_mask]
| [
"numpy.nonzero",
"numpy.logical_and",
"importlib.import_module"
] | [((2935, 2996), 'importlib.import_module', 'importlib.import_module', (['""".nonpbc"""'], {'package': '"""qmhub.electools"""'}), "('.nonpbc', package='qmhub.electools')\n", (2958, 2996), False, 'import importlib\n'), ((1805, 1824), 'numpy.nonzero', 'np.nonzero', (['(x < 0.8)'], {}), '(x < 0.8)\n', (1815, 1824), True, 'import numpy as np\n'), ((1987, 2019), 'numpy.logical_and', 'np.logical_and', (['(x > 0.0)', '(x < 0.8)'], {}), '(x > 0.0, x < 0.8)\n', (2001, 2019), True, 'import numpy as np\n')] |
import sys
import os
import numpy as np
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from math_helpers import vectors as vec
from math_helpers import rotations as rot
from math_helpers import matrices as mat
from math_helpers.constants import *
def get_orbital_elements(rvec, vvec, center='earth', printout=False):
    """computes Keplerian elements from positon/velocity vectors
    :param rvec: positional vectors of spacecraft [IJK?] (km)
    :param vvec: velocity vectors of spacecraft [IJK?] (km/s)
    :param center: center object of orbit; default=earth
    :param printout: if True, print a human-readable element summary
    :return sma: semi-major axis (km)
    :return e: eccentricity
    :return i: inclination (rad)
    :return raan: right ascending node (rad)
    :return aop: argument of periapsis (rad)
    :return ta: true anomaly (rad)

    Bug fix: mean_anomalies returns (E, M) in that order (cf. its use in
    kepler_prop); the elliptical branch previously unpacked it as
    ``M, E = ...``, swapping mean and eccentric anomaly in the printout
    quantities (arglat_mean, mean_lon).
    """
    # get Keplerian class
    k = Keplerian(rvec, vvec, center=center)
    # eccentricity, specific energy, semi-major axis, semi-parameter
    e = k.e_mag
    zeta = k.v_mag**2/2. - k.mu/k.r_mag
    if e != 1:
        sma = -k.mu/(2*zeta)
        p = sma*(1-e**2)
    else:
        # parabolic orbit: sma is undefined (infinite)
        sma = float('inf')
        p = k.h_mag**2/(k.mu)
    # node vec, inclination, true anomaly, mean/eccentric anomaly
    node_vec = k.node_vec
    node_mag = vec.norm(node_vec)
    i = k.inclination
    ta = k.true_anomaly
    if e < 1:
        E, M = mean_anomalies(e, ta)  # (eccentric, mean); was unpacked swapped
    elif e == 1:
        M = mean_anomalies(e, ta) # parabolic anomaly
    else:
        M = mean_anomalies(e, ta) # hyperbolic anomaly
    # true longitude of periapsis - from vernal equinox to eccentricty vector
    if e == 0:
        true_lon_peri = float('nan')
    else:
        true_lon_peri = arccos(vec.vdotv(k.eccentricity_vector, [1.,0.,0.])/k.e_mag)
        if k.e_vec[1] < 0:
            true_lon_peri = 2*np.pi - true_lon_peri
    lon_peri_mean = k.raan + k.aop # for small inclinations
    # RAAN, argument of periapsis, arg. of lat., true long.
    # for inclined orbits
    if i != 0:
        raan = k.raan
        aop = k.aop
        # argument of latitude - from ascending node to satellite position
        # vector in direction of satellite motion
        arglat = arccos(vec.vdotv(node_vec, rvec)/(node_mag*k.r_mag))
        if rvec[2] < 0:
            arglat = 2*np.pi - arglat
        if e != 0:
            # can also use arglat = aop + ta for inclined elliptical orbits
            arglat = aop + ta
            arglat_mean = aop + M # mean = includes mean anomaly
        else:
            arglat_mean = arglat
        true_lon = raan + aop + ta
    # for equatorial orbits these angles are undefined
    else:
        raan = float('nan')
        aop = float('nan')
        arglat = float('nan')
        arglat_mean = float('nan')
        true_lon = float('nan')
    # for circular and equatorial orbits
    # true longitude - from vernal equinox to satellite position
    if e == 0:
        true_lon = arccos(vec.vdotv([1.,0.,0.], rvec)/k.r_mag)
        if rvec[1] < 0:
            true_lon = 2*np.pi - true_lon
    mean_lon = true_lon_peri + M # for small incl and e
    if printout:
        print(f'\nOrbital Elements:\n',
              f'Semi-major axis: {sma:0.06f} km\n',
              f'Semi-latus Rectum: {p:0.6f} km\n',
              f'Eccentricity: {e:0.6f}\n',
              f'Inclination: {np.rad2deg(i):0.6f} deg')
        print(f' RAAN: {np.rad2deg(raan):0.6f} deg\n',
              f'Argument of Periapsis: {np.rad2deg(aop):0.6f} deg\n',
              f'True Longitude of Periapsis: {np.rad2deg(true_lon_peri):0.6f} deg\n',
              f'Mean Longitude of Periapsis: {np.rad2deg(lon_peri_mean):0.6f} deg\n',
              f'True Anomaly: {np.rad2deg(ta):0.6f} deg\n',
              f'Argument of Latitude: {np.rad2deg(arglat):0.6f} deg\n',
              f'Argument of Latitude - Mean: {np.rad2deg(arglat_mean):0.6f} deg\n',
              f'True longitude: {np.rad2deg(true_lon):0.6f} deg\n',
              f'Mean Longitude: {np.rad2deg(mean_lon):0.6f} deg')
    return np.array([sma, e, i, raan, aop, ta])
def get_rv_frm_elements(elements, center='earth', method='sma'):
    """computes positon/velocity vectors from Keplerian elements.
    We first compute pos/vel in the PQW system, then rotate to the
    geocentric equatorial system.
    :param elements: for method = 'p': (p, e, i, raan, aop, ta)
                     for method = 'sma': (a, e, i, raan, aop, ta)
                     a: semi-major axis (km); e: eccentricity
                     i: inclination (rad); raan: right ascending node (rad)
                     aop: argument of periapsis (rad); ta: true anomaly (rad)
    :param center: center object of orbit; default=earth
    :param method: 'sma' uses direct IJK expressions; 'p' builds PQW
                   vectors then applies a 3-1-3 rotation
    :return rvec: positional vectors of spacecraft [IJK] (km)
    :return vvec: velocity vectors of spacecraft [IJK] (km/s)
    """
    if method == 'p':
        p, e, i, raan, aop, ta = elements
    elif method =='sma':
        a, e, i, raan, aop, ta = elements
        p = a*(1-e**2)  # semi-latus rectum from sma and eccentricity
    # determine which planet center to compute from
    mu = get_mu(center=center)
    # declaring trig functions
    s, c = sin, cos
    if method =='sma':
        # closed-form IJK position/velocity (conic equation + angular momentum)
        r = p / (1+e*c(ta))
        h = sqrt( mu*a*(1-e**2) )
        rvec = [r*(c(raan)*c(aop+ta) - s(raan)*s(aop+ta)*c(i)),
                r*(s(raan)*c(aop+ta) + c(raan)*s(aop+ta)*c(i)),
                r*(s(i)*s(aop+ta))]
        vvec = [rvec[0]*h*e*s(ta)/(r*p) - h/r*(c(raan)*s(aop+ta) + s(raan)*c(aop+ta)*c(i)),
                rvec[1]*h*e*s(ta)/(r*p) - h/r*(s(raan)*s(aop+ta) - c(raan)*c(aop+ta)*c(i)),
                rvec[2]*h*e*s(ta)/(r*p) + h/r*(s(i)*c(aop+ta))]
        return np.hstack([rvec, vvec])
    elif method == 'p':
        # assigning temporary variables
        aop_t = aop
        raan_t = raan
        ta_t = ta
        # checking for undefined states (circular and/or equatorial orbits
        # where raan/aop are not individually defined)
        if e == 0 and i == 0:
            aop_t = 0.
            raan_t = 0.
            ta_t = aop_t + raan_t + ta
        elif e == 0:
            aop_t = 0.
            ta_t = aop_t + ta
        elif i == 0:
            raan_t = 0.
            aop_t = raan_t + aop
            ta_t = ta
        # converting elements into state vectors in PQW frame
        r_pqw = [p*c(ta_t) / (1+e*c(ta_t)), p*s(ta_t) / (1+e*c(ta_t)), 0]
        v_pqw = [-sqrt(mu/p)*s(ta_t), sqrt(mu/p)*(e+c(ta_t)), 0]
        # get 313 transformation matrix to geocentric-equitorial frame
        m1 = rot.rotate(-aop, axis='z')
        m2 = rot.rotate(-i, axis='x')
        m3 = rot.rotate(-raan, axis='z')
        T_ijk_pqw = mat.mxm(m2=m3, m1=mat.mxm(m2=m2, m1=m1))
        # state vector from PQW to ECI
        r_ijk = mat.mxv(m1=T_ijk_pqw, v1=r_pqw)
        v_ijk = mat.mxv(m1=T_ijk_pqw, v1=v_pqw)
        return np.hstack([r_ijk, v_ijk])
def kepler_prop(r, v, dt, center='earth'):
    """Solve Kepler's problem using classical orbital elements; no perturbations
    :param r: initial position
    :param v: initial velocity
    :param dt: time of flight
    :param center: center object of orbit; default=earth
    :return: np.hstack of propagated position and velocity vectors
    in work

    NOTE(review): ``n = 2*sqrt(mu/p**3)`` is the *parabolic* mean-motion
    form, but it is applied to the elliptic (expected sqrt(mu/sma^3))
    and hyperbolic branches as well -- confirm before relying on the
    propagated anomaly for e != 1.
    """
    # determine which planet center to compute from
    mu = get_mu(center=center)
    elements = get_orbital_elements(r, v)
    sma, e, i, raan, aop, ta = elements
    p = sma*(1-e**2)
    n = 2*sqrt(mu/p**3)
    rmag = norm(r)
    if e != 0:
        if e < 1:
            # elliptical: advance mean anomaly, invert Kepler's equation
            E0, _ = mean_anomalies(e, ta)
            M0 = E0 - e*sin(E0)
            M = M0 + n*dt
            E = univ_anomalies(M=M, e=e, dt=None, p=None)
            ta = true_anomaly(e, p=None, r=None, E=E, B=None, H=None)
        elif e == 1:
            # parabolic: Barker's equation via the parabolic anomaly B
            B0 = mean_anomalies(e, ta)
            hvec = vec.vcrossv(r, v)
            hmag = norm(hvec)
            p = hmag**2/mu
            M0 = B0 + B0**3/3
            B = univ_anomalies(M=None, e=e, dt=dt, p=p)
            ta = true_anomaly(e, p=p, r=rmag, E=None, B=B, H=None)
        elif e > 1:
            # hyperbolic: advance mean anomaly, invert the hyperbolic equation
            H0 = mean_anomalies(e, ta)
            M0 = e*sinh(H0) - H0
            M = M0 + n*dt
            H = univ_anomalies(M=M, e=e, dt=None, p=None)
            ta = true_anomaly(e, p=None, r=None, E=None, B=None, H=H)
    else:
        # circular orbit: anomalies collapse; NOTE(review): E is unused below
        E = raan + aop + ta
    rvec, vvec = get_rv_frm_elements([p, e, i, raan, aop, ta], method='p')
    return np.hstack([rvec, vvec])
def flight_path_angle(e, ta):
    """computes flight path angle for a satellite; measured from the
    local horizon to the velocity vector
    :param e: magnitude of eccentricity vector
    :param ta: true anomaly (rad)
    :return: flight path angle (rad)
    not tested
    """
    if e == 0:
        # circular orbit: velocity is always horizontal
        return 0.
    elif e < 1:
        E, _ = mean_anomalies(e, ta)
        # FIXME: is this the correct way to check sign?
        fpa = arccos(sqrt((1-e**2)/(1-e**2*cos(E)**2)))
        # negative on the inbound half of the orbit
        if ta > np.pi or ta < 0:
            fpa = -fpa
        # ALT: fpa = arctan2(e*sin(ta), 1+e*cos(ta))
    elif e == 1:
        # parabolic orbit: fpa is half the true anomaly
        fpa = ta/2.
    else: # if e > 1
        # hyperbolic orbit, using the hyperbolic anomaly H
        H = mean_anomalies(e, ta)
        fpa = arccos(sqrt( (e**2-1)/(e**2*cosh(H)**2-1) ))
    # return arccos( (1+e*cos(ta) / (sqrt(1+2*e*cos(ta)+e**2))))
    return fpa
def sp_energy(vel, pos, mu=mu_earth):
    """Two-body specific mechanical energy (km2/s2), angular momentum
    vector (km2/s), and flight-path angle (deg).

    :param vel: velocity vector (km/s)
    :param pos: position vector (km)
    :param mu: gravitational parameter; default=earth
    """
    speed = norm(vel)
    radius = norm(pos)
    energy = speed**2/2. - mu/radius
    h_vec = vec.vcrossv(v1=pos, v2=vel)
    # ascending (r.v > 0) half of the orbit has a positive path angle
    if np.dot(a=pos, b=vel) > 0:
        phi = np.rad2deg(arccos(norm(h_vec)/(radius*speed)))
    else:
        phi = 0.
    return energy, h_vec, phi
def get_period(sma, mu):
    """Orbital period (s) from Kepler's third law.

    :param sma: semi-major axis (km)
    :param mu: planetary gravitational parameter (km3/s2)
    :return: orbital period (s)
    """
    return 2 * np.pi * np.sqrt(sma * sma * sma / mu)
def mean_anomalies(e, ta):
    """Anomalies from eccentricity and true anomaly.

    :param e: eccentricity
    :param ta: true anomaly (rad)
    :return: (E, M) eccentric and mean anomaly for e < 1;
             B parabolic anomaly for e == 1;
             H hyperbolic anomaly for e > 1

    Bug fix: Kepler's equation is M = E - e*sin(E); it was mistyped as
    ``E - e**sin(E)`` (exponentiation instead of multiplication).

    NOTE(review): for e < 1 the arcsin form yields the principal value
    only, so E lands in the wrong quadrant for |ta| beyond 90 deg --
    confirm callers restrict ta or switch to an arctan2 formulation.
    """
    if e < 1:
        E = arcsin(sin(ta)*sqrt(1-e**2) / (1+e*cos(ta)))
        # alt: E = arccos((e+cos(ta))/(1+e*cos(ta)))
        M = E - e*sin(E)
        return E, M
    elif e == 1:
        B = tan(ta/2)
        return B
    elif e > 1:
        H = arcsinh( (sin(ta)*sqrt(e**2-1)) / (1+e*cos(ta)) )
        # alt: H = arccosh((e+cos(ta)/(1+e*cos(ta)))
        return H
def true_anomaly(e, p=None, r=None, E=None, B=None, H=None):
    """True anomaly (rad) recovered from the anomaly of the matching conic.

    :param e: eccentricity
    :param p: semi-parameter (km), parabolic case only
    :param r: orbital radius (km), parabolic case only
    :param E: eccentric anomaly (rad), elliptical case (e < 1)
    :param B: parabolic anomaly (rad), e == 1
    :param H: hyperbolic anomaly (rad), e > 1

    Bug fixes: ``np.arsin`` does not exist (the function is np.arcsin);
    branch guards use ``is not None`` so a zero anomaly (E == 0.0 etc.)
    no longer falls through to the hyperbolic branch.
    """
    if e < 1 and E is not None:
        ta = arcsin(sin(E)*sqrt(1-e**2) / (1-e*cos(E)))
        # alt: ta = arccos((cos(E)-e)/(1-e*cos(E)))
    elif e == 1 and B is not None:
        ta = np.arcsin(p*B/r)
        # alt: ta = (p-r)/r
    else:  # e > 1 and H
        ta = arcsin( (-sinh(H)*sqrt(e**2-1)) / (1-e*cosh(H)) )
        # alt: ta = arccos((cosh(ta)-e)/(1-e*cosh(H)))
    return ta
def univ_anomalies(M=None, e=None, dt=None, p=None, center='earth'):
    """universal formulation of elliptical, parabolic, and hyperbolic
    solutions for Kepler's Equation using Newton-Raphson's interation method
    where applicable.
    :param M: mean anomaly (rad)
    :param e: eccentricity
    :param dt: time of flight from orbit periapsis (s)
    :param p: semi-parameter (km)
    :return E: eccentric anomaly for elliptical orbits (rad)
    :return B: parabolic anomaly for parabolic orbits (rad)
    :return H: hyperbolic anomaly for hyperbolic orbits (rad)

    NOTE(review): the guards test truthiness (``and M``, ``and p``), so
    M == 0.0 falls through to the hyperbolic branch -- confirm callers
    never pass a zero mean anomaly for elliptical orbits.
    """
    mu = get_mu(center=center)
    # elliptical solution
    if e < 1 and M:
        # initial guess per Vallado: offset M by e depending on quadrant
        if -np.pi < M < 0 or np.pi < M < 2*np.pi:
            E = M - e
        else:
            E = M + e
        count = 0
        E_prev = 0
        # Newton-Raphson on Kepler's equation to ~1e-15 tolerance;
        # first pass keeps E_prev = 0 so the loop body always runs once
        while (np.abs(E - E_prev) > 1e-15):
            if count == 0:
                count += 1
            else:
                E_prev = E
            E = E_prev + (M - E_prev + e*sin(E_prev)) / (1-e*cos(E_prev))
        return E
    # parabolic solution (Barker's equation, closed form -- no iteration)
    elif e == 1 and p:
        n_p = 2*sqrt(mu/p**3)
        s = 0.5*arctan(2/(3*n_p*dt))
        w = arctan(tan(s)**(1/3))
        B = 2/tan(2*w)
        return B
    # hyperbolic solution
    else: # e > 1
        # initial guess depends on eccentricity regime (Vallado)
        if e < 1.6 and M:
            if -np.pi < M < 0 or np.pi < M < 2*np.pi:
                H = M - e
            else:
                H = M + e
        elif 1.6 <= e < 3.6 and np.abs(M) > np.pi:
            H = M - np.sign(M)*e
        else:
            H = M / (e-1)
        count = 0
        H_prev = 0
        # Newton-Raphson on the hyperbolic Kepler equation
        while (np.abs(H - H_prev) > 1e-15):
            if count == 0:
                count += 1
            else:
                H_prev = H
            H = H_prev + (M - e*sinh(H_prev)+H_prev) / (e*cosh(H_prev)-1)
        return H
def semimajor_axis(p=None, e=None, mu=None, period=None):
    """Semi-major axis (km) of a Keplerian orbit.

    Computed from the orbital period via Kepler's third law when both
    *period* and *mu* are given, otherwise from semiparameter and
    eccentricity.

    :param p: semiparameter (km)
    :param e: eccentricity (-)
    :param mu: planetary constant (km3/s2)
    :param period: orbital period (s)
    :return: semi-major axis (km)
    """
    if period and mu:
        return (mu * (period / (2 * np.pi))**2)**(1 / 3)
    return p / (1 - e**2)
def trajectory_eqn(p, e, tanom):
    """Conic trajectory equation: orbital radius (km) at a true anomaly.

    :param p: semiparameter (km)
    :param e: eccentricity (-)
    :param tanom: true anomaly (radians)
    :return: orbital radius (km)
    """
    denom = 1 + e * cos(tanom)
    return p / denom
def vis_viva(r=None, a=None, e=None, p=None, tanom=None, center='earth'):
    """Orbital speed (km/s) from the vis-viva equation.

    Accepts either (r, a) directly, or (e, p, tanom) from which radius
    and semi-major axis are derived.

    :param r: radius to point from center (km)
    :param a: semi-major axis of orbit (km)
    :param e: eccentricity (-)
    :param p: semiparameter (km)
    :param tanom: true anomaly (radians)
    :param center: center object; default='earth'
    :return: orbital speed, or an explanatory string when the argument
        combination is insufficient
    not fully tested
    """
    mu = get_mu(center=center)
    if r and a:
        return sqrt(2*mu/r - mu/a)
    if e and p and tanom:
        sma = semimajor_axis(p, e)
        radius = trajectory_eqn(p, e, tanom)
        return sqrt(2*mu/radius - mu/sma)
    return "Need at least r, a; or e, p, tanom"
class Keplerian(object):
    """Class to compute classical Keplerian elements from
    position/velocity vectors.
    :param rvec: positional vectors of spacecraft (km)
    :param vvec: velocity vectors of spacecraft (km/s)
    :param center: center object of orbit; default=earth
    """
    def __init__(self, rvec, vvec, center='earth'):
        # determine gravitational constant
        self.mu = get_mu(center=center)
        # position and veloccity
        self.rvec = rvec
        self.vvec = vvec
        self.r_mag = vec.norm(rvec)
        self.v_mag = vec.norm(vvec)
        # angular momentun; orbit normal direction
        self.h_vec = vec.vcrossv(rvec, vvec)
        self.h_mag = vec.norm(self.h_vec)
        self.h_hat = self.h_vec / self.h_mag
        # node vector K; n = 0 for equatorial orbits
        self.node_vec = vec.vcrossv(v1=[0,0,1], v2=self.h_vec)
        self.node_mag = vec.norm(self.node_vec)
        # eccentricity vector; e = 0 for circular orbits
        # (e_hat divides by e_mag: NaN for exactly circular orbits)
        self.e_vec = self.eccentricity_vector
        self.e_mag = vec.norm(self.e_vec)
        self.e_hat = self.e_vec/self.e_mag
    @property
    def eccentricity_vector(self):
        """eccentricity vector, e = (v^2/mu - 1/r) r - (r.v/mu) v"""
        scalar1 = self.v_mag**2/self.mu - 1./self.r_mag
        term1 = vec.vxs(scalar=scalar1, v1=self.rvec)
        term2 = -vec.vxs(scalar=vec.vdotv(v1=self.rvec, v2=self.vvec)/self.mu,
                         v1=self.vvec)
        eccentricity_vec = vec.vxadd(v1=term1, v2=term2) # points to orbit periapsis;
                                                         # e_vec = 0 for circular orbits
        return eccentricity_vec
    @property
    def inclination(self):
        """inclination (rad), angle between h and the K axis"""
        return arccos(self.h_vec[2]/self.h_mag)
    @property
    def true_anomaly(self):
        """true anomaly (rad); NaN for circular orbits (e undefined)"""
        if self.e_mag == 0:
            return float('nan')
        ta = arccos(vec.vdotv(self.e_vec, self.rvec)/(self.e_mag*self.r_mag))
        # r.v < 0 means inbound: reflect into (pi, 2pi)
        if vec.vdotv(self.rvec, self.vvec) < 0:
            ta = 2*np.pi - ta
        return ta
    @property
    def raan(self):
        """right ascending node (rad); NaN for equatorial orbits"""
        if self.inclination == 0:
            return float('nan')
        omega = arccos(self.node_vec[0]/self.node_mag)
        # node J-component < 0 puts the node in (pi, 2pi)
        if self.node_vec[1] < 0:
            omega = 2*np.pi - omega
        return omega
    @property
    def aop(self):
        """argument_of_periapse (rad); NaN for circular or equatorial orbits"""
        if self.e_mag == 0 or self.node_mag == 0:
            return float('nan')
        argp = arccos(vec.vdotv(self.node_vec, self.e_vec)/(self.node_mag*self.e_mag))
        # e K-component < 0 puts periapsis below the equator: (pi, 2pi)
        if self.e_vec[2] < 0:
            argp = 2*np.pi - argp
        return argp
    @property
    def semiparameter(self):
        """semi-parameter p = h^2/mu (km)"""
        return self.h_mag**2/self.mu
    @property
    def semimajor_axis(self):
        """semi-major axis a = p/(1-e^2) (km)"""
        return self.semiparameter/(1-self.e_mag**2)
    @property
    def energy(self):
        """specific mechanical energy v^2/2 - mu/r (km2/s2)"""
        return self.v_mag**2/2 - self.mu/self.r_mag
    @property
    def period(self):
        """orbital period 2*pi*sqrt(a^3/mu) (s)"""
        return 2*np.pi*np.sqrt(self.semimajor_axis**3/self.mu)
def patched_conics(r1, r2, rt1, rt2, pl1, pl2, center='sun',
                   elliptical1=False, period1=None,
                   elliptical2=False, period2=None):
    """compute a patched conics orbit transfer from an inner planet to
    outer planet.
    :param r1: orbital radius about planet 1 (km)
    :param r2: orbital radius about planet 2 (km)
    :param rt1: radius to planet 1 from center of transfer orbit (km)
    :param rt2: radius to planet 2 from center of transfer orbit (km)
    :param pl1: departing planet
    :param pl2: arrival planet
    :param center: central body of the transfer orbit (default 'sun')
    :param elliptical1: if True, the parking orbit at planet 1 is elliptical
                        with period period1; velocity is evaluated at radius r1
    :param period1: period of the elliptical parking orbit at planet 1 (s)
    :param elliptical2: if True, the capture orbit at planet 2 is elliptical
                        with period period2; velocity is evaluated at radius r2
    :param period2: period of the elliptical capture orbit at planet 2 (s)
    :return vt1: heliocentric departure velocity at planet 1 (km/s)
    :return vt2: heliocentric arrival velocity at planet 2 (km/s)
    :return dv_inj: [injection] departure delta-v (km/s)
    :return dv_ins: [insertion] arrival delta-v (km/s)
    :return TOF: transfer time of flight (s)
    in work
    """
    mu_center = get_mu(center=center)
    mu_pl1 = get_mu(pl1)
    mu_pl2 = get_mu(pl2)
    sma_pl1 = get_sma(pl1)
    sma_pl2 = get_sma(pl2)
    r_orbit1 = r1
    r_orbit2 = r2
    atrans = (rt1 + rt2) / 2 # transfer sma
    # pi*sqrt(a^3/mu) is HALF the transfer-ellipse period: the one-way leg
    TOF = pi*sqrt(atrans**3/mu_center) # period of hohmann transfer
    print(f'time of flight (days): {TOF/(3600*24)}')
    # spacecraft orbital velocities relative to planet 1, 2
    if elliptical1:
        P = period1
        # semi-major axis from Kepler's third law, then vis-viva at r_orbit1
        a = (mu_pl1*(P/(2*pi))**2)**(1/3)
        vc1 = sqrt(2*mu_pl1/r_orbit1 - mu_pl1/a)
        print(f'elliptical orbit velocity at planet 1 (km/s): {vc1}')
    else:
        vc1 = sqrt(mu_pl1/r_orbit1)
        print(f'circular orbit velocity at planet 1 (km/s): {vc1}')
    if elliptical2:
        P = period2
        # semi-major axis from Kepler's third law, then vis-viva at r_orbit2
        a = (mu_pl2*(P/(2*pi))**2)**(1/3)
        vc2 = sqrt(2*mu_pl2/r_orbit2 - mu_pl2/a)
        print(f'elliptical orbit velocity at planet 2 (km/s): {vc2}')
    else:
        vc2 = sqrt(mu_pl2/r_orbit2)
        print(f'circular orbit velocity at planet 2 (km/s): {vc2}')
    # heliocentric departure and arrival velocities (vis-viva on the transfer ellipse)
    vt1 = sqrt(2*mu_center/rt1 - mu_center/atrans)
    vt2 = sqrt(2*mu_center/rt2 - mu_center/atrans)
    print(f'heliocentric departure velocity at planet 1 (km/s): {vt1}')
    print(f'heliocentric arrival velocity at planet 2 (km/s): {vt2}')
    # heliocentric velocities of planet 1 and 2 (circular approximation)
    v_pl1 = sqrt(mu_center/sma_pl1)
    v_pl2 = sqrt(mu_center/sma_pl2)
    print(f'planet 1 velocity, heliocentric (km/s): {v_pl1}')
    print(f'planet 2 velocity, heliocentric (km/s): {v_pl2}')
    # hyperbolic excess velocities
    v_hyp1 = vt1 - v_pl1
    v_hyp2 = vt2 - v_pl2
    print(f'hyperbolic excess velocity (vinf), wrt planet 1 (km/s): {v_hyp1}')
    print(f'hyperbolic excess velocity (vinf), wrt planet 2 (km/s): {v_hyp2}')
    # departure
    # periapsis speed of the departure hyperbola (energy equation)
    vp1 = sqrt(2*mu_pl1/r_orbit1 + v_hyp1**2)
    # print(f'vp1 {vp1}')
    dv_inj = vp1 - vc1 # v_inf
    print(f'[injection] departure delta-v (km/s): {dv_inj}')
    # arrival
    # periapsis speed of the arrival hyperbola (energy equation)
    vp2 = sqrt(2*mu_pl2/r_orbit2 + v_hyp2**2)
    # print(f'vp2 {vp2}')
    dv_ins = vc2 - vp2 # v_inf
    print(f'[insertion] arrival delta-v (km/s) {dv_ins}')
    return vt1, vt2, dv_inj, dv_ins, TOF
def rocket_eqn(isp, g0, m0, mf):
    """Tsiolkovsky rocket equation: delta-v = Isp * g0 * ln(m0/mf)."""
    mass_ratio = m0/mf
    return isp*g0*np.log(mass_ratio)
def rocket_eqn_mass(dv, isp, g0):
    """Inverse rocket equation: mass ratio m0/mf needed for a given delta-v."""
    exhaust_velocity = isp*g0
    return np.exp(dv/exhaust_velocity)
if __name__ == "__main__":
    # Smoke tests / usage examples for the element-conversion routines above.
    # circular orbit
    r = [12756.2, 0.0, 0.0]
    v = [0.0, 7.90537, 0.0]
    elements = get_orbital_elements(rvec=r, vvec=v)
    # print(elements)
    # polar orbit
    r = [8750., 5100., 0.0]
    v = [-3., 5.2, 5.9]
    elements = get_orbital_elements(rvec=r, vvec=v)
    # print(elements)
    # polar orbit
    r = [0., -5100., 8750.]
    v = [0., 4.2, 5.9]
    elements = get_orbital_elements(rvec=r, vvec=v)
    # print(elements)
    # get r,v from orbital elements
    # NOTE(review): np.rad2deg converts *from* radians here; if 45/30 are
    # degrees this should probably be np.deg2rad (as used below) -- confirm
    elements = [14351, 0.5, np.rad2deg(45), np.rad2deg(30), 0, 0]
    rv = get_rv_frm_elements(elements, method='p')
    # assert np.allclose(rv)
    # print(rv) # r=[9567.2, 0], v=[0, 7.9054]
    # testing specifc energy function
    pos = vec.vxs(scalar=1e4, v1=[1.2756, 1.9135, 3.1891]) # km
    vel = [7.9053, 15.8106, 0.0] # km/s
    energy = sp_energy(vel=vel, pos=pos, mu=get_mu(center='earth'))
    # print(energy)
    # print('\n\n')
    # round-trip: state vector -> elements -> state vector
    r = [8773.8938, -11873.3568, -6446.7067]
    v = [4.717099, 0.714936, 0.388178]
    # elements = Keplerian(r, v)
    elements = get_orbital_elements(r, v)
    sma, e, i, raan, aop, ta = elements
    p = sma*(1-e**2)  # semi-latus rectum from sma and eccentricity
    state = get_rv_frm_elements([p, e, i, raan, aop, ta], method='p')
    # print(state)
    # example from pg 114 vallado
    # orbital positon/velocity
    r = [6524.834, 6862.875, 6448.296]
    v = [4.901327, 5.533756, -1.976341]
    elements = get_orbital_elements(rvec=r, vvec=v)
    # print(elements)
    # test get_rv_frm_elements(p, e, i, raan, aop, ta, center='earth'):
    p = 11067.79
    e = 0.83285
    i = np.deg2rad(87.87)
    raan = np.deg2rad(227.89)
    aop = np.deg2rad(53.38)
    ta = np.deg2rad(92.335)
    state = get_rv_frm_elements([p, e, i, raan, aop, ta], center='earth', method='p')
    assert np.allclose(state, [6525.36812099, 6861.5318349, 6449.11861416,
                               4.90227865, 5.53313957, -1.9757101])
    # anomaly solvers for elliptic (E), parabolic (B), hyperbolic (H) cases
    E = univ_anomalies(e=0.4, M=np.deg2rad(235.4))
    B = univ_anomalies(e=1, dt=53.7874*60, p=25512)
    H = univ_anomalies(e=2.4, M=np.deg2rad(235.4))
    assert np.allclose([E, B, H], [3.84866174, 0.817751, 1.6013761449])
    # not verified
    r1 = r_earth + 300
    r2 = r_jupiter + 221103.53
    rt1 = sma_earth
    rt2 = sma_jupiter
    pl1 = 'earth'
    pl2 = 'jupiter'
    # patched_conics(r1, r2, rt1, rt2, pl1, pl2, center='sun', elliptical2=True, period2=231.48*24*3600)
    # semi-major axis of a repeat orbit: 6.5 revs per sidereal day
    mu = mu_earth
    period = 6.5/7.0*(23+56/60+4.091/3600)*3600
    a = semimajor_axis(p=None, e=None, mu=mu, period=period)
print(a) | [
"numpy.abs",
"math_helpers.vectors.vxadd",
"numpy.allclose",
"numpy.arsin",
"math_helpers.vectors.vxs",
"math_helpers.vectors.norm",
"numpy.exp",
"os.path.dirname",
"math_helpers.vectors.vdotv",
"math_helpers.rotations.rotate",
"numpy.hstack",
"numpy.dot",
"numpy.log",
"numpy.deg2rad",
"... | [((1275, 1293), 'math_helpers.vectors.norm', 'vec.norm', (['node_vec'], {}), '(node_vec)\n', (1283, 1293), True, 'from math_helpers import vectors as vec\n'), ((3985, 4021), 'numpy.array', 'np.array', (['[sma, e, i, raan, aop, ta]'], {}), '([sma, e, i, raan, aop, ta])\n', (3993, 4021), True, 'import numpy as np\n'), ((8165, 8188), 'numpy.hstack', 'np.hstack', (['[rvec, vvec]'], {}), '([rvec, vvec])\n', (8174, 8188), True, 'import numpy as np\n'), ((9288, 9315), 'math_helpers.vectors.vcrossv', 'vec.vcrossv', ([], {'v1': 'pos', 'v2': 'vel'}), '(v1=pos, v2=vel)\n', (9299, 9315), True, 'from math_helpers import vectors as vec\n'), ((20170, 20193), 'numpy.exp', 'np.exp', (['(dv / (isp * g0))'], {}), '(dv / (isp * g0))\n', (20176, 20193), True, 'import numpy as np\n'), ((20945, 20997), 'math_helpers.vectors.vxs', 'vec.vxs', ([], {'scalar': '(10000.0)', 'v1': '[1.2756, 1.9135, 3.1891]'}), '(scalar=10000.0, v1=[1.2756, 1.9135, 3.1891])\n', (20952, 20997), True, 'from math_helpers import vectors as vec\n'), ((21795, 21812), 'numpy.deg2rad', 'np.deg2rad', (['(87.87)'], {}), '(87.87)\n', (21805, 21812), True, 'import numpy as np\n'), ((21824, 21842), 'numpy.deg2rad', 'np.deg2rad', (['(227.89)'], {}), '(227.89)\n', (21834, 21842), True, 'import numpy as np\n'), ((21853, 21870), 'numpy.deg2rad', 'np.deg2rad', (['(53.38)'], {}), '(53.38)\n', (21863, 21870), True, 'import numpy as np\n'), ((21880, 21898), 'numpy.deg2rad', 'np.deg2rad', (['(92.335)'], {}), '(92.335)\n', (21890, 21898), True, 'import numpy as np\n'), ((21996, 22100), 'numpy.allclose', 'np.allclose', (['state', '[6525.36812099, 6861.5318349, 6449.11861416, 4.90227865, 5.53313957, -1.9757101\n ]'], {}), '(state, [6525.36812099, 6861.5318349, 6449.11861416, 4.90227865,\n 5.53313957, -1.9757101])\n', (22007, 22100), True, 'import numpy as np\n'), ((22294, 22354), 'numpy.allclose', 'np.allclose', (['[E, B, H]', '[3.84866174, 0.817751, 1.6013761449]'], {}), '([E, B, H], [3.84866174, 0.817751, 1.6013761449])\n', 
(22305, 22354), True, 'import numpy as np\n'), ((5545, 5568), 'numpy.hstack', 'np.hstack', (['[rvec, vvec]'], {}), '([rvec, vvec])\n', (5554, 5568), True, 'import numpy as np\n'), ((9323, 9343), 'numpy.dot', 'np.dot', ([], {'a': 'pos', 'b': 'vel'}), '(a=pos, b=vel)\n', (9329, 9343), True, 'import numpy as np\n'), ((9664, 9686), 'numpy.sqrt', 'np.sqrt', (['(sma ** 3 / mu)'], {}), '(sma ** 3 / mu)\n', (9671, 9686), True, 'import numpy as np\n'), ((14367, 14381), 'math_helpers.vectors.norm', 'vec.norm', (['rvec'], {}), '(rvec)\n', (14375, 14381), True, 'from math_helpers import vectors as vec\n'), ((14403, 14417), 'math_helpers.vectors.norm', 'vec.norm', (['vvec'], {}), '(vvec)\n', (14411, 14417), True, 'from math_helpers import vectors as vec\n'), ((14491, 14514), 'math_helpers.vectors.vcrossv', 'vec.vcrossv', (['rvec', 'vvec'], {}), '(rvec, vvec)\n', (14502, 14514), True, 'from math_helpers import vectors as vec\n'), ((14536, 14556), 'math_helpers.vectors.norm', 'vec.norm', (['self.h_vec'], {}), '(self.h_vec)\n', (14544, 14556), True, 'from math_helpers import vectors as vec\n'), ((14680, 14720), 'math_helpers.vectors.vcrossv', 'vec.vcrossv', ([], {'v1': '[0, 0, 1]', 'v2': 'self.h_vec'}), '(v1=[0, 0, 1], v2=self.h_vec)\n', (14691, 14720), True, 'from math_helpers import vectors as vec\n'), ((14743, 14766), 'math_helpers.vectors.norm', 'vec.norm', (['self.node_vec'], {}), '(self.node_vec)\n', (14751, 14766), True, 'from math_helpers import vectors as vec\n'), ((14892, 14912), 'math_helpers.vectors.norm', 'vec.norm', (['self.e_vec'], {}), '(self.e_vec)\n', (14900, 14912), True, 'from math_helpers import vectors as vec\n'), ((15113, 15150), 'math_helpers.vectors.vxs', 'vec.vxs', ([], {'scalar': 'scalar1', 'v1': 'self.rvec'}), '(scalar=scalar1, v1=self.rvec)\n', (15120, 15150), True, 'from math_helpers import vectors as vec\n'), ((15302, 15331), 'math_helpers.vectors.vxadd', 'vec.vxadd', ([], {'v1': 'term1', 'v2': 'term2'}), '(v1=term1, v2=term2)\n', (15311, 15331), 
True, 'from math_helpers import vectors as vec\n'), ((20109, 20124), 'numpy.log', 'np.log', (['(m0 / mf)'], {}), '(m0 / mf)\n', (20115, 20124), True, 'import numpy as np\n'), ((20731, 20745), 'numpy.rad2deg', 'np.rad2deg', (['(45)'], {}), '(45)\n', (20741, 20745), True, 'import numpy as np\n'), ((20747, 20761), 'numpy.rad2deg', 'np.rad2deg', (['(30)'], {}), '(30)\n', (20757, 20761), True, 'import numpy as np\n'), ((88, 113), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (103, 113), False, 'import os\n'), ((6322, 6348), 'math_helpers.rotations.rotate', 'rot.rotate', (['(-aop)'], {'axis': '"""z"""'}), "(-aop, axis='z')\n", (6332, 6348), True, 'from math_helpers import rotations as rot\n'), ((6362, 6386), 'math_helpers.rotations.rotate', 'rot.rotate', (['(-i)'], {'axis': '"""x"""'}), "(-i, axis='x')\n", (6372, 6386), True, 'from math_helpers import rotations as rot\n'), ((6400, 6427), 'math_helpers.rotations.rotate', 'rot.rotate', (['(-raan)'], {'axis': '"""z"""'}), "(-raan, axis='z')\n", (6410, 6427), True, 'from math_helpers import rotations as rot\n'), ((6545, 6576), 'math_helpers.matrices.mxv', 'mat.mxv', ([], {'m1': 'T_ijk_pqw', 'v1': 'r_pqw'}), '(m1=T_ijk_pqw, v1=r_pqw)\n', (6552, 6576), True, 'from math_helpers import matrices as mat\n'), ((6593, 6624), 'math_helpers.matrices.mxv', 'mat.mxv', ([], {'m1': 'T_ijk_pqw', 'v1': 'v_pqw'}), '(m1=T_ijk_pqw, v1=v_pqw)\n', (6600, 6624), True, 'from math_helpers import matrices as mat\n'), ((6640, 6665), 'numpy.hstack', 'np.hstack', (['[r_ijk, v_ijk]'], {}), '([r_ijk, v_ijk])\n', (6649, 6665), True, 'import numpy as np\n'), ((10359, 10378), 'numpy.arsin', 'np.arsin', (['(p * B / r)'], {}), '(p * B / r)\n', (10367, 10378), True, 'import numpy as np\n'), ((11378, 11396), 'numpy.abs', 'np.abs', (['(E - E_prev)'], {}), '(E - E_prev)\n', (11384, 11396), True, 'import numpy as np\n'), ((15770, 15801), 'math_helpers.vectors.vdotv', 'vec.vdotv', (['self.rvec', 'self.vvec'], {}), '(self.rvec, 
self.vvec)\n', (15779, 15801), True, 'from math_helpers import vectors as vec\n'), ((16909, 16952), 'numpy.sqrt', 'np.sqrt', (['(self.semimajor_axis ** 3 / self.mu)'], {}), '(self.semimajor_axis ** 3 / self.mu)\n', (16916, 16952), True, 'import numpy as np\n'), ((22161, 22178), 'numpy.deg2rad', 'np.deg2rad', (['(235.4)'], {}), '(235.4)\n', (22171, 22178), True, 'import numpy as np\n'), ((22264, 22281), 'numpy.deg2rad', 'np.deg2rad', (['(235.4)'], {}), '(235.4)\n', (22274, 22281), True, 'import numpy as np\n'), ((1699, 1748), 'math_helpers.vectors.vdotv', 'vec.vdotv', (['k.eccentricity_vector', '[1.0, 0.0, 0.0]'], {}), '(k.eccentricity_vector, [1.0, 0.0, 0.0])\n', (1708, 1748), True, 'from math_helpers import vectors as vec\n'), ((2187, 2212), 'math_helpers.vectors.vdotv', 'vec.vdotv', (['node_vec', 'rvec'], {}), '(node_vec, rvec)\n', (2196, 2212), True, 'from math_helpers import vectors as vec\n'), ((7566, 7583), 'math_helpers.vectors.vcrossv', 'vec.vcrossv', (['r', 'v'], {}), '(r, v)\n', (7577, 7583), True, 'from math_helpers import vectors as vec\n'), ((12159, 12177), 'numpy.abs', 'np.abs', (['(H - H_prev)'], {}), '(H - H_prev)\n', (12165, 12177), True, 'import numpy as np\n'), ((15701, 15733), 'math_helpers.vectors.vdotv', 'vec.vdotv', (['self.e_vec', 'self.rvec'], {}), '(self.e_vec, self.rvec)\n', (15710, 15733), True, 'from math_helpers import vectors as vec\n'), ((16311, 16347), 'math_helpers.vectors.vdotv', 'vec.vdotv', (['self.node_vec', 'self.e_vec'], {}), '(self.node_vec, self.e_vec)\n', (16320, 16347), True, 'from math_helpers import vectors as vec\n'), ((2921, 2953), 'math_helpers.vectors.vdotv', 'vec.vdotv', (['[1.0, 0.0, 0.0]', 'rvec'], {}), '([1.0, 0.0, 0.0], rvec)\n', (2930, 2953), True, 'from math_helpers import vectors as vec\n'), ((3315, 3328), 'numpy.rad2deg', 'np.rad2deg', (['i'], {}), '(i)\n', (3325, 3328), True, 'import numpy as np\n'), ((3366, 3382), 'numpy.rad2deg', 'np.rad2deg', (['raan'], {}), '(raan)\n', (3376, 3382), True, 'import numpy 
as np\n'), ((3435, 3450), 'numpy.rad2deg', 'np.rad2deg', (['aop'], {}), '(aop)\n', (3445, 3450), True, 'import numpy as np\n'), ((3509, 3534), 'numpy.rad2deg', 'np.rad2deg', (['true_lon_peri'], {}), '(true_lon_peri)\n', (3519, 3534), True, 'import numpy as np\n'), ((3593, 3618), 'numpy.rad2deg', 'np.rad2deg', (['lon_peri_mean'], {}), '(lon_peri_mean)\n', (3603, 3618), True, 'import numpy as np\n'), ((3662, 3676), 'numpy.rad2deg', 'np.rad2deg', (['ta'], {}), '(ta)\n', (3672, 3676), True, 'import numpy as np\n'), ((3728, 3746), 'numpy.rad2deg', 'np.rad2deg', (['arglat'], {}), '(arglat)\n', (3738, 3746), True, 'import numpy as np\n'), ((3805, 3828), 'numpy.rad2deg', 'np.rad2deg', (['arglat_mean'], {}), '(arglat_mean)\n', (3815, 3828), True, 'import numpy as np\n'), ((3874, 3894), 'numpy.rad2deg', 'np.rad2deg', (['true_lon'], {}), '(true_lon)\n', (3884, 3894), True, 'import numpy as np\n'), ((3940, 3960), 'numpy.rad2deg', 'np.rad2deg', (['mean_lon'], {}), '(mean_lon)\n', (3950, 3960), True, 'import numpy as np\n'), ((6466, 6487), 'math_helpers.matrices.mxm', 'mat.mxm', ([], {'m2': 'm2', 'm1': 'm1'}), '(m2=m2, m1=m1)\n', (6473, 6487), True, 'from math_helpers import matrices as mat\n'), ((12014, 12023), 'numpy.abs', 'np.abs', (['M'], {}), '(M)\n', (12020, 12023), True, 'import numpy as np\n'), ((15183, 15220), 'math_helpers.vectors.vdotv', 'vec.vdotv', ([], {'v1': 'self.rvec', 'v2': 'self.vvec'}), '(v1=self.rvec, v2=self.vvec)\n', (15192, 15220), True, 'from math_helpers import vectors as vec\n'), ((12053, 12063), 'numpy.sign', 'np.sign', (['M'], {}), '(M)\n', (12060, 12063), True, 'import numpy as np\n')] |
#####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#####
##### CLIMS data viewer V01 - FS - 01/30/2019
##### Please define the default search path and experiment name below
##### for an enhanced user experience!
#####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#####
##### DEFAULT SEARCH PATH:
DPATH='/Volumes/Chunky/LOVECLIP/LOVECLIM1.3/'  # must end with '/' -- paths below are built by plain concatenation
##### DEFAULT EXPERIMENT NAME
DNAME='pivar02'  # preselected experiment in both dataset dropdowns
#####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#####
#####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#####
import dash
import dash_core_components as dcc
import dash_html_components as html
from netCDF4 import Dataset
import numpy as np
import plotly.graph_objs as go
#import json
import os
import os.path
#from mpl_toolkits.basemap import Basemap
#####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#####
#####
##### COMPARE TWO EXPERIMENTS OR SNAPSHOTS:
#####
#####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#####
## GET LIST OF EXPERIMENTS
#DEFAULT
# An experiment is any entry under DPATH that contains an 'output' subdirectory.
# One-pass filter replaces the old remove-while-iterating loop, which needed a
# compensating index (nnnn) to stay in bounds after each removal.
all_experiments = [name for name in os.listdir(DPATH)
                   if os.path.isdir(DPATH + name + '/output')]
#####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#####
#####~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#####
### APP:
# NOTE(review): available_variables is defined but not referenced by any
# visible callback -- confirm whether it is still needed
available_variables=['SAT', 'Total precipitation']
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Page layout: header row, two identical dataset-selection rows, hidden
# client-side stores, the chunk slider + ice-volume overview plot, and the
# two contour plots (NH and AIS).
app.layout = html.Div([
    ################################################################################################################
    # HEADER
    html.Div(
        html.H1(
            id='app-headline',
            children='CLIMS data viewer'
        ),
        style={'width': '72%','display': 'inline-block','verticalAlign':'middle'}
    ),
    # column labels for the per-dataset Deln / Chunkl inputs below
    html.Div(
        html.Div(
            id='delta-n-text',
            children=' Deln:'
        ),
        style={'width': '10%','display': 'inline-block','verticalAlign':'bottom'}
    ),
    html.Div(
        html.Div(
            id='factor-n-text',
            children=' Chunkl:'
        ),
        style={'width': '15%','display': 'inline-block','verticalAlign':'bottom'}
    ),
    ################################################################################################################
    # DATASET 1
    # search path, experiment dropdown, chunk offset (input-n1) and
    # chunk-length factor (infac-n1)
    html.Div(
        dcc.Input(
            id='data--path1',
            value='/Volumes/Chunky/LOVECLIP/LOVECLIM1.3/',
            type='text',
            size=60,
            debounce=True
        ),
        style={'width': '39%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        dcc.Dropdown(
            id='drop--experiment1',
            #options=[{'label': i, 'value': i} for i in all_experiments],
            value=DNAME,
            clearable=False
        ),
        style={'width': '29%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        html.Div(
            id='delta-n1',
            children=' '
        ),
        style={'width': '4%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        dcc.Input(
            id='input-n1',
            value=0,
            type='text',
            size=5,
            debounce=True
        ),
        style={'width': '6%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        html.Div(
            id='factor-n1',
            children=' '
        ),
        style={'width': '4%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        dcc.Input(
            id='infac-n1',
            value=1,
            type='text',
            size=5,
            debounce=True
        ),
        style={'width': '9%','display': 'inline-block','verticalAlign':'middle'}
    ),
    ################################################################################################################
    # DATASET 2
    # same controls as dataset 1, used for comparison / difference plots
    html.Div(
        dcc.Input(
            id='data--path2',
            value='/Volumes/Chunky/LOVECLIP/LOVECLIM1.3/',
            type='text',
            size=60,
            debounce=True
        ),
        style={'width': '39%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        dcc.Dropdown(
            id='drop--experiment2',
            #options=[{'label': i, 'value': i} for i in all_experiments],
            value=DNAME,
            clearable=False
        ),
        style={'width': '29%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        html.Div(
            id='delta-n2',
            children=' '
        ),
        style={'width': '4%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        dcc.Input(
            id='input-n2',
            value=0,
            type='text',
            size=5,
            debounce=True
        ),
        style={'width': '6%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        html.Div(
            id='factor-n2',
            children=''
        ),
        style={'width': '4%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        dcc.Input(
            id='infac-n2',
            value=1,
            type='text',
            size=5,
            debounce=True
        ),
        style={'width': '9%','display': 'inline-block','verticalAlign':'middle'}
    ),
    ################################################################################################################
    # STORAGE
    # client-side caches filled by callbacks: experiment paths and
    # ice-volume time series for each dataset
    #dcc.Store(id='experiment-list'),
    dcc.Store(id='data--exp1'),
    dcc.Store(id='data--exp2'),
    dcc.Store(id='exp--icevol1'),
    dcc.Store(id='exp--icevol2'),
    ################################################################################################################
    # CHUNK SLIDER AND ICEVOLUME
    # slider max is set dynamically by the no_of_chunks_experiment callback
    dcc.Slider(
        id='chunk--slider',
        min=0,
        value=0,
        step=1
    ),
    dcc.Graph(id='icevol--plot'),
    ################################################################################################################
    # CONTOUR PLOTS
    # plot-type dropdown (difference / exp1 / exp2), update button, status
    # line, then the NH and AIS contour graphs
    html.Div(
        dcc.Dropdown(
            id='drop--difference',
            value=0,
            clearable=False
        ),
        style={'width': '14%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        html.Button(id='submit-to-plots', n_clicks=0, children='Update'),
        style={'display': 'inline-block','width': '9%','verticalAlign':'middle'}
    ),
    html.Div(
        html.Div(id='plot--state'),
        style={'width': '74%','display': 'inline-block','verticalAlign':'middle'}
    ),
    html.Div(
        dcc.Graph(id='NH--plot'),
        style={'width': '59%', 'display': 'inline-block'}
    ),
    html.Div(
        dcc.Graph(id='AIS--plot'),
        style={'width': '39%', 'display': 'inline-block'}
    )
])
#########################################################################
##
## GET PATH OF EXPERIMENTS
##
#########################################################################
### get experiment lists and update dropdown
@app.callback(
dash.dependencies.Output('drop--experiment1', 'options'),
[dash.dependencies.Input('data--path1','value')])
def get_all_experiments1(datapath):
    """Dropdown options for dataset 1: experiment directories under *datapath*.

    A directory counts as an experiment when it contains an 'output'
    subdirectory.  *datapath* is concatenated directly, so it must end in '/'.
    :param datapath: root search path (from the 'data--path1' input)
    :return: list of {'label', 'value'} dicts for the dcc.Dropdown
    """
    # one-pass filter replaces the old remove-while-iterating loop, which
    # needed a compensating index to stay in bounds after each removal
    expnames = [name for name in os.listdir(datapath)
                if os.path.isdir(datapath + name + '/output')]
    return [{'label': i, 'value': i} for i in expnames]
@app.callback(
dash.dependencies.Output('drop--experiment2', 'options'),
[dash.dependencies.Input('data--path2','value')])
def get_all_experiments2(datapath):
    """Dropdown options for dataset 2: experiment directories under *datapath*.

    A directory counts as an experiment when it contains an 'output'
    subdirectory.  *datapath* is concatenated directly, so it must end in '/'.
    :param datapath: root search path (from the 'data--path2' input)
    :return: list of {'label', 'value'} dicts for the dcc.Dropdown
    """
    # one-pass filter replaces the old remove-while-iterating loop, which
    # needed a compensating index to stay in bounds after each removal
    expnames = [name for name in os.listdir(datapath)
                if os.path.isdir(datapath + name + '/output')]
    return [{'label': i, 'value': i} for i in expnames]
### get experiment path:
@app.callback(
dash.dependencies.Output('data--exp1', 'data'),
[dash.dependencies.Input('data--path1','value'),
dash.dependencies.Input('drop--experiment1','value')
])
def get_experiments1(datapath,expname):
    """Store the full path of experiment 1 (search path + experiment name)."""
    return datapath + expname
@app.callback(
dash.dependencies.Output('data--exp2', 'data'),
[dash.dependencies.Input('data--path2','value'),
dash.dependencies.Input('drop--experiment2','value')
])
def get_experiments2(datapath,expname):
    """Store the full path of experiment 2 (search path + experiment name)."""
    return datapath + expname
@app.callback(
dash.dependencies.Output('drop--difference','options'),
[dash.dependencies.Input('drop--experiment1','value'),
dash.dependencies.Input('drop--experiment2','value')])
def get_plot_type(input1,input2):
    """Options for the plot-type dropdown: difference, or either experiment alone.

    The option value is the positional index (0=difference, 1=exp1, 2=exp2),
    which the plotting callbacks interpret as pltype.
    """
    labels = ['difference', input1, input2]
    return [{'label': text, 'value': idx} for idx, text in enumerate(labels)]
#########################################################################
##
## SOME RESETS:
##
#########################################################################
@app.callback(
dash.dependencies.Output('chunk--slider','value'),
[dash.dependencies.Input('data--exp1','data'),
dash.dependencies.Input('data--exp2','data'),
dash.dependencies.Input('input-n1','value'),
dash.dependencies.Input('input-n2','value'),
dash.dependencies.Input('infac-n1','value'),
dash.dependencies.Input('infac-n2','value'),
dash.dependencies.Input('drop--difference','value')
])
def reset_the_slider(x0,x1,x2,x3,x4,x5,x6):
    """Snap the chunk slider back to 0 whenever any upstream input changes.

    The seven inputs are only triggers; their values are ignored.
    """
    return 0
#########################################################################
##
## PLOT ICE VOLUME AND GET CHUNKS
##
#########################################################################
# get icevolumes:
@app.callback(
dash.dependencies.Output('exp--icevol1','data'),
[dash.dependencies.Input('data--exp1', 'data')])
def get_exp_props1(expname):
    """Load experiment 1's ice-volume series as sea-level equivalent.

    Reads column 1 of <expname>/output/icevol.dat and rescales it relative
    to a 3.0e16 reference volume (factor -0.9/3.6e14).
    :return: dict with 'xx' (chunk index) and 'yy' (SLE series)
    """
    raw = np.loadtxt(expname + '/output/icevol.dat')[:, 1]
    sle = -0.9/3.6e14*(raw - 3.0e16)
    return {'yy': np.squeeze(sle),
            'xx': np.squeeze(np.arange(len(sle)))}
@app.callback(
dash.dependencies.Output('exp--icevol2','data'),
[dash.dependencies.Input('data--exp2', 'data')])
def get_exp_props2(expname):
    """Load experiment 2's ice-volume series as sea-level equivalent.

    Reads column 1 of <expname>/output/icevol.dat and rescales it relative
    to a 3.0e16 reference volume (factor -0.9/3.6e14).
    :return: dict with 'xx' (chunk index) and 'yy' (SLE series)
    """
    raw = np.loadtxt(expname + '/output/icevol.dat')[:, 1]
    sle = -0.9/3.6e14*(raw - 3.0e16)
    return {'yy': np.squeeze(sle),
            'xx': np.squeeze(np.arange(len(sle)))}
@app.callback(
dash.dependencies.Output('chunk--slider','max'),
[dash.dependencies.Input('data--exp1','data'),
dash.dependencies.Input('data--exp2','data'),
dash.dependencies.Input('input-n1','value'),
dash.dependencies.Input('input-n2','value'),
dash.dependencies.Input('infac-n1','value'),
dash.dependencies.Input('infac-n2','value'),
dash.dependencies.Input('drop--difference','value')
])
# adjust the chunk slider:
def no_of_chunks_experiment(expname1,expname2,ntext1,ntext2,nfactext1,nfactext2,pltype):
    """Set the chunk slider's maximum.

    Counts the chunk directories (those containing fort.92.nc) under each
    experiment's output/psuim_nh/ tree, then rescales and offsets the counts
    so that, in difference mode (pltype==0), the slider cannot step past the
    end of either run.
    :param expname1/expname2: full experiment paths (dcc.Store values)
    :param ntext1/ntext2: chunk offsets (Deln) as text
    :param nfactext1/nfactext2: chunk-length factors (Chunkl) as text
    :param pltype: 0=difference, 1=experiment 1 only, 2=experiment 2 only
    :return: slider maximum
    NOTE(review): if pltype is none of 0/1/2, nmaxsl is unbound -- confirm
    the dropdown can never supply another value.
    NOTE(review): '/' is true division in Python 3, so nmaxsl can be a
    float; dcc.Slider appears to tolerate this -- confirm.
    """
    if pltype==0 or pltype==1:
        PNHPATH=expname1+'/output/psuim_nh/'
        chunk_dirs=os.listdir(PNHPATH)
        # drop entries without a fort.92.nc; nn compensates the index for removals
        nn=0
        for n in range(len(chunk_dirs)):
            if not os.path.isfile(PNHPATH+chunk_dirs[n-nn]+'/fort.92.nc'):
                chunk_dirs.remove(chunk_dirs[n-nn])
                nn+=1
        NCHUNKSall=len(chunk_dirs)
    if pltype==0 or pltype==2:
        PNHPATH=expname2+'/output/psuim_nh/'
        chunk_dirs=os.listdir(PNHPATH)
        nn=0
        for n in range(len(chunk_dirs)):
            if not os.path.isfile(PNHPATH+chunk_dirs[n-nn]+'/fort.92.nc'):
                chunk_dirs.remove(chunk_dirs[n-nn])
                nn+=1
        NCHUNKS2all=len(chunk_dirs)
    nskip1=int(ntext1)
    nskip2=int(ntext2)
    nskip=nskip2-nskip1
    nfac1=int(nfactext1)
    nfac2=int(nfactext2)
    if pltype==0:
        # rescale chunk counts to a common time step when the factors differ
        if nfac1==nfac2:
            NCHUNKS=NCHUNKSall
            NCHUNKS2=NCHUNKS2all
        else:
            # this assumes that either nfac1/nfac2 or nfac2/nfac1 is an integer
            if nfac1>nfac2:
                nfacn=nfac1/nfac2
                NCHUNKS=NCHUNKSall
                NCHUNKS2=NCHUNKS2all/nfacn
            else:
                nfacn=nfac2/nfac1
                NCHUNKS=NCHUNKSall/nfacn
                NCHUNKS2=NCHUNKS2all
        # limited by whichever run ends first after applying its offset
        nmaxsl=min(NCHUNKS-max(nskip,0)/nfac1,NCHUNKS2-max(-nskip,0)/nfac2)
        nmaxsl=max(0,nmaxsl)
    elif pltype==1:
        nmaxsl=NCHUNKSall
    elif pltype==2:
        nmaxsl=NCHUNKS2all
    return nmaxsl
# renew icevolume plot
@app.callback(
dash.dependencies.Output('icevol--plot', 'figure'),
[dash.dependencies.Input('exp--icevol1', 'data'),
dash.dependencies.Input('exp--icevol2', 'data'),
dash.dependencies.Input('input-n1','value'),
dash.dependencies.Input('input-n2','value'),
dash.dependencies.Input('infac-n1','value'),
dash.dependencies.Input('infac-n2','value'),
dash.dependencies.Input('chunk--slider', 'value'),
dash.dependencies.Input('drop--difference','value')
])
def update_sealev(datain1,datain2,ntext1,ntext2,nfactext1,nfactext2,nstep,pltype):
    """Redraw the ice-volume overview plot with a marker at the current chunk.

    Both series are shifted/stretched onto a common chunk axis using each
    dataset's offset (Deln) and factor (Chunkl), then a marker is placed at
    the slider position -- on both curves in difference mode, on the selected
    curve otherwise.
    :param datain1/datain2: {'xx', 'yy'} dicts from the icevol stores
    :param ntext1/ntext2: chunk offsets as text
    :param nfactext1/nfactext2: chunk-length factors as text
    :param nstep: current chunk slider value
    :param pltype: 0=difference, 1=experiment 1, 2=experiment 2
    :return: plotly figure dict for the 'icevol--plot' graph
    """
    nskip1=int(ntext1)
    nskip2=int(ntext2)
    nskip=nskip2-nskip1
    nfac1=int(nfactext1)
    nfac2=int(nfactext2)
    # map stored chunk indices onto the common axis (in place; the stores
    # hand each callback its own deserialized copy)
    for n in range(len(datain2['xx'])):
        datain2['xx'][n]=datain2['xx'][n]*nfac2+nskip2
    for n in range(len(datain1['xx'])):
        datain1['xx'][n]=datain1['xx'][n]*nfac1+nskip1
    trace0 = go.Scatter(
        x = datain1['xx'],
        y = datain1['yy'],
        mode = 'lines',
        name = 'ice volume',
        showlegend=False
    )
    trace1 = go.Scatter(
        x = datain2['xx'],
        y = datain2['yy'],
        mode = 'lines',
        name = 'ice volume',
        showlegend=False
    )
    if pltype==0:
        xdum=max(nskip1,nskip2)+max(nfac1,nfac2)*nstep
        xpo=[xdum,xdum]
        # BUGFIX: '/' is true division in Python 3, so these were floats and
        # raised TypeError when used as list indices; int() truncates the
        # same way as the int() casts used elsewhere in this file.
        ndum1=int(nstep*max(1,nfac2/nfac1)+max(nskip2-nskip1,0)/nfac1)
        ndum2=int(nstep*max(1,nfac1/nfac2)+max(nskip1-nskip2,0)/nfac2)
        ypo=[datain1['yy'][ndum1],datain2['yy'][ndum2]]
    elif pltype==1:
        xpo=[nstep*nfac1+nskip1]
        ypo=[datain1['yy'][nstep]]
    elif pltype==2:
        xpo=[nstep*nfac2+nskip2]
        ypo=[datain2['yy'][nstep]]
    trace2 = go.Scatter(
        x = xpo,
        y = ypo,
        mode = 'markers',
        marker = {'size': 15},
        showlegend=False
    )
    data = [trace0, trace1, trace2]
    return {
        'data': data,
        'layout': go.Layout(
            yaxis={
                'title': 'ice volume [SLE]'
            },
            height=200,
            margin={'l': 40, 'b': 40, 't': 10, 'r': 0},
            hovermode='closest'
        )
    }
@app.callback(dash.dependencies.Output('plot--state', 'children'),
[dash.dependencies.Input('submit-to-plots', 'n_clicks')],
[dash.dependencies.State('chunk--slider', 'value'),
dash.dependencies.State('input-n1','value'),
dash.dependencies.State('input-n2','value'),
dash.dependencies.State('infac-n1','value'),
dash.dependencies.State('infac-n2','value'),
dash.dependencies.State('drop--experiment1', 'value'),
dash.dependencies.State('drop--experiment2', 'value'),
dash.dependencies.State('drop--difference','value')])
def update_output(n_clicks, nchunkin,nskiptext1,nskiptext2,nfact1,nfact2,ename1,ename2,pltype):
    """Status line describing which chunk(s) the contour plots will show.

    In difference mode the two chunk numbers are aligned using the per-dataset
    offsets and factors; otherwise the slider value is reported directly.
    """
    if pltype==0:
        # align the two runs' chunk counters via offset difference and factors
        offset = int(nskiptext1) - int(nskiptext2)
        ne1 = nchunkin
        ne2 = nchunkin
        if offset >= 0:
            ne2 = int(nchunkin + (offset/int(nfact2)))
        else:
            ne1 = int(nchunkin - (offset/int(nfact1)))
        textout = 'Chunk ' + str(ne1) + ' of ' + ename1 + ' minus Chunk ' + str(ne2) + ' of ' + ename2
    elif pltype==1:
        textout = 'Chunk ' + str(nchunkin) + ' of ' + ename1
    elif pltype==2:
        textout = 'Chunk ' + str(nchunkin) + ' of ' + ename2
    return u'''
    Showing {}
    '''.format(textout)
#########################################################################
##
## PRODUCE NH CONTOUR PLOT
##
#########################################################################
@app.callback(dash.dependencies.Output('NH--plot', 'figure'),
[dash.dependencies.Input('submit-to-plots', 'n_clicks')],
[dash.dependencies.State('chunk--slider', 'value'),
dash.dependencies.State('input-n1','value'),
dash.dependencies.State('input-n2','value'),
dash.dependencies.State('infac-n1','value'),
dash.dependencies.State('infac-n2','value'),
dash.dependencies.State('drop--experiment1', 'value'),
dash.dependencies.State('drop--experiment2', 'value'),
dash.dependencies.State('data--path1','value'),
dash.dependencies.State('drop--difference','value')])
def update_NH(n_clicks, nchunkin,nskiptext1,nskiptext2,nfact1,nfact2,ename1,ename2,datapath,pltype):
    """Build the Northern Hemisphere ice-thickness contour figure.

    Reads ice thickness ('h') and the water mask ('maskwater') from the
    selected chunk's fort.92.nc under <datapath><experiment>/output/psuim_nh/.
    pltype==0 plots the thickness difference exp1-exp2 plus both coastlines;
    pltype 1/2 plots a single experiment's land surface and ice thickness.
    :param n_clicks: Update-button clicks (trigger only)
    :param nchunkin: chunk slider value
    :param nskiptext1/nskiptext2: chunk offsets as text
    :param nfact1/nfact2: chunk-length factors as text
    :param ename1/ename2: experiment names
    :param datapath: search path of dataset 1 (must end with '/')
    :param pltype: 0=difference, 1=experiment 1, 2=experiment 2
    :return: plotly figure dict for the 'NH--plot' graph
    """
    # determine chunk numbers:
    ne1=nchunkin
    ne2=nchunkin
    if pltype==0:
        # in difference mode, offset one run so both chunks share a time
        nskipin1=int(nskiptext1)
        nskipin2=int(nskiptext2)
        nskipin=nskipin1-nskipin2
        nfac1=int(nfact1)
        nfac2=int(nfact2)
        if nskipin>=0:
            ne2=int(nchunkin+(nskipin/nfac2))
        else:
            ne1=int(nchunkin-(nskipin/nfac1))
    # now do the plots:
    if pltype==0:
        # experiment 1: collect valid chunk dirs (those with fort.92.nc);
        # nn compensates the index for in-place removals
        PNHPATH=datapath+ename1+'/output/psuim_nh/'
        chunk_dirs=os.listdir(PNHPATH)
        nn=0
        for n in range(len(chunk_dirs)):
            if not os.path.isfile(PNHPATH+chunk_dirs[n-nn]+'/fort.92.nc'):
                chunk_dirs.remove(chunk_dirs[n-nn])
                nn+=1
        DFILE=PNHPATH+chunk_dirs[ne1]+'/fort.92.nc'
        ff = Dataset(DFILE , mode='r')
        h= ff.variables['h']
        lat= ff.variables['lat'][:]
        lon= ff.variables['lon'][:]
        wmask = ff.variables['maskwater']
        lmask1=np.squeeze(wmask[0,:,:])
        hsh1=np.squeeze(h[0,:,:])
        ff.close()
        # experiment 2: same extraction (lat/lon reused from experiment 1)
        PNHPATH=datapath+ename2+'/output/psuim_nh/'
        chunk_dirs=os.listdir(PNHPATH)
        nn=0
        for n in range(len(chunk_dirs)):
            if not os.path.isfile(PNHPATH+chunk_dirs[n-nn]+'/fort.92.nc'):
                chunk_dirs.remove(chunk_dirs[n-nn])
                nn+=1
        DFILE=PNHPATH+chunk_dirs[ne2]+'/fort.92.nc'
        ff = Dataset(DFILE , mode='r')
        h= ff.variables['h']
        wmask = ff.variables['maskwater']
        lmask2=np.squeeze(wmask[0,:,:])
        hsh2=np.squeeze(h[0,:,:])
        ff.close()
        # symmetric color scale around 0; blank out negligible differences
        hdif=hsh1-hsh2
        cscale=np.amax(np.abs(hdif))
        hdif[np.abs(hdif)<0.00001]=np.nan
        trace0= go.Contour(
            x=lon,
            y=lat,
            z=hdif,
            zmin=-cscale,
            zmax=cscale,
            colorscale='RdBu',
            contours={'showlines':False},
            colorbar={
                'yanchor':'middle',
                'lenmode':'fraction',
                'len':0.7
            }
        )
        # overlay both water-mask outlines (gray = exp1, black = exp2)
        trace1= go.Contour(
            x=lon,
            y=lat,
            z=lmask1,
            ncontours=2,
            contours={
                'coloring':'none'},
            line={
                'color':'lightgray',
                'smoothing':1.3,
                'width':2}
        )
        trace2= go.Contour(
            x=lon,
            y=lat,
            z=lmask2,
            ncontours=2,
            contours={
                'coloring':'none'},
            line={
                'color':'black',
                'smoothing':1.3,
                'width':2}
        )
    else:
        # single-experiment view
        if pltype==1:
            ename=ename1
            ne=ne1
        else:
            ename=ename2
            ne=ne2
        PNHPATH=datapath+ename+'/output/psuim_nh/'
        chunk_dirs=os.listdir(PNHPATH)
        nn=0
        for n in range(len(chunk_dirs)):
            if not os.path.isfile(PNHPATH+chunk_dirs[n-nn]+'/fort.92.nc'):
                chunk_dirs.remove(chunk_dirs[n-nn])
                nn+=1
        DFILE=PNHPATH+chunk_dirs[ne]+'/fort.92.nc'
        ff = Dataset(DFILE , mode='r')
        h= ff.variables['h']
        lat= ff.variables['lat'][:]
        lon= ff.variables['lon'][:]
        wmask = ff.variables['maskwater']
        lmask=np.squeeze(wmask[0,:,:])
        hsh=np.squeeze(h[0,:,:])
        ff.close()
        # split the field: hsland = land surface, hsh = ice over water;
        # thickness below 100 is hidden entirely
        hsh[hsh<100.0]=np.nan
        hsland=np.copy(hsh)
        hsland[lmask==1.0]=np.nan
        hsh[lmask==0.0]=np.nan
        trace0= go.Contour(
            x=lon,
            y=lat,
            z=hsland,
            colorscale='YlGnBu',
            contours={'showlines':False},
            colorbar={
                'yanchor':'bottom',
                'lenmode':'fraction',
                'len':0.45
            }
        )
        trace1=go.Contour(
            x=lon,
            y=lat,
            z=hsh,
            colorscale='YlOrRd',
            colorbar={
                'yanchor':'top',
                'lenmode':'fraction',
                'len':0.45
            },
            contours={'showlines':False}
        )
        trace2=go.Contour(
            x=lon,
            y=lat,
            z=lmask,
            ncontours=2,
            contours={
                'coloring':'none'},
            line={
                'color':'lightgray',
                'smoothing':1.3,
                'width':2}
        )
    data = [trace0,trace1,trace2]
    return {
        'data':data,
        'layout':go.Layout(
            title='Northern Hemisphere Ice Thickness',
            xaxis={'showgrid':False,
                   'zeroline':False},
            yaxis={'showgrid':False,
                   'zeroline':False},
            width=1000,
            height=500
        )
    }
#########################################################################
##
## PRODUCE SH CONTOUR PLOT
##
#########################################################################
def _sh_fort92_path(pnhpath, ne):
    """Return the path of the ne-th usable chunk's fort.92.nc under pnhpath.

    Chunk directories are taken in os.listdir() order; only directories
    that actually contain a fort.92.nc output file are counted (a run may
    leave behind incomplete chunk directories).
    """
    chunk_dirs = [d for d in os.listdir(pnhpath)
                  if os.path.isfile(pnhpath + d + '/fort.92.nc')]
    return pnhpath + chunk_dirs[ne] + '/fort.92.nc'


def _sh_read_thickness(dfile):
    """Read ice thickness and water mask from one fort.92.nc chunk file.

    Returns (hsh, lmask): the first time slice of the 'h' (thickness) and
    'maskwater' variables, each squeezed to a 2-D numpy array.
    """
    ff = Dataset(dfile, mode='r')
    try:  # make sure the dataset is closed even if a variable is missing
        hsh = np.squeeze(ff.variables['h'][0, :, :])
        lmask = np.squeeze(ff.variables['maskwater'][0, :, :])
    finally:
        ff.close()
    return hsh, lmask


@app.callback(dash.dependencies.Output('AIS--plot', 'figure'),
      [dash.dependencies.Input('submit-to-plots', 'n_clicks')],
      [dash.dependencies.State('chunk--slider', 'value'),
       dash.dependencies.State('input-n1','value'),
       dash.dependencies.State('input-n2','value'),
       dash.dependencies.State('infac-n1','value'),
       dash.dependencies.State('infac-n2','value'),
       dash.dependencies.State('drop--experiment1', 'value'),
       dash.dependencies.State('drop--experiment2', 'value'),
       dash.dependencies.State('data--path1','value'),
       dash.dependencies.State('drop--difference','value')])
def update_SH(n_clicks, nchunkin, nskiptext1, nskiptext2, nfact1, nfact2,
              ename1, ename2, datapath, pltype):
    """Produce the Southern Hemisphere (Antarctic) ice-thickness contour plot.

    pltype selects the plot mode:
      0         -- difference plot: thickness(exp1) - thickness(exp2),
      1         -- experiment 1 only,
      otherwise -- experiment 2 only.

    The chunk index for each experiment starts from the slider value and,
    in difference mode, is offset using the per-experiment start chunks
    (input-n1/n2) and chunk-length factors (infac-n1/n2) so that both
    chunks correspond to roughly the same model time.
    """
    # Determine chunk numbers for the two experiments.
    ne1 = nchunkin
    ne2 = nchunkin
    if pltype == 0:
        nskipin = int(nskiptext1) - int(nskiptext2)
        nfac1 = int(nfact1)
        nfac2 = int(nfact2)
        # Shift whichever experiment starts earlier in model time.
        if nskipin >= 0:
            ne2 = int(nchunkin + (nskipin / nfac2))
        else:
            ne1 = int(nchunkin - (nskipin / nfac1))
    # Now do the plots:
    if pltype == 0:
        # Difference plot between the two experiments.
        pnhpath1 = datapath + ename1 + '/output/psuim_sh/'
        hsh1, lmask1 = _sh_read_thickness(_sh_fort92_path(pnhpath1, ne1))
        pnhpath2 = datapath + ename2 + '/output/psuim_sh/'
        hsh2, lmask2 = _sh_read_thickness(_sh_fort92_path(pnhpath2, ne2))
        hdif = hsh1 - hsh2
        # Symmetric colour range; compute it before masking tiny values,
        # so NaNs cannot contaminate the max.
        cscale = np.amax(np.abs(hdif))
        hdif[np.abs(hdif) < 0.00001] = np.nan  # hide negligible differences
        trace0 = go.Contour(
            z=hdif,
            zmin=-cscale,
            zmax=cscale,
            colorscale='RdBu',
            contours={'showlines': False},
            colorbar={
                'yanchor': 'middle',
                'lenmode': 'fraction',
                'len': 0.7
            }
        )
        # Water-mask outlines of both experiments for orientation.
        trace1 = go.Contour(
            z=lmask1,
            ncontours=2,
            contours={'coloring': 'none'},
            line={'color': 'lightgray', 'smoothing': 1.3, 'width': 2}
        )
        trace2 = go.Contour(
            z=lmask2,
            ncontours=2,
            contours={'coloring': 'none'},
            line={'color': 'black', 'smoothing': 1.3, 'width': 2}
        )
    else:
        # Single-experiment plot: pick the experiment by pltype.
        if pltype == 1:
            ename = ename1
            ne = ne1
        else:
            ename = ename2
            ne = ne2
        pnhpath = datapath + ename + '/output/psuim_sh/'
        hsh, lmask = _sh_read_thickness(_sh_fort92_path(pnhpath, ne))
        hsh[hsh < 100.0] = np.nan        # drop very thin / absent ice
        hsland = np.copy(hsh)
        hsland[lmask == 1.0] = np.nan    # grounded (land) ice only
        hsh[lmask == 0.0] = np.nan       # ice over water only
        trace0 = go.Contour(
            z=hsland,
            colorscale='YlGnBu',
            contours={'showlines': False},
            colorbar={
                'yanchor': 'bottom',
                'lenmode': 'fraction',
                'len': 0.45
            }
        )
        trace1 = go.Contour(
            z=hsh,
            colorscale='YlOrRd',
            colorbar={
                'yanchor': 'top',
                'lenmode': 'fraction',
                'len': 0.45
            },
            contours={'showlines': False}
        )
        trace2 = go.Contour(
            z=lmask,
            ncontours=2,
            contours={'coloring': 'none'},
            line={'color': 'lightgray', 'smoothing': 1.3, 'width': 2}
        )
    data = [trace0, trace1, trace2]
    return {
        'data': data,
        'layout': go.Layout(
            title='Antarctic Ice Thickness',
            xaxis={'showgrid': False,
                   'zeroline': False},
            yaxis={'showgrid': False,
                   'zeroline': False},
            width=550,
            height=500
        )
    }
# Start the Dash development server only when this file is run directly
# (not when it is imported as a module).
if __name__ == '__main__':
    app.run_server(debug=True)
| [
"numpy.abs",
"dash_core_components.Input",
"os.path.isfile",
"netCDF4.Dataset",
"dash.Dash",
"dash_core_components.Slider",
"numpy.copy",
"dash_html_components.Div",
"dash.dependencies.State",
"numpy.loadtxt",
"dash_core_components.Store",
"plotly.graph_objs.Scatter",
"dash_html_components.B... | [((1066, 1083), 'os.listdir', 'os.listdir', (['DPATH'], {}), '(DPATH)\n', (1076, 1083), False, 'import os\n'), ((1553, 1615), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': 'external_stylesheets'}), '(__name__, external_stylesheets=external_stylesheets)\n', (1562, 1615), False, 'import dash\n'), ((7505, 7525), 'os.listdir', 'os.listdir', (['datapath'], {}), '(datapath)\n', (7515, 7525), False, 'import os\n'), ((7343, 7399), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""drop--experiment1"""', '"""options"""'], {}), "('drop--experiment1', 'options')\n", (7367, 7399), False, 'import dash\n'), ((7940, 7960), 'os.listdir', 'os.listdir', (['datapath'], {}), '(datapath)\n', (7950, 7960), False, 'import os\n'), ((7778, 7834), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""drop--experiment2"""', '"""options"""'], {}), "('drop--experiment2', 'options')\n", (7802, 7834), False, 'import dash\n'), ((8237, 8283), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""data--exp1"""', '"""data"""'], {}), "('data--exp1', 'data')\n", (8261, 8283), False, 'import dash\n'), ((8512, 8558), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""data--exp2"""', '"""data"""'], {}), "('data--exp2', 'data')\n", (8536, 8558), False, 'import dash\n'), ((8787, 8842), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""drop--difference"""', '"""options"""'], {}), "('drop--difference', 'options')\n", (8811, 8842), False, 'import dash\n'), ((9382, 9432), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""chunk--slider"""', '"""value"""'], {}), "('chunk--slider', 'value')\n", (9406, 9432), False, 'import dash\n'), ((10097, 10145), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""exp--icevol1"""', '"""data"""'], {}), "('exp--icevol1', 'data')\n", (10121, 10145), False, 'import dash\n'), ((10468, 10516), 'dash.dependencies.Output', 'dash.dependencies.Output', 
(['"""exp--icevol2"""', '"""data"""'], {}), "('exp--icevol2', 'data')\n", (10492, 10516), False, 'import dash\n'), ((10843, 10891), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""chunk--slider"""', '"""max"""'], {}), "('chunk--slider', 'max')\n", (10867, 10891), False, 'import dash\n'), ((13875, 13975), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': "datain1['xx']", 'y': "datain1['yy']", 'mode': '"""lines"""', 'name': '"""ice volume"""', 'showlegend': '(False)'}), "(x=datain1['xx'], y=datain1['yy'], mode='lines', name=\n 'ice volume', showlegend=False)\n", (13885, 13975), True, 'import plotly.graph_objs as go\n'), ((14038, 14138), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': "datain2['xx']", 'y': "datain2['yy']", 'mode': '"""lines"""', 'name': '"""ice volume"""', 'showlegend': '(False)'}), "(x=datain2['xx'], y=datain2['yy'], mode='lines', name=\n 'ice volume', showlegend=False)\n", (14048, 14138), True, 'import plotly.graph_objs as go\n'), ((14686, 14765), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'xpo', 'y': 'ypo', 'mode': '"""markers"""', 'marker': "{'size': 15}", 'showlegend': '(False)'}), "(x=xpo, y=ypo, mode='markers', marker={'size': 15}, showlegend=False)\n", (14696, 14765), True, 'import plotly.graph_objs as go\n'), ((12982, 13032), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""icevol--plot"""', '"""figure"""'], {}), "('icevol--plot', 'figure')\n", (13006, 13032), False, 'import dash\n'), ((15142, 15193), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""plot--state"""', '"""children"""'], {}), "('plot--state', 'children')\n", (15166, 15193), False, 'import dash\n'), ((16773, 16819), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""NH--plot"""', '"""figure"""'], {}), "('NH--plot', 'figure')\n", (16797, 16819), False, 'import dash\n'), ((22631, 22678), 'dash.dependencies.Output', 'dash.dependencies.Output', (['"""AIS--plot"""', '"""figure"""'], {}), "('AIS--plot', 
'figure')\n", (22655, 22678), False, 'import dash\n'), ((1140, 1200), 'os.path.isdir', 'os.path.isdir', (["(DPATH + all_experiments[n - nnnn] + '/output')"], {}), "(DPATH + all_experiments[n - nnnn] + '/output')\n", (1153, 1200), False, 'import os\n'), ((5735, 5761), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""data--exp1"""'}), "(id='data--exp1')\n", (5744, 5761), True, 'import dash_core_components as dcc\n'), ((5767, 5793), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""data--exp2"""'}), "(id='data--exp2')\n", (5776, 5793), True, 'import dash_core_components as dcc\n'), ((5809, 5837), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""exp--icevol1"""'}), "(id='exp--icevol1')\n", (5818, 5837), True, 'import dash_core_components as dcc\n'), ((5843, 5871), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""exp--icevol2"""'}), "(id='exp--icevol2')\n", (5852, 5871), True, 'import dash_core_components as dcc\n'), ((6036, 6090), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""chunk--slider"""', 'min': '(0)', 'value': '(0)', 'step': '(1)'}), "(id='chunk--slider', min=0, value=0, step=1)\n", (6046, 6090), True, 'import dash_core_components as dcc\n'), ((6134, 6162), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""icevol--plot"""'}), "(id='icevol--plot')\n", (6143, 6162), True, 'import dash_core_components as dcc\n'), ((7407, 7454), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--path1"""', '"""value"""'], {}), "('data--path1', 'value')\n", (7430, 7454), False, 'import dash\n'), ((7842, 7889), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--path2"""', '"""value"""'], {}), "('data--path2', 'value')\n", (7865, 7889), False, 'import dash\n'), ((8291, 8338), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--path1"""', '"""value"""'], {}), "('data--path1', 'value')\n", (8314, 8338), False, 'import dash\n'), ((8344, 8397), 'dash.dependencies.Input', 
'dash.dependencies.Input', (['"""drop--experiment1"""', '"""value"""'], {}), "('drop--experiment1', 'value')\n", (8367, 8397), False, 'import dash\n'), ((8566, 8613), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--path2"""', '"""value"""'], {}), "('data--path2', 'value')\n", (8589, 8613), False, 'import dash\n'), ((8619, 8672), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""drop--experiment2"""', '"""value"""'], {}), "('drop--experiment2', 'value')\n", (8642, 8672), False, 'import dash\n'), ((8849, 8902), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""drop--experiment1"""', '"""value"""'], {}), "('drop--experiment1', 'value')\n", (8872, 8902), False, 'import dash\n'), ((8908, 8961), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""drop--experiment2"""', '"""value"""'], {}), "('drop--experiment2', 'value')\n", (8931, 8961), False, 'import dash\n'), ((9438, 9483), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--exp1"""', '"""data"""'], {}), "('data--exp1', 'data')\n", (9461, 9483), False, 'import dash\n'), ((9489, 9534), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--exp2"""', '"""data"""'], {}), "('data--exp2', 'data')\n", (9512, 9534), False, 'import dash\n'), ((9540, 9584), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""input-n1"""', '"""value"""'], {}), "('input-n1', 'value')\n", (9563, 9584), False, 'import dash\n'), ((9590, 9634), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""input-n2"""', '"""value"""'], {}), "('input-n2', 'value')\n", (9613, 9634), False, 'import dash\n'), ((9640, 9684), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""infac-n1"""', '"""value"""'], {}), "('infac-n1', 'value')\n", (9663, 9684), False, 'import dash\n'), ((9690, 9734), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""infac-n2"""', '"""value"""'], {}), "('infac-n2', 'value')\n", (9713, 9734), False, 'import dash\n'), ((9746, 9798), 
'dash.dependencies.Input', 'dash.dependencies.Input', (['"""drop--difference"""', '"""value"""'], {}), "('drop--difference', 'value')\n", (9769, 9798), False, 'import dash\n'), ((10370, 10384), 'numpy.squeeze', 'np.squeeze', (['iv'], {}), '(iv)\n', (10380, 10384), True, 'import numpy as np\n'), ((10151, 10196), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--exp1"""', '"""data"""'], {}), "('data--exp1', 'data')\n", (10174, 10196), False, 'import dash\n'), ((10745, 10759), 'numpy.squeeze', 'np.squeeze', (['iv'], {}), '(iv)\n', (10755, 10759), True, 'import numpy as np\n'), ((10522, 10567), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--exp2"""', '"""data"""'], {}), "('data--exp2', 'data')\n", (10545, 10567), False, 'import dash\n'), ((11478, 11497), 'os.listdir', 'os.listdir', (['PNHPATH'], {}), '(PNHPATH)\n', (11488, 11497), False, 'import os\n'), ((11836, 11855), 'os.listdir', 'os.listdir', (['PNHPATH'], {}), '(PNHPATH)\n', (11846, 11855), False, 'import os\n'), ((10897, 10942), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--exp1"""', '"""data"""'], {}), "('data--exp1', 'data')\n", (10920, 10942), False, 'import dash\n'), ((10948, 10993), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""data--exp2"""', '"""data"""'], {}), "('data--exp2', 'data')\n", (10971, 10993), False, 'import dash\n'), ((10999, 11043), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""input-n1"""', '"""value"""'], {}), "('input-n1', 'value')\n", (11022, 11043), False, 'import dash\n'), ((11049, 11093), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""input-n2"""', '"""value"""'], {}), "('input-n2', 'value')\n", (11072, 11093), False, 'import dash\n'), ((11099, 11143), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""infac-n1"""', '"""value"""'], {}), "('infac-n1', 'value')\n", (11122, 11143), False, 'import dash\n'), ((11149, 11193), 'dash.dependencies.Input', 'dash.dependencies.Input', 
(['"""infac-n2"""', '"""value"""'], {}), "('infac-n2', 'value')\n", (11172, 11193), False, 'import dash\n'), ((11204, 11256), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""drop--difference"""', '"""value"""'], {}), "('drop--difference', 'value')\n", (11227, 11256), False, 'import dash\n'), ((14909, 15036), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'yaxis': "{'title': 'ice volume [SLE]'}", 'height': '(200)', 'margin': "{'l': 40, 'b': 40, 't': 10, 'r': 0}", 'hovermode': '"""closest"""'}), "(yaxis={'title': 'ice volume [SLE]'}, height=200, margin={'l': 40,\n 'b': 40, 't': 10, 'r': 0}, hovermode='closest')\n", (14918, 15036), True, 'import plotly.graph_objs as go\n'), ((13039, 13086), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""exp--icevol1"""', '"""data"""'], {}), "('exp--icevol1', 'data')\n", (13062, 13086), False, 'import dash\n'), ((13093, 13140), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""exp--icevol2"""', '"""data"""'], {}), "('exp--icevol2', 'data')\n", (13116, 13140), False, 'import dash\n'), ((13147, 13191), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""input-n1"""', '"""value"""'], {}), "('input-n1', 'value')\n", (13170, 13191), False, 'import dash\n'), ((13197, 13241), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""input-n2"""', '"""value"""'], {}), "('input-n2', 'value')\n", (13220, 13241), False, 'import dash\n'), ((13247, 13291), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""infac-n1"""', '"""value"""'], {}), "('infac-n1', 'value')\n", (13270, 13291), False, 'import dash\n'), ((13297, 13341), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""infac-n2"""', '"""value"""'], {}), "('infac-n2', 'value')\n", (13320, 13341), False, 'import dash\n'), ((13348, 13397), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""chunk--slider"""', '"""value"""'], {}), "('chunk--slider', 'value')\n", (13371, 13397), False, 'import dash\n'), ((13404, 13456), 
'dash.dependencies.Input', 'dash.dependencies.Input', (['"""drop--difference"""', '"""value"""'], {}), "('drop--difference', 'value')\n", (13427, 13456), False, 'import dash\n'), ((15210, 15264), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""submit-to-plots"""', '"""n_clicks"""'], {}), "('submit-to-plots', 'n_clicks')\n", (15233, 15264), False, 'import dash\n'), ((15282, 15331), 'dash.dependencies.State', 'dash.dependencies.State', (['"""chunk--slider"""', '"""value"""'], {}), "('chunk--slider', 'value')\n", (15305, 15331), False, 'import dash\n'), ((15348, 15392), 'dash.dependencies.State', 'dash.dependencies.State', (['"""input-n1"""', '"""value"""'], {}), "('input-n1', 'value')\n", (15371, 15392), False, 'import dash\n'), ((15408, 15452), 'dash.dependencies.State', 'dash.dependencies.State', (['"""input-n2"""', '"""value"""'], {}), "('input-n2', 'value')\n", (15431, 15452), False, 'import dash\n'), ((15468, 15512), 'dash.dependencies.State', 'dash.dependencies.State', (['"""infac-n1"""', '"""value"""'], {}), "('infac-n1', 'value')\n", (15491, 15512), False, 'import dash\n'), ((15528, 15572), 'dash.dependencies.State', 'dash.dependencies.State', (['"""infac-n2"""', '"""value"""'], {}), "('infac-n2', 'value')\n", (15551, 15572), False, 'import dash\n'), ((15603, 15656), 'dash.dependencies.State', 'dash.dependencies.State', (['"""drop--experiment1"""', '"""value"""'], {}), "('drop--experiment1', 'value')\n", (15626, 15656), False, 'import dash\n'), ((15673, 15726), 'dash.dependencies.State', 'dash.dependencies.State', (['"""drop--experiment2"""', '"""value"""'], {}), "('drop--experiment2', 'value')\n", (15696, 15726), False, 'import dash\n'), ((15743, 15795), 'dash.dependencies.State', 'dash.dependencies.State', (['"""drop--difference"""', '"""value"""'], {}), "('drop--difference', 'value')\n", (15766, 15795), False, 'import dash\n'), ((18072, 18091), 'os.listdir', 'os.listdir', (['PNHPATH'], {}), '(PNHPATH)\n', (18082, 18091), False, 'import os\n'), 
((18361, 18385), 'netCDF4.Dataset', 'Dataset', (['DFILE'], {'mode': '"""r"""'}), "(DFILE, mode='r')\n", (18368, 18385), False, 'from netCDF4 import Dataset\n'), ((18546, 18572), 'numpy.squeeze', 'np.squeeze', (['wmask[0, :, :]'], {}), '(wmask[0, :, :])\n', (18556, 18572), True, 'import numpy as np\n'), ((18588, 18610), 'numpy.squeeze', 'np.squeeze', (['h[0, :, :]'], {}), '(h[0, :, :])\n', (18598, 18610), True, 'import numpy as np\n'), ((18699, 18718), 'os.listdir', 'os.listdir', (['PNHPATH'], {}), '(PNHPATH)\n', (18709, 18718), False, 'import os\n'), ((18988, 19012), 'netCDF4.Dataset', 'Dataset', (['DFILE'], {'mode': '"""r"""'}), "(DFILE, mode='r')\n", (18995, 19012), False, 'from netCDF4 import Dataset\n'), ((19101, 19127), 'numpy.squeeze', 'np.squeeze', (['wmask[0, :, :]'], {}), '(wmask[0, :, :])\n', (19111, 19127), True, 'import numpy as np\n'), ((19143, 19165), 'numpy.squeeze', 'np.squeeze', (['h[0, :, :]'], {}), '(h[0, :, :])\n', (19153, 19165), True, 'import numpy as np\n'), ((19301, 19486), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'x': 'lon', 'y': 'lat', 'z': 'hdif', 'zmin': '(-cscale)', 'zmax': 'cscale', 'colorscale': '"""RdBu"""', 'contours': "{'showlines': False}", 'colorbar': "{'yanchor': 'middle', 'lenmode': 'fraction', 'len': 0.7}"}), "(x=lon, y=lat, z=hdif, zmin=-cscale, zmax=cscale, colorscale=\n 'RdBu', contours={'showlines': False}, colorbar={'yanchor': 'middle',\n 'lenmode': 'fraction', 'len': 0.7})\n", (19311, 19486), True, 'import plotly.graph_objs as go\n'), ((19662, 19803), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'x': 'lon', 'y': 'lat', 'z': 'lmask1', 'ncontours': '(2)', 'contours': "{'coloring': 'none'}", 'line': "{'color': 'lightgray', 'smoothing': 1.3, 'width': 2}"}), "(x=lon, y=lat, z=lmask1, ncontours=2, contours={'coloring':\n 'none'}, line={'color': 'lightgray', 'smoothing': 1.3, 'width': 2})\n", (19672, 19803), True, 'import plotly.graph_objs as go\n'), ((19960, 20097), 'plotly.graph_objs.Contour', 'go.Contour', ([], 
{'x': 'lon', 'y': 'lat', 'z': 'lmask2', 'ncontours': '(2)', 'contours': "{'coloring': 'none'}", 'line': "{'color': 'black', 'smoothing': 1.3, 'width': 2}"}), "(x=lon, y=lat, z=lmask2, ncontours=2, contours={'coloring':\n 'none'}, line={'color': 'black', 'smoothing': 1.3, 'width': 2})\n", (19970, 20097), True, 'import plotly.graph_objs as go\n'), ((20442, 20461), 'os.listdir', 'os.listdir', (['PNHPATH'], {}), '(PNHPATH)\n', (20452, 20461), False, 'import os\n'), ((20730, 20754), 'netCDF4.Dataset', 'Dataset', (['DFILE'], {'mode': '"""r"""'}), "(DFILE, mode='r')\n", (20737, 20754), False, 'from netCDF4 import Dataset\n'), ((20914, 20940), 'numpy.squeeze', 'np.squeeze', (['wmask[0, :, :]'], {}), '(wmask[0, :, :])\n', (20924, 20940), True, 'import numpy as np\n'), ((20955, 20977), 'numpy.squeeze', 'np.squeeze', (['h[0, :, :]'], {}), '(h[0, :, :])\n', (20965, 20977), True, 'import numpy as np\n'), ((21060, 21072), 'numpy.copy', 'np.copy', (['hsh'], {}), '(hsh)\n', (21067, 21072), True, 'import numpy as np\n'), ((21154, 21317), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'x': 'lon', 'y': 'lat', 'z': 'hsland', 'colorscale': '"""YlGnBu"""', 'contours': "{'showlines': False}", 'colorbar': "{'yanchor': 'bottom', 'lenmode': 'fraction', 'len': 0.45}"}), "(x=lon, y=lat, z=hsland, colorscale='YlGnBu', contours={\n 'showlines': False}, colorbar={'yanchor': 'bottom', 'lenmode':\n 'fraction', 'len': 0.45})\n", (21164, 21317), True, 'import plotly.graph_objs as go\n'), ((21468, 21620), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'x': 'lon', 'y': 'lat', 'z': 'hsh', 'colorscale': '"""YlOrRd"""', 'colorbar': "{'yanchor': 'top', 'lenmode': 'fraction', 'len': 0.45}", 'contours': "{'showlines': False}"}), "(x=lon, y=lat, z=hsh, colorscale='YlOrRd', colorbar={'yanchor':\n 'top', 'lenmode': 'fraction', 'len': 0.45}, contours={'showlines': False})\n", (21478, 21620), True, 'import plotly.graph_objs as go\n'), ((21785, 21926), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'x': 
'lon', 'y': 'lat', 'z': 'lmask', 'ncontours': '(2)', 'contours': "{'coloring': 'none'}", 'line': "{'color': 'lightgray', 'smoothing': 1.3, 'width': 2}"}), "(x=lon, y=lat, z=lmask, ncontours=2, contours={'coloring': 'none'\n }, line={'color': 'lightgray', 'smoothing': 1.3, 'width': 2})\n", (21795, 21926), True, 'import plotly.graph_objs as go\n'), ((22155, 22332), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': '"""Northern Hemisphere Ice Thickness"""', 'xaxis': "{'showgrid': False, 'zeroline': False}", 'yaxis': "{'showgrid': False, 'zeroline': False}", 'width': '(1000)', 'height': '(500)'}), "(title='Northern Hemisphere Ice Thickness', xaxis={'showgrid': \n False, 'zeroline': False}, yaxis={'showgrid': False, 'zeroline': False},\n width=1000, height=500)\n", (22164, 22332), True, 'import plotly.graph_objs as go\n'), ((16836, 16890), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""submit-to-plots"""', '"""n_clicks"""'], {}), "('submit-to-plots', 'n_clicks')\n", (16859, 16890), False, 'import dash\n'), ((16908, 16957), 'dash.dependencies.State', 'dash.dependencies.State', (['"""chunk--slider"""', '"""value"""'], {}), "('chunk--slider', 'value')\n", (16931, 16957), False, 'import dash\n'), ((16974, 17018), 'dash.dependencies.State', 'dash.dependencies.State', (['"""input-n1"""', '"""value"""'], {}), "('input-n1', 'value')\n", (16997, 17018), False, 'import dash\n'), ((17034, 17078), 'dash.dependencies.State', 'dash.dependencies.State', (['"""input-n2"""', '"""value"""'], {}), "('input-n2', 'value')\n", (17057, 17078), False, 'import dash\n'), ((17094, 17138), 'dash.dependencies.State', 'dash.dependencies.State', (['"""infac-n1"""', '"""value"""'], {}), "('infac-n1', 'value')\n", (17117, 17138), False, 'import dash\n'), ((17154, 17198), 'dash.dependencies.State', 'dash.dependencies.State', (['"""infac-n2"""', '"""value"""'], {}), "('infac-n2', 'value')\n", (17177, 17198), False, 'import dash\n'), ((17214, 17267), 'dash.dependencies.State', 
'dash.dependencies.State', (['"""drop--experiment1"""', '"""value"""'], {}), "('drop--experiment1', 'value')\n", (17237, 17267), False, 'import dash\n'), ((17284, 17337), 'dash.dependencies.State', 'dash.dependencies.State', (['"""drop--experiment2"""', '"""value"""'], {}), "('drop--experiment2', 'value')\n", (17307, 17337), False, 'import dash\n'), ((17355, 17402), 'dash.dependencies.State', 'dash.dependencies.State', (['"""data--path1"""', '"""value"""'], {}), "('data--path1', 'value')\n", (17378, 17402), False, 'import dash\n'), ((17418, 17470), 'dash.dependencies.State', 'dash.dependencies.State', (['"""drop--difference"""', '"""value"""'], {}), "('drop--difference', 'value')\n", (17441, 17470), False, 'import dash\n'), ((23931, 23950), 'os.listdir', 'os.listdir', (['PNHPATH'], {}), '(PNHPATH)\n', (23941, 23950), False, 'import os\n'), ((24220, 24244), 'netCDF4.Dataset', 'Dataset', (['DFILE'], {'mode': '"""r"""'}), "(DFILE, mode='r')\n", (24227, 24244), False, 'from netCDF4 import Dataset\n'), ((24333, 24359), 'numpy.squeeze', 'np.squeeze', (['wmask[0, :, :]'], {}), '(wmask[0, :, :])\n', (24343, 24359), True, 'import numpy as np\n'), ((24375, 24397), 'numpy.squeeze', 'np.squeeze', (['h[0, :, :]'], {}), '(h[0, :, :])\n', (24385, 24397), True, 'import numpy as np\n'), ((24486, 24505), 'os.listdir', 'os.listdir', (['PNHPATH'], {}), '(PNHPATH)\n', (24496, 24505), False, 'import os\n'), ((24775, 24799), 'netCDF4.Dataset', 'Dataset', (['DFILE'], {'mode': '"""r"""'}), "(DFILE, mode='r')\n", (24782, 24799), False, 'from netCDF4 import Dataset\n'), ((24888, 24914), 'numpy.squeeze', 'np.squeeze', (['wmask[0, :, :]'], {}), '(wmask[0, :, :])\n', (24898, 24914), True, 'import numpy as np\n'), ((24930, 24952), 'numpy.squeeze', 'np.squeeze', (['h[0, :, :]'], {}), '(h[0, :, :])\n', (24940, 24952), True, 'import numpy as np\n'), ((25088, 25259), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'z': 'hdif', 'zmin': '(-cscale)', 'zmax': 'cscale', 'colorscale': '"""RdBu"""', 
'contours': "{'showlines': False}", 'colorbar': "{'yanchor': 'middle', 'lenmode': 'fraction', 'len': 0.7}"}), "(z=hdif, zmin=-cscale, zmax=cscale, colorscale='RdBu', contours={\n 'showlines': False}, colorbar={'yanchor': 'middle', 'lenmode':\n 'fraction', 'len': 0.7})\n", (25098, 25259), True, 'import plotly.graph_objs as go\n'), ((25411, 25539), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'z': 'lmask1', 'ncontours': '(2)', 'contours': "{'coloring': 'none'}", 'line': "{'color': 'lightgray', 'smoothing': 1.3, 'width': 2}"}), "(z=lmask1, ncontours=2, contours={'coloring': 'none'}, line={\n 'color': 'lightgray', 'smoothing': 1.3, 'width': 2})\n", (25421, 25539), True, 'import plotly.graph_objs as go\n'), ((25671, 25795), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'z': 'lmask2', 'ncontours': '(2)', 'contours': "{'coloring': 'none'}", 'line': "{'color': 'black', 'smoothing': 1.3, 'width': 2}"}), "(z=lmask2, ncontours=2, contours={'coloring': 'none'}, line={\n 'color': 'black', 'smoothing': 1.3, 'width': 2})\n", (25681, 25795), True, 'import plotly.graph_objs as go\n'), ((26115, 26134), 'os.listdir', 'os.listdir', (['PNHPATH'], {}), '(PNHPATH)\n', (26125, 26134), False, 'import os\n'), ((26403, 26427), 'netCDF4.Dataset', 'Dataset', (['DFILE'], {'mode': '"""r"""'}), "(DFILE, mode='r')\n", (26410, 26427), False, 'from netCDF4 import Dataset\n'), ((26515, 26541), 'numpy.squeeze', 'np.squeeze', (['wmask[0, :, :]'], {}), '(wmask[0, :, :])\n', (26525, 26541), True, 'import numpy as np\n'), ((26556, 26578), 'numpy.squeeze', 'np.squeeze', (['h[0, :, :]'], {}), '(h[0, :, :])\n', (26566, 26578), True, 'import numpy as np\n'), ((26661, 26673), 'numpy.copy', 'np.copy', (['hsh'], {}), '(hsh)\n', (26668, 26673), True, 'import numpy as np\n'), ((26755, 26899), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'z': 'hsland', 'colorscale': '"""YlGnBu"""', 'contours': "{'showlines': False}", 'colorbar': "{'yanchor': 'bottom', 'lenmode': 'fraction', 'len': 0.45}"}), "(z=hsland, 
colorscale='YlGnBu', contours={'showlines': False},\n colorbar={'yanchor': 'bottom', 'lenmode': 'fraction', 'len': 0.45})\n", (26765, 26899), True, 'import plotly.graph_objs as go\n'), ((27031, 27169), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'z': 'hsh', 'colorscale': '"""YlOrRd"""', 'colorbar': "{'yanchor': 'top', 'lenmode': 'fraction', 'len': 0.45}", 'contours': "{'showlines': False}"}), "(z=hsh, colorscale='YlOrRd', colorbar={'yanchor': 'top',\n 'lenmode': 'fraction', 'len': 0.45}, contours={'showlines': False})\n", (27041, 27169), True, 'import plotly.graph_objs as go\n'), ((27310, 27437), 'plotly.graph_objs.Contour', 'go.Contour', ([], {'z': 'lmask', 'ncontours': '(2)', 'contours': "{'coloring': 'none'}", 'line': "{'color': 'lightgray', 'smoothing': 1.3, 'width': 2}"}), "(z=lmask, ncontours=2, contours={'coloring': 'none'}, line={\n 'color': 'lightgray', 'smoothing': 1.3, 'width': 2})\n", (27320, 27437), True, 'import plotly.graph_objs as go\n'), ((27640, 27806), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': '"""Antarctic Ice Thickness"""', 'xaxis': "{'showgrid': False, 'zeroline': False}", 'yaxis': "{'showgrid': False, 'zeroline': False}", 'width': '(550)', 'height': '(500)'}), "(title='Antarctic Ice Thickness', xaxis={'showgrid': False,\n 'zeroline': False}, yaxis={'showgrid': False, 'zeroline': False}, width\n =550, height=500)\n", (27649, 27806), True, 'import plotly.graph_objs as go\n'), ((22695, 22749), 'dash.dependencies.Input', 'dash.dependencies.Input', (['"""submit-to-plots"""', '"""n_clicks"""'], {}), "('submit-to-plots', 'n_clicks')\n", (22718, 22749), False, 'import dash\n'), ((22767, 22816), 'dash.dependencies.State', 'dash.dependencies.State', (['"""chunk--slider"""', '"""value"""'], {}), "('chunk--slider', 'value')\n", (22790, 22816), False, 'import dash\n'), ((22833, 22877), 'dash.dependencies.State', 'dash.dependencies.State', (['"""input-n1"""', '"""value"""'], {}), "('input-n1', 'value')\n", (22856, 22877), False, 'import 
dash\n'), ((22893, 22937), 'dash.dependencies.State', 'dash.dependencies.State', (['"""input-n2"""', '"""value"""'], {}), "('input-n2', 'value')\n", (22916, 22937), False, 'import dash\n'), ((22953, 22997), 'dash.dependencies.State', 'dash.dependencies.State', (['"""infac-n1"""', '"""value"""'], {}), "('infac-n1', 'value')\n", (22976, 22997), False, 'import dash\n'), ((23013, 23057), 'dash.dependencies.State', 'dash.dependencies.State', (['"""infac-n2"""', '"""value"""'], {}), "('infac-n2', 'value')\n", (23036, 23057), False, 'import dash\n'), ((23073, 23126), 'dash.dependencies.State', 'dash.dependencies.State', (['"""drop--experiment1"""', '"""value"""'], {}), "('drop--experiment1', 'value')\n", (23096, 23126), False, 'import dash\n'), ((23143, 23196), 'dash.dependencies.State', 'dash.dependencies.State', (['"""drop--experiment2"""', '"""value"""'], {}), "('drop--experiment2', 'value')\n", (23166, 23196), False, 'import dash\n'), ((23214, 23261), 'dash.dependencies.State', 'dash.dependencies.State', (['"""data--path1"""', '"""value"""'], {}), "('data--path1', 'value')\n", (23237, 23261), False, 'import dash\n'), ((23277, 23329), 'dash.dependencies.State', 'dash.dependencies.State', (['"""drop--difference"""', '"""value"""'], {}), "('drop--difference', 'value')\n", (23300, 23329), False, 'import dash\n'), ((1789, 1845), 'dash_html_components.H1', 'html.H1', ([], {'id': '"""app-headline"""', 'children': '"""CLIMS data viewer"""'}), "(id='app-headline', children='CLIMS data viewer')\n", (1796, 1845), True, 'import dash_html_components as html\n'), ((1992, 2038), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""delta-n-text"""', 'children': '""" Deln:"""'}), "(id='delta-n-text', children=' Deln:')\n", (2000, 2038), True, 'import dash_html_components as html\n'), ((2186, 2235), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""factor-n-text"""', 'children': '""" Chunkl:"""'}), "(id='factor-n-text', children=' Chunkl:')\n", (2194, 2235), True, 'import 
dash_html_components as html\n'), ((2515, 2630), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""data--path1"""', 'value': '"""/Volumes/Chunky/LOVECLIP/LOVECLIM1.3/"""', 'type': '"""text"""', 'size': '(60)', 'debounce': '(True)'}), "(id='data--path1', value='/Volumes/Chunky/LOVECLIP/LOVECLIM1.3/',\n type='text', size=60, debounce=True)\n", (2524, 2630), True, 'import dash_core_components as dcc\n'), ((2812, 2878), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""drop--experiment1"""', 'value': 'DNAME', 'clearable': '(False)'}), "(id='drop--experiment1', value=DNAME, clearable=False)\n", (2824, 2878), True, 'import dash_core_components as dcc\n'), ((3116, 3153), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""delta-n1"""', 'children': '""" """'}), "(id='delta-n1', children=' ')\n", (3124, 3153), True, 'import dash_html_components as html\n'), ((3299, 3368), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input-n1"""', 'value': '(0)', 'type': '"""text"""', 'size': '(5)', 'debounce': '(True)'}), "(id='input-n1', value=0, type='text', size=5, debounce=True)\n", (3308, 3368), True, 'import dash_core_components as dcc\n'), ((3550, 3588), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""factor-n1"""', 'children': '""" """'}), "(id='factor-n1', children=' ')\n", (3558, 3588), True, 'import dash_html_components as html\n'), ((3734, 3803), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""infac-n1"""', 'value': '(1)', 'type': '"""text"""', 'size': '(5)', 'debounce': '(True)'}), "(id='infac-n1', value=1, type='text', size=5, debounce=True)\n", (3743, 3803), True, 'import dash_core_components as dcc\n'), ((4114, 4229), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""data--path2"""', 'value': '"""/Volumes/Chunky/LOVECLIP/LOVECLIM1.3/"""', 'type': '"""text"""', 'size': '(60)', 'debounce': '(True)'}), "(id='data--path2', value='/Volumes/Chunky/LOVECLIP/LOVECLIM1.3/',\n type='text', size=60, 
debounce=True)\n", (4123, 4229), True, 'import dash_core_components as dcc\n'), ((4412, 4478), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""drop--experiment2"""', 'value': 'DNAME', 'clearable': '(False)'}), "(id='drop--experiment2', value=DNAME, clearable=False)\n", (4424, 4478), True, 'import dash_core_components as dcc\n'), ((4711, 4748), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""delta-n2"""', 'children': '""" """'}), "(id='delta-n2', children=' ')\n", (4719, 4748), True, 'import dash_html_components as html\n'), ((4894, 4963), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input-n2"""', 'value': '(0)', 'type': '"""text"""', 'size': '(5)', 'debounce': '(True)'}), "(id='input-n2', value=0, type='text', size=5, debounce=True)\n", (4903, 4963), True, 'import dash_core_components as dcc\n'), ((5149, 5186), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""factor-n2"""', 'children': '""""""'}), "(id='factor-n2', children='')\n", (5157, 5186), True, 'import dash_html_components as html\n'), ((5332, 5401), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""infac-n2"""', 'value': '(1)', 'type': '"""text"""', 'size': '(5)', 'debounce': '(True)'}), "(id='infac-n2', value=1, type='text', size=5, debounce=True)\n", (5341, 5401), True, 'import dash_core_components as dcc\n'), ((6323, 6384), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""drop--difference"""', 'value': '(0)', 'clearable': '(False)'}), "(id='drop--difference', value=0, clearable=False)\n", (6335, 6384), True, 'import dash_core_components as dcc\n'), ((6551, 6615), 'dash_html_components.Button', 'html.Button', ([], {'id': '"""submit-to-plots"""', 'n_clicks': '(0)', 'children': '"""Update"""'}), "(id='submit-to-plots', n_clicks=0, children='Update')\n", (6562, 6615), True, 'import dash_html_components as html\n'), ((6727, 6753), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""plot--state"""'}), "(id='plot--state')\n", (6735, 
6753), True, 'import dash_html_components as html\n'), ((6870, 6894), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""NH--plot"""'}), "(id='NH--plot')\n", (6879, 6894), True, 'import dash_core_components as dcc\n'), ((6983, 7008), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""AIS--plot"""'}), "(id='AIS--plot')\n", (6992, 7008), True, 'import dash_core_components as dcc\n'), ((7586, 7641), 'os.path.isdir', 'os.path.isdir', (["(datapath + expnames[n - nnn] + '/output')"], {}), "(datapath + expnames[n - nnn] + '/output')\n", (7599, 7641), False, 'import os\n'), ((8021, 8076), 'os.path.isdir', 'os.path.isdir', (["(datapath + expnames[n - nnn] + '/output')"], {}), "(datapath + expnames[n - nnn] + '/output')\n", (8034, 8076), False, 'import os\n'), ((19229, 19241), 'numpy.abs', 'np.abs', (['hdif'], {}), '(hdif)\n', (19235, 19241), True, 'import numpy as np\n'), ((25016, 25028), 'numpy.abs', 'np.abs', (['hdif'], {}), '(hdif)\n', (25022, 25028), True, 'import numpy as np\n'), ((10297, 10339), 'numpy.loadtxt', 'np.loadtxt', (["(expname + '/output/icevol.dat')"], {}), "(expname + '/output/icevol.dat')\n", (10307, 10339), True, 'import numpy as np\n'), ((10672, 10714), 'numpy.loadtxt', 'np.loadtxt', (["(expname + '/output/icevol.dat')"], {}), "(expname + '/output/icevol.dat')\n", (10682, 10714), True, 'import numpy as np\n'), ((11571, 11631), 'os.path.isfile', 'os.path.isfile', (["(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')"], {}), "(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')\n", (11585, 11631), False, 'import os\n'), ((11929, 11989), 'os.path.isfile', 'os.path.isfile', (["(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')"], {}), "(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')\n", (11943, 11989), False, 'import os\n'), ((18165, 18225), 'os.path.isfile', 'os.path.isfile', (["(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')"], {}), "(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')\n", (18179, 18225), False, 'import os\n'), ((18792, 18852), 
'os.path.isfile', 'os.path.isfile', (["(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')"], {}), "(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')\n", (18806, 18852), False, 'import os\n'), ((19256, 19268), 'numpy.abs', 'np.abs', (['hdif'], {}), '(hdif)\n', (19262, 19268), True, 'import numpy as np\n'), ((20535, 20595), 'os.path.isfile', 'os.path.isfile', (["(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')"], {}), "(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')\n", (20549, 20595), False, 'import os\n'), ((24024, 24084), 'os.path.isfile', 'os.path.isfile', (["(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')"], {}), "(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')\n", (24038, 24084), False, 'import os\n'), ((24579, 24639), 'os.path.isfile', 'os.path.isfile', (["(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')"], {}), "(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')\n", (24593, 24639), False, 'import os\n'), ((25043, 25055), 'numpy.abs', 'np.abs', (['hdif'], {}), '(hdif)\n', (25049, 25055), True, 'import numpy as np\n'), ((26208, 26268), 'os.path.isfile', 'os.path.isfile', (["(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')"], {}), "(PNHPATH + chunk_dirs[n - nn] + '/fort.92.nc')\n", (26222, 26268), False, 'import os\n')] |
import sys
import gym
import numpy as np
from collections import defaultdict, deque
import matplotlib.pyplot as plt
#%matplotlib inline
import check_test
from plot_utils import plot_values
# Gym environment used throughout this script; CliffWalking exposes 48
# states (laid out as the 4x12 grid reshaped below) and 4 discrete actions.
env = gym.make('CliffWalking-v0')
def epsilon_greedy(state, Q, epsilon, nA):
    """Return the epsilon-greedy action-probability vector for `state`.

    Every action receives the exploration share epsilon/nA; the greedy
    action (argmax of Q[state]) additionally gets the remaining
    (1 - epsilon) probability mass.
    """
    probs = np.full(nA, epsilon / nA)
    probs[np.argmax(Q[state])] += 1 - epsilon
    return probs
def get_action(probabilities):
    """Sample an action index according to `probabilities`.

    Fix: the original read the module-level ``env.action_space.n`` for the
    number of actions, coupling this helper to a global. The probability
    vector's own length is the same value by construction (it comes from
    ``epsilon_greedy(..., nA)``), so use it instead.
    """
    return np.random.choice(len(probabilities), p=probabilities)
def sarsa(env, num_episodes, alpha, gamma=1.0):
    """Run on-policy TD control (SARSA) and return the learned Q table.

    Q maps each state to a numpy array of action values. Exploration is
    epsilon-greedy with epsilon decayed as 1/episode.
    """
    Q = defaultdict(lambda: np.zeros(env.nA))
    eps_start = 1.0
    for episode in range(1, num_episodes + 1):
        eps = eps_start / episode
        # progress indicator every 100 episodes, overwriting one console line
        if episode % 100 == 0:
            print("\rEpisode {}/{}".format(episode, num_episodes), end="")
            sys.stdout.flush()
        state = env.reset()
        action = get_action(epsilon_greedy(state, Q, eps, env.action_space.n))
        done = False
        while not done:
            next_state, reward, done, info = env.step(action)
            # pick the successor action with the same policy (on-policy update)
            next_action = get_action(epsilon_greedy(next_state, Q, eps, env.action_space.n))
            td_target = reward + gamma * Q[next_state][next_action]
            Q[state][action] = Q[state][action] + alpha * (td_target - Q[state][action])
            state, action = next_state, next_action
    return Q
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsa = sarsa(env, 5000, .01)
# print the estimated optimal policy
# (greedy action per state; states never visited get -1, and the 48 states
# are laid out as the 4x12 cliff-walking grid)
policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_sarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsa)
# plot the estimated optimal state-value function
# (max action value per state; unvisited states are assigned 0)
V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)])
plot_values(V_sarsa) | [
"plot_utils.plot_values",
"check_test.run_check",
"gym.make",
"numpy.argmax",
"numpy.zeros",
"numpy.max",
"numpy.arange",
"sys.stdout.flush",
"numpy.random.choice"
] | [((197, 224), 'gym.make', 'gym.make', (['"""CliffWalking-v0"""'], {}), "('CliffWalking-v0')\n", (205, 224), False, 'import gym\n'), ((2025, 2079), 'check_test.run_check', 'check_test.run_check', (['"""td_control_check"""', 'policy_sarsa'], {}), "('td_control_check', policy_sarsa)\n", (2045, 2079), False, 'import check_test\n'), ((2323, 2343), 'plot_utils.plot_values', 'plot_values', (['V_sarsa'], {}), '(V_sarsa)\n', (2334, 2343), False, 'from plot_utils import plot_values\n'), ((282, 301), 'numpy.argmax', 'np.argmax', (['Q[state]'], {}), '(Q[state])\n', (291, 301), True, 'import numpy as np\n'), ((322, 334), 'numpy.zeros', 'np.zeros', (['nA'], {}), '(nA)\n', (330, 334), True, 'import numpy as np\n'), ((493, 549), 'numpy.random.choice', 'np.random.choice', (['env.action_space.n', '(1)'], {'p': 'probabilities'}), '(env.action_space.n, 1, p=probabilities)\n', (509, 549), True, 'import numpy as np\n'), ((2250, 2270), 'numpy.max', 'np.max', (['Q_sarsa[key]'], {}), '(Q_sarsa[key])\n', (2256, 2270), True, 'import numpy as np\n'), ((2307, 2320), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (2316, 2320), True, 'import numpy as np\n'), ((717, 733), 'numpy.zeros', 'np.zeros', (['env.nA'], {}), '(env.nA)\n', (725, 733), True, 'import numpy as np\n'), ((1061, 1079), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1077, 1079), False, 'import sys\n'), ((1934, 1957), 'numpy.argmax', 'np.argmax', (['Q_sarsa[key]'], {}), '(Q_sarsa[key])\n', (1943, 1957), True, 'import numpy as np\n'), ((1995, 2008), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (2004, 2008), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# Open the image.
img = cv2.imread('../Images and Videos/flower.jpg')
cv2.imshow('image',img)
'''
log transformation : S = c*log(1+r)
where s : output intensity value
r : pixel value of the image
constant c : 255/log(1+m)
where m : maximum pixel value in the image
'''
def log_transformation(img):
    """Apply the log intensity transform s = c * log(1 + r) to an image.

    c = 255 / log(1 + m), where m is the maximum pixel value, which
    stretches dark regions and compresses bright ones.

    Fixes over the original:
    - m == 0 (all-black image): with numpy scalars, 255 / log(1) yields
      inf (no ZeroDivisionError is raised), so the old `except` never
      fired and `s` was left unbound, crashing with NameError. An
      all-zero image now simply maps to an all-zero output.
    - `1 + img` on a uint8 array wraps 255 -> 0, producing log(0) = -inf
      for saturated pixels; compute in float64 instead.

    Parameters
    ----------
    img : numpy.ndarray
        Input image with non-negative integer intensities.

    Returns
    -------
    numpy.ndarray
        Log-transformed image as uint8 (same shape as `img`).
    """
    m = float(np.max(img))
    if m == 0:
        return np.zeros_like(img, dtype=np.uint8)
    c = int(255 / np.log(1 + m))
    # log1p in float64 avoids the uint8 overflow of `1 + img`
    s = c * np.log1p(img.astype(np.float64))
    return np.array(s, dtype=np.uint8)
print(img.shape)
# Apply the transform and display it alongside the original window.
log_transformed_image = log_transformation(img)
cv2.imshow('log transformed',log_transformed_image)
# Save the output.
cv2.imwrite('log_transformed.jpg', log_transformed_image)
# Block until any key is pressed, then close both display windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"numpy.log",
"warnings.filterwarnings",
"cv2.waitKey",
"cv2.imwrite",
"cv2.destroyAllWindows",
"cv2.imread",
"numpy.max",
"numpy.array",
"cv2.imshow"
] | [((48, 81), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (71, 81), False, 'import warnings\n'), ((108, 153), 'cv2.imread', 'cv2.imread', (['"""../Images and Videos/flower.jpg"""'], {}), "('../Images and Videos/flower.jpg')\n", (118, 153), False, 'import cv2\n'), ((156, 180), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (166, 180), False, 'import cv2\n'), ((685, 737), 'cv2.imshow', 'cv2.imshow', (['"""log transformed"""', 'log_transformed_image'], {}), "('log transformed', log_transformed_image)\n", (695, 737), False, 'import cv2\n'), ((771, 828), 'cv2.imwrite', 'cv2.imwrite', (['"""log_transformed.jpg"""', 'log_transformed_image'], {}), "('log_transformed.jpg', log_transformed_image)\n", (782, 828), False, 'import cv2\n'), ((831, 845), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (842, 845), False, 'import cv2\n'), ((846, 869), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (867, 869), False, 'import cv2\n'), ((403, 414), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (409, 414), True, 'import numpy as np\n'), ((576, 603), 'numpy.array', 'np.array', (['s'], {'dtype': 'np.uint8'}), '(s, dtype=np.uint8)\n', (584, 603), True, 'import numpy as np\n'), ((471, 486), 'numpy.log', 'np.log', (['(1 + img)'], {}), '(1 + img)\n', (477, 486), True, 'import numpy as np\n'), ((444, 457), 'numpy.log', 'np.log', (['(1 + m)'], {}), '(1 + m)\n', (450, 457), True, 'import numpy as np\n')] |
"""
This file contains methods that display the training data in a reader-friendly manner
"""
import os
import pandas as pd
import json
import numpy as np
from termcolor import colored
from functional import pseq
from matplotlib import pyplot as plt
import build_pairs
# Resolve the training-pairs directory relative to this module's location.
dirname = os.path.dirname(__file__)
data_path = os.path.join(dirname, '../../resources/training_pairs/')
def pretty_print(file=None, sample_size=25):
    """
    Display the sentence mappings from a training-pairs CSV file.

    Each sampled row is printed as ``[word_type] target IS source: sentence``
    with the original lemma (red) and its replacement (green) highlighted
    in-line via termcolor.

    :param file: string, name of the CSV file; defaults to the first file
                 found in ``data_path``
    :param sample_size: int, number of random rows to display
    """
    csv_file = os.listdir(data_path)[0] if file is None else file
    mf = \
        pd.read_csv(data_path + csv_file,
                    sep='\t',
                    header=None,
                    index_col=False,
                    names=['target_domain', 'source_domain', 'in_sent',
                           'out_sent', 'lemma_pos', 'word_type'])
    # extract a sample
    mf = mf.sample(sample_size)
    # Fix: the original iterated `mf.iloc` and indexed each row positionally
    # (row[2], row[4], ...); integer-key lookup on a string-labelled Series
    # is deprecated in pandas 2.x and removed in 3.0, so use column names.
    for _, row in mf.iterrows():
        # lemma_pos is a stringified tuple/list; strip the wrapping
        # characters and keep the first two integers, the lemma's
        # [start, end) offsets in the input sentence.
        span = row['lemma_pos'].replace('[', '').replace(']', '').replace('(', '').replace(')', '')
        parts = span.split(',')
        i = int(parts[0])
        j = int(parts[1])
        lemma = colored(row['in_sent'][i:j], 'red')
        replacement = colored(row['out_sent'][i:].split(' ')[0], 'green')
        in_sen = '{}({} ->){}{}'.format(row['in_sent'][:i], lemma, replacement, row['in_sent'][j:])
        print('[{}] {} IS {}: {}'.format(row['word_type'], row['target_domain'], row['source_domain'], in_sen))
def vis_sentences_dist():
    """
    Visualize how many example sentences each MetaNet target frame has.
    Frames with zero example sentences are **excluded** from the resulting
    figure, for the sake of readability.
    """
    with open('resources/metanet_metaphors.json', 'r') as f:
        metaphors = json.load(f)
    # unique, string-valued 'target' frames appearing in the metaphors
    frame_names = {m.get('target') for m in metaphors
                   if isinstance(m.get('target'), str)}
    counts = []
    for frame in frame_names:
        n_sents = len(build_pairs.get_sentences_for_frame(frame)[0])
        if n_sents > 0:
            counts.append(n_sents)
    avg = np.mean(counts)
    plt.hist(counts, log=True, bins=20)
    plt.axvline(avg, label='mean: {:.2f}'.format(avg), color='r', linestyle='dashed')
    plt.xlabel('number of sentences')
    plt.ylabel('number of frames')
    plt.title('Distribution of sentences among MetaNet target frames')
    plt.legend()
    plt.savefig('sentences_per_MN_target_frame')
def vis_vocab_dist():
    """
    Visualize how many vocabulary options each MetaNet source frame offers.
    Frames with zero vocabulary options are **excluded** from the resulting
    figure, for the sake of readability.
    """
    with open('resources/metanet_metaphors.json', 'r') as f:
        metaphors = json.load(f)
    # unique, string-valued 'source' frames appearing in the metaphors
    frame_names = {m.get('source') for m in metaphors
                   if isinstance(m.get('source'), str)}
    counts = []
    for frame in frame_names:
        vocab_size = len(build_pairs.get_vocabulary(frame))
        if vocab_size > 0:
            counts.append(vocab_size)
    avg = np.mean(counts)
    plt.hist(counts, log=True, bins=20)
    plt.axvline(avg, label='mean: {:.2f}'.format(avg), color='r', linestyle='dashed')
    plt.xlabel('number of vocabulary options')
    plt.ylabel('number of frames')
    plt.title('Distribution of vocabulary options among MetaNet source frames')
    plt.legend()
    plt.savefig('vocabs_per_MN_source_frame')
| [
"matplotlib.pyplot.title",
"json.load",
"matplotlib.pyplot.hist",
"pandas.read_csv",
"os.path.dirname",
"matplotlib.pyplot.legend",
"termcolor.colored",
"numpy.mean",
"functional.pseq",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"os.listdir",
"matplotlib.pyplot... | [((282, 307), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (297, 307), False, 'import os\n'), ((320, 376), 'os.path.join', 'os.path.join', (['dirname', '"""../../resources/training_pairs/"""'], {}), "(dirname, '../../resources/training_pairs/')\n", (332, 376), False, 'import os\n'), ((650, 822), 'pandas.read_csv', 'pd.read_csv', (['(data_path + csv_file)'], {'sep': '"""\t"""', 'header': 'None', 'index_col': '(False)', 'names': "['target_domain', 'source_domain', 'in_sent', 'out_sent', 'lemma_pos',\n 'word_type']"}), "(data_path + csv_file, sep='\\t', header=None, index_col=False,\n names=['target_domain', 'source_domain', 'in_sent', 'out_sent',\n 'lemma_pos', 'word_type'])\n", (661, 822), True, 'import pandas as pd\n'), ((1266, 1296), 'termcolor.colored', 'colored', (['column[2][i:j]', '"""red"""'], {}), "(column[2][i:j], 'red')\n", (1273, 1296), False, 'from termcolor import colored\n'), ((1875, 1887), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1884, 1887), False, 'import json\n'), ((2393, 2406), 'numpy.mean', 'np.mean', (['lens'], {}), '(lens)\n', (2400, 2406), True, 'import numpy as np\n'), ((2415, 2448), 'matplotlib.pyplot.hist', 'plt.hist', (['lens'], {'log': '(True)', 'bins': '(20)'}), '(lens, log=True, bins=20)\n', (2423, 2448), True, 'from matplotlib import pyplot as plt\n'), ((2549, 2582), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of sentences"""'], {}), "('number of sentences')\n", (2559, 2582), True, 'from matplotlib import pyplot as plt\n'), ((2591, 2621), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of frames"""'], {}), "('number of frames')\n", (2601, 2621), True, 'from matplotlib import pyplot as plt\n'), ((2630, 2696), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of sentences among MetaNet target frames"""'], {}), "('Distribution of sentences among MetaNet target frames')\n", (2639, 2696), True, 'from matplotlib import pyplot as plt\n'), ((2705, 2717), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2715, 2717), True, 'from matplotlib import pyplot as plt\n'), ((2726, 2770), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""sentences_per_MN_target_frame"""'], {}), "('sentences_per_MN_target_frame')\n", (2737, 2770), True, 'from matplotlib import pyplot as plt\n'), ((3097, 3109), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3106, 3109), False, 'import json\n'), ((3569, 3582), 'numpy.mean', 'np.mean', (['lens'], {}), '(lens)\n', (3576, 3582), True, 'import numpy as np\n'), ((3591, 3624), 'matplotlib.pyplot.hist', 'plt.hist', (['lens'], {'log': '(True)', 'bins': '(20)'}), '(lens, log=True, bins=20)\n', (3599, 3624), True, 'from matplotlib import pyplot as plt\n'), ((3725, 3767), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of vocabulary options"""'], {}), "('number of vocabulary options')\n", (3735, 3767), True, 'from matplotlib import pyplot as plt\n'), ((3776, 3806), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of frames"""'], {}), "('number of frames')\n", (3786, 3806), True, 'from matplotlib import pyplot as plt\n'), ((3815, 3890), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of vocabulary options among MetaNet source frames"""'], {}), "('Distribution of vocabulary options among MetaNet source frames')\n", (3824, 3890), True, 'from matplotlib import pyplot as plt\n'), ((3899, 3911), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3909, 3911), True, 'from matplotlib import pyplot as plt\n'), ((3920, 3961), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""vocabs_per_MN_source_frame"""'], {}), "('vocabs_per_MN_source_frame')\n", (3931, 3961), True, 'from matplotlib import pyplot as plt\n'), ((580, 601), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (590, 601), False, 'import os\n'), ((1997, 2008), 'functional.pseq', 'pseq', (['metas'], {}), '(metas)\n', (2001, 2008), False, 'from functional import pseq\n'), ((3219, 3230), 
'functional.pseq', 'pseq', (['metas'], {}), '(metas)\n', (3223, 3230), False, 'from functional import pseq\n'), ((3398, 3411), 'functional.pseq', 'pseq', (['sources'], {}), '(sources)\n', (3402, 3411), False, 'from functional import pseq\n'), ((2176, 2189), 'functional.pseq', 'pseq', (['targets'], {}), '(targets)\n', (2180, 2189), False, 'from functional import pseq\n')] |
import math
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import KFold
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
# Suppress all Python warnings so the cross-validation output stays readable.
warnings.filterwarnings("ignore")
def removeColumn(ds, name):
    """Split one column off a dataframe.

    Returns a new dataframe without column `name` together with that
    column's values as a plain Python list; `ds` itself is untouched.
    """
    extracted = list(ds[name])
    remaining = ds.drop(columns=[name])
    return remaining, extracted
def dataTransforming():
    """Load the cleaned listings, one-hot encode categoricals, standardise features.

    Returns (X, y, ids): the scaled feature dataframe (original column
    names preserved), the price target, and the listing id list.
    """
    raw = pd.read_csv('cleandataset.csv')
    raw, listing_ids = removeColumn(raw, 'İlan no')
    y = raw['Fiyat']
    features = raw.drop('Fiyat', axis=1)
    features = pd.get_dummies(features, columns=["Yapının Durumu", "Eşya Durumu", "Isınma Tipi"])
    features = pd.get_dummies(features, columns=["Semt"])
    feature_names = list(features.columns)
    scaler = StandardScaler()
    X = pd.DataFrame(scaler.fit_transform(features))
    # restore the readable column names lost through the numpy round-trip
    X.columns = feature_names
    return X, y, listing_ids
def trainModel(model, X, y):
    """Evaluate `model` with shuffled 10-fold cross-validation.

    Returns the list of per-fold mean absolute errors.
    """
    fold_errors = []
    splitter = KFold(n_splits=10, shuffle=True, random_state=8)
    for fit_idx, eval_idx in splitter.split(X):
        model.fit(X.iloc[fit_idx], y.iloc[fit_idx])
        predictions = model.predict(X.iloc[eval_idx])
        fold_errors.append(mean_absolute_error(y.iloc[eval_idx], predictions))
    return fold_errors
def getResults(X, y):
    """Benchmark a suite of regressors on (X, y).

    For each model: print the class name and its mean cross-validated MAE,
    then plot its coefficients or feature importances.
    """
    candidates = [
        LinearRegression(), Lasso(), Ridge(), KNeighborsRegressor(),
        DecisionTreeRegressor(),
        GradientBoostingRegressor(warm_start='True', n_estimators=50),
        RandomForestRegressor(warm_start='True'),
        AdaBoostRegressor(n_estimators=50),
    ]
    for candidate in candidates:
        fold_errors = trainModel(candidate, X, y)
        print(type(candidate).__name__)
        print("%.2f\n" % np.mean(fold_errors))
        VisualizeCoefficient(candidate, X)
def VisualizeCoefficient(model, X):
    """Plot the 12 most negative and 13 most positive model weights.

    Linear models expose `coef_`; tree/ensemble models expose
    `feature_importances_` instead. Models with neither are reported.

    Fix: the original used bare `except:` twice, which also swallows
    KeyboardInterrupt/SystemExit and hides unrelated plotting errors;
    catch only the AttributeError raised when the attribute is missing.
    """
    try:
        coefs = pd.Series(model.coef_, index = X.columns)
        print(type(model).__name__ , str(sum(coefs != 0)) , " features and eliminated the other " + str(sum(coefs == 0)) + " features")
        imp_coefs = pd.concat([coefs.sort_values().head(12),coefs.sort_values().tail(13)])
        imp_coefs.plot(kind = "barh")
        title = 'Coefficients in the ' + type(model).__name__
        plt.title(title)
        plt.show()
    except AttributeError:
        print('there is no coefficient')
        # fall back to tree-style importances
        try:
            coefs = pd.Series(model.feature_importances_, index = X.columns)
            imp_coefs = pd.concat([coefs.sort_values().head(12),coefs.sort_values().tail(13)])
            imp_coefs.plot(kind = "barh")
            title = 'Importance in the ' + type(model).__name__
            plt.title(title)
            plt.show()
        except AttributeError:
            print('there is no importance')
def VisualizePrediction(model, X, y):
    """Scatter cross-validated predictions against the true target values,
    with the identity line for reference."""
    y_hat = cross_val_predict(model, X, y, cv=10)
    figure, axis = plt.subplots()
    axis.scatter(y, y_hat, edgecolors=(0, 0, 0))
    # dashed identity line: perfect predictions would fall on it
    axis.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
    axis.set_xlabel('Real')
    axis.set_ylabel('Predicted')
    plt.title(type(model).__name__)
    plt.show()
def VisualizeCorrelationsAll(X):
    """Render the full feature-correlation matrix as a heatmap."""
    sns.heatmap(X.corr(), cmap="YlGnBu")
    plt.show()
def VisualizeCorrelationsOnebyOne(X, y):
    """Scatter every feature against the price target, one figure per column,
    to eyeball outliers."""
    for feature in X.columns:
        plt.scatter(X[feature], y, c='red')
        plt.title("Outliers")
        plt.xlabel(feature)
        plt.ylabel("Fiyat")
        plt.show()
def showPricePLot(y):
    """Plot the distribution of log-transformed prices.

    Fix: `sns.distplot` was deprecated in seaborn 0.11 and removed in
    0.14; `histplot(..., kde=True, stat='density')` is the documented
    replacement producing the same density histogram with KDE overlay.
    """
    sns.histplot(np.log(y), kde=True, stat='density')
    plt.show()
def showLogPredictions(X, y, links):
    """Cross-validate a random forest on log-prices and write the results
    (real, predicted, difference, listing link) to sonuclar_log.csv."""
    log_preds = cross_val_predict(RandomForestRegressor(warm_start='True'), X, np.log(y), cv=10)
    # undo the log transform and round back to whole prices
    back_transformed = [round(math.exp(p)) for p in log_preds]
    results = pd.DataFrame(data={'real': list(y), 'predicted': back_transformed})
    results['fark'] = results['real'] - results['predicted']
    results['linkler'] = links
    results.to_csv('sonuclar_log.csv', index=False)
def showPredictions(X, y, links):
    """Cross-validate a random forest on raw prices and write the results
    (real, predicted, difference, listing link) to sonuclar.csv."""
    y_hat = cross_val_predict(RandomForestRegressor(warm_start='True'), X, y, cv=10)
    rounded = [int(round(v)) for v in y_hat]
    results = pd.DataFrame(data={'real': list(y), 'predicted': rounded})
    results['fark'] = results['real'] - results['predicted']
    results['linkler'] = links
    results.to_csv('sonuclar.csv', index=False)
# Entry point: build the design matrix, then benchmark every model on it.
x, y, ids = dataTransforming()
getResults(x,y)
# VisualizeCorrelationsOnebyOne(x,y)
# VisualizeCorrelationsAll(x)
| [
"matplotlib.pyplot.title",
"sklearn.preprocessing.StandardScaler",
"seaborn.heatmap",
"pandas.read_csv",
"sklearn.ensemble.GradientBoostingRegressor",
"sklearn.metrics.mean_absolute_error",
"numpy.mean",
"pandas.DataFrame",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.subplots",
"skl... | [((692, 725), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (715, 725), False, 'import warnings\n'), ((975, 1006), 'pandas.read_csv', 'pd.read_csv', (['"""cleandataset.csv"""'], {}), "('cleandataset.csv')\n", (986, 1006), True, 'import pandas as pd\n'), ((1131, 1206), 'pandas.get_dummies', 'pd.get_dummies', (['X'], {'columns': "['Yapının Durumu', 'Eşya Durumu', 'Isınma Tipi']"}), "(X, columns=['Yapının Durumu', 'Eşya Durumu', 'Isınma Tipi'])\n", (1145, 1206), True, 'import pandas as pd\n'), ((1217, 1252), 'pandas.get_dummies', 'pd.get_dummies', (['X'], {'columns': "['Semt']"}), "(X, columns=['Semt'])\n", (1231, 1252), True, 'import pandas as pd\n'), ((1327, 1343), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1341, 1343), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1410, 1425), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (1422, 1425), True, 'import pandas as pd\n'), ((1598, 1646), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(8)'}), '(n_splits=10, shuffle=True, random_state=8)\n', (1603, 1646), False, 'from sklearn.model_selection import KFold\n'), ((3533, 3570), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['model', 'X', 'y'], {'cv': '(10)'}), '(model, X, y, cv=10)\n', (3550, 3570), False, 'from sklearn.model_selection import cross_val_predict\n'), ((3585, 3599), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3597, 3599), True, 'import matplotlib.pyplot as plt\n'), ((3811, 3827), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3820, 3827), True, 'import matplotlib.pyplot as plt\n'), ((3832, 3842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3840, 3842), True, 'import matplotlib.pyplot as plt\n'), ((3907, 3939), 'seaborn.heatmap', 'sns.heatmap', (['corr'], {'cmap': '"""YlGnBu"""'}), "(corr, cmap='YlGnBu')\n", 
(3918, 3939), True, 'import seaborn as sns\n'), ((3943, 3953), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3951, 3953), True, 'import matplotlib.pyplot as plt\n'), ((4234, 4244), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4242, 4244), True, 'import matplotlib.pyplot as plt\n'), ((4507, 4527), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd'}), '(data=d)\n', (4519, 4527), True, 'import pandas as pd\n'), ((4919, 4939), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd'}), '(data=d)\n', (4931, 4939), True, 'import pandas as pd\n'), ((1923, 1958), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1942, 1958), False, 'from sklearn.metrics import mean_absolute_error\n'), ((2045, 2063), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2061, 2063), False, 'from sklearn.linear_model import LinearRegression\n'), ((2065, 2072), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (2070, 2072), False, 'from sklearn.linear_model import Lasso\n'), ((2074, 2081), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (2079, 2081), False, 'from sklearn.linear_model import Ridge\n'), ((2083, 2104), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {}), '()\n', (2102, 2104), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((2106, 2129), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (2127, 2129), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((2141, 2202), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'warm_start': '"""True"""', 'n_estimators': '(50)'}), "(warm_start='True', n_estimators=50)\n", (2166, 2202), False, 'from sklearn.ensemble import GradientBoostingRegressor\n'), ((2203, 2243), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'warm_start': '"""True"""'}), "(warm_start='True')\n", (2224, 2243), 
False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2245, 2279), 'sklearn.ensemble.AdaBoostRegressor', 'AdaBoostRegressor', ([], {'n_estimators': '(50)'}), '(n_estimators=50)\n', (2262, 2279), False, 'from sklearn.ensemble import AdaBoostRegressor\n'), ((2595, 2634), 'pandas.Series', 'pd.Series', (['model.coef_'], {'index': 'X.columns'}), '(model.coef_, index=X.columns)\n', (2604, 2634), True, 'import pandas as pd\n'), ((2974, 2990), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2983, 2990), True, 'import matplotlib.pyplot as plt\n'), ((2999, 3009), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3007, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3093, 3147), 'pandas.Series', 'pd.Series', (['model.feature_importances_'], {'index': 'X.columns'}), '(model.feature_importances_, index=X.columns)\n', (3102, 3147), True, 'import pandas as pd\n'), ((3348, 3364), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3357, 3364), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3381, 3383), True, 'import matplotlib.pyplot as plt\n'), ((4045, 4076), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[col]', 'y'], {'c': '"""red"""'}), "(X[col], y, c='red')\n", (4056, 4076), True, 'import matplotlib.pyplot as plt\n'), ((4086, 4107), 'matplotlib.pyplot.title', 'plt.title', (['"""Outliers"""'], {}), "('Outliers')\n", (4095, 4107), True, 'import matplotlib.pyplot as plt\n'), ((4116, 4131), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['col'], {}), '(col)\n', (4126, 4131), True, 'import matplotlib.pyplot as plt\n'), ((4140, 4159), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fiyat"""'], {}), "('Fiyat')\n", (4150, 4159), True, 'import matplotlib.pyplot as plt\n'), ((4168, 4178), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4176, 4178), True, 'import matplotlib.pyplot as plt\n'), ((4219, 4228), 'numpy.log', 'np.log', (['y'], {}), 
'(y)\n', (4225, 4228), True, 'import numpy as np\n'), ((4315, 4355), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'warm_start': '"""True"""'}), "(warm_start='True')\n", (4336, 4355), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((4360, 4369), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (4366, 4369), True, 'import numpy as np\n'), ((4740, 4780), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'warm_start': '"""True"""'}), "(warm_start='True')\n", (4761, 4780), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2412, 2427), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (2419, 2427), True, 'import numpy as np\n'), ((4440, 4451), 'math.exp', 'math.exp', (['i'], {}), '(i)\n', (4448, 4451), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""Template-based prediction
================================================================================
In this tutorial, we show how to better predict new contrasts for a target
subject using many source subjects corresponding contrasts. For this purpose,
we create a template to which we align the target subject, using shared information.
We then predict new images for the target and compare them to a baseline.
We mostly rely on Python common packages and on nilearn to handle
functional data in a clean fashion.
To run this example, you must launch IPython via ``ipython
--matplotlib`` in a terminal, or use ``jupyter-notebook``.
.. contents:: **Contents**
    :local:
    :depth: 1
"""
###############################################################################
# Retrieve the data
# -----------------
# In this example we use the IBC dataset, which includes a large number of
# different contrasts maps for 12 subjects.
# We download the images for subjects sub-01, sub-02, sub-04, sub-05, sub-06
# and sub-07 (or retrieve them if they were already downloaded).
# imgs is the list of paths to available statistical images for each subjects.
# df is a dataframe with metadata about each of them.
# mask is a binary image used to extract grey matter regions.
#
from fmralign.fetch_example_data import fetch_ibc_subjects_contrasts
imgs, df, mask_img = fetch_ibc_subjects_contrasts(
    ['sub-01', 'sub-02', 'sub-04', 'sub-05', 'sub-06', 'sub-07'])
###############################################################################
# Define a masker
# -----------------
# We define a nilearn masker that will be used to handle relevant data.
# For more information, visit :
# 'http://nilearn.github.io/manipulating_images/masker_objects.html'
#
from nilearn.input_data import NiftiMasker
masker = NiftiMasker(mask_img=mask_img).fit()
###############################################################################
# Prepare the data
# ----------------
# For each subject, we will use two series of contrasts acquired during
# two independent sessions with a different phase encoding:
# Antero-posterior(AP) or Postero-anterior(PA).
#
# To infer a template for subjects sub-01 to sub-06 for both AP and PA data,
# we make a list of 4D niimgs from our list of list of files containing 3D images
from nilearn.image import concat_imgs
template_train = []
# Note: imgs[0..4] are the five training subjects fetched above;
# imgs[5] (sub-07) is held out as the prediction target below.
for i in range(5):
    template_train.append(concat_imgs(imgs[i]))
target_train = df[df.subject == 'sub-07'][df.acquisition == 'ap'].path.values
# For subject sub-07, we split it in two folds:
# - target train: sub-07 AP contrasts, used to learn alignment to template
# - target test: sub-07 PA contrasts, used as a ground truth to score predictions
# We make a single 4D Niimg from our list of 3D filenames
target_train = concat_imgs(target_train)
target_test = df[df.subject == 'sub-07'][df.acquisition == 'pa'].path.values
###############################################################################
# Compute a baseline (average of subjects)
# ----------------------------------------
# We create an image with as many contrasts as any subject representing for
# each contrast the average of all train subjects maps.
#
import numpy as np
masked_imgs = [masker.transform(img) for img in template_train]
average_img = np.mean(masked_imgs, axis=0)
average_subject = masker.inverse_transform(average_img)
###############################################################################
# Create a template from the training subjects.
# ---------------------------------------------
# We define an estimator using the class TemplateAlignment:
# * We align the whole brain through 'multiple' local alignments.
# * These alignments are calculated on a parcellation of the brain in 150 pieces,
#   this parcellation creates group of functionally similar voxels.
# * The template is created iteratively, aligning all subjects data into a
#   common space, from which the template is inferred and aligning again to this
#   new template space.
#
from fmralign.template_alignment import TemplateAlignment
from nilearn.image import index_img
template_estim = TemplateAlignment(
    n_pieces=150, alignment_method='ridge_cv', mask=masker)
template_estim.fit(template_train)
###############################################################################
# Predict new data for left-out subject
# -------------------------------------
# We use target_train data to fit the transform, indicating it corresponds to
# the contrasts indexed by train_index and predict from this learnt alignment
# contrasts corresponding to template test_index numbers.
# For each train subject and for the template, the AP contrasts are sorted from
# 0, to 53, and then the PA contrasts from 53 to 106.
#
train_index = range(53)
test_index = range(53, 106)
# We input the mapping image target_train in a list, we could have input more
# than one subject for which we'd want to predict : [train_1, train_2 ...]
prediction_from_template = template_estim.transform([target_train], train_index,
                                                    test_index)
# As a baseline prediction, let's just take the average of activations across subjects.
prediction_from_average = index_img(average_subject, test_index)
###############################################################################
# Score the baseline and the prediction
# -------------------------------------
# We use a utility scoring function to measure the voxelwise correlation
# between the prediction and the ground truth. That is, for each voxel, we
# measure the correlation between its profile of activation without and with
# alignment, to see if alignment was able to predict a signal more alike the ground truth.
#
from fmralign._utils import voxelwise_correlation
# Now we use this scoring function to compare the correlation of predictions
# made from group average and from template with the real PA contrasts of sub-07
average_score = voxelwise_correlation(
    target_test, prediction_from_average, masker)
template_score = voxelwise_correlation(
    target_test, prediction_from_template[0], masker)
###############################################################################
# Plotting the measures
# ---------------------
# Finally we plot both scores
#
from nilearn import plotting
baseline_display = plotting.plot_stat_map(
    average_score, display_mode="z", vmax=1, cut_coords=[-15, -5])
baseline_display.title(
    "Group average correlation wt ground truth")
display = plotting.plot_stat_map(
    template_score, display_mode="z", cut_coords=[-15, -5], vmax=1)
display.title(
    "Template-based prediction correlation wt ground truth")
###############################################################################
# We observe that creating a template and aligning a new subject to it yields
# a prediction that is better correlated with the ground truth than just using
# the average activations of subjects.
#
| [
"nilearn.image.concat_imgs",
"nilearn.image.index_img",
"nilearn.plotting.plot_stat_map",
"fmralign.template_alignment.TemplateAlignment",
"fmralign._utils.voxelwise_correlation",
"numpy.mean",
"nilearn.input_data.NiftiMasker",
"fmralign.fetch_example_data.fetch_ibc_subjects_contrasts"
] | [((1399, 1493), 'fmralign.fetch_example_data.fetch_ibc_subjects_contrasts', 'fetch_ibc_subjects_contrasts', (["['sub-01', 'sub-02', 'sub-04', 'sub-05', 'sub-06', 'sub-07']"], {}), "(['sub-01', 'sub-02', 'sub-04', 'sub-05',\n 'sub-06', 'sub-07'])\n", (1427, 1493), False, 'from fmralign.fetch_example_data import fetch_ibc_subjects_contrasts\n'), ((2835, 2860), 'nilearn.image.concat_imgs', 'concat_imgs', (['target_train'], {}), '(target_train)\n', (2846, 2860), False, 'from nilearn.image import concat_imgs\n'), ((3338, 3366), 'numpy.mean', 'np.mean', (['masked_imgs'], {'axis': '(0)'}), '(masked_imgs, axis=0)\n', (3345, 3366), True, 'import numpy as np\n'), ((4184, 4257), 'fmralign.template_alignment.TemplateAlignment', 'TemplateAlignment', ([], {'n_pieces': '(150)', 'alignment_method': '"""ridge_cv"""', 'mask': 'masker'}), "(n_pieces=150, alignment_method='ridge_cv', mask=masker)\n", (4201, 4257), False, 'from fmralign.template_alignment import TemplateAlignment\n'), ((5278, 5316), 'nilearn.image.index_img', 'index_img', (['average_subject', 'test_index'], {}), '(average_subject, test_index)\n', (5287, 5316), False, 'from nilearn.image import index_img\n'), ((6023, 6090), 'fmralign._utils.voxelwise_correlation', 'voxelwise_correlation', (['target_test', 'prediction_from_average', 'masker'], {}), '(target_test, prediction_from_average, masker)\n', (6044, 6090), False, 'from fmralign._utils import voxelwise_correlation\n'), ((6113, 6184), 'fmralign._utils.voxelwise_correlation', 'voxelwise_correlation', (['target_test', 'prediction_from_template[0]', 'masker'], {}), '(target_test, prediction_from_template[0], masker)\n', (6134, 6184), False, 'from fmralign._utils import voxelwise_correlation\n'), ((6400, 6490), 'nilearn.plotting.plot_stat_map', 'plotting.plot_stat_map', (['average_score'], {'display_mode': '"""z"""', 'vmax': '(1)', 'cut_coords': '[-15, -5]'}), "(average_score, display_mode='z', vmax=1, cut_coords=\n [-15, -5])\n", (6422, 6490), False, 'from nilearn 
import plotting\n'), ((6574, 6665), 'nilearn.plotting.plot_stat_map', 'plotting.plot_stat_map', (['template_score'], {'display_mode': '"""z"""', 'cut_coords': '[-15, -5]', 'vmax': '(1)'}), "(template_score, display_mode='z', cut_coords=[-15, -\n 5], vmax=1)\n", (6596, 6665), False, 'from nilearn import plotting\n'), ((1848, 1878), 'nilearn.input_data.NiftiMasker', 'NiftiMasker', ([], {'mask_img': 'mask_img'}), '(mask_img=mask_img)\n', (1859, 1878), False, 'from nilearn.input_data import NiftiMasker\n'), ((2451, 2471), 'nilearn.image.concat_imgs', 'concat_imgs', (['imgs[i]'], {}), '(imgs[i])\n', (2462, 2471), False, 'from nilearn.image import concat_imgs\n')] |
from __future__ import print_function
import sys
import getopt
import rl_env
import numpy as np
from rulebased_agent import RulebasedAgent
from agents.random_agent import RandomAgent
from agents.simple_agent import SimpleAgent
from internal_agent import InternalAgent
from outer_agent import OuterAgent
from iggi_agent import IGGIAgent
from legal_random_agent import LegalRandomAgent
from flawed_agent import FlawedAgent
from piers_agent import PiersAgent
from van_den_bergh_agent import VanDenBerghAgent
import run_paired_experiment
from rainbow_agent import RainbowAgent
class BehavioralRunner(object):
  """Plays paired Hanabi games between two agents and aggregates behavioral stats."""

  def __init__(self, flags, a1, a2, lenient=False):
    """Set up the Hanabi environment and remember the two agents.

    Args:
      flags: dict with at least 'players' and 'num_episodes'.
      a1, a2: the two agents occupying seats 0 and 1.
      lenient: forwarded to the episode runner (scoring mode);
        presumably negative rewards are ignored when True -- confirm
        against run_paired_experiment.
    """
    self.flags = flags
    self.agent_config = {'players': flags['players']}
    self.environment = rl_env.make('Hanabi-Full', num_players=flags['players'])
    self.a1 = a1
    self.a2 = a2
    self.lenient = lenient

  def run(self):
    """Run the configured number of episodes and accumulate statistics.

    Returns a 6-tuple:
      (list of per-episode returns,
       hints given / hints possible, per player,
       information plays / plays, per player,
       points scored per episode, per player,
       mistakes made per episode, per player,
       bombed games per episode as a float)
    """
    stacker = run_paired_experiment.create_obs_stacker(self.environment)
    episodes = self.flags['num_episodes']
    n_players = self.environment.players
    # Per-player accumulators summed over all episodes.
    hints_given_sum = np.zeros(n_players)
    hints_possible_sum = np.zeros(n_players)
    info_plays_sum = np.zeros(n_players)
    plays_sum = np.zeros(n_players)
    points_sum = np.zeros(n_players)
    mistakes_sum = np.zeros(n_players)
    bombed_total = 0
    rewards = []
    for _ in range(episodes):
      # Episode length and the lr/sr fields are not used here.
      (_length, episode_return, _lr, _sr, num_bombed,
       hg, hp, tip, plays, points, mistakes) = \
          run_paired_experiment.run_episode_behavioral(
              self.a1, self.a2, self.lenient, self.environment, stacker)
      rewards.append(episode_return)
      hints_given_sum += hg
      hints_possible_sum += hp
      info_plays_sum += tip
      plays_sum += plays
      points_sum += points
      mistakes_sum += mistakes
      bombed_total += num_bombed
    return (rewards,
            hints_given_sum / hints_possible_sum,
            info_plays_sum / plays_sum,
            points_sum / episodes,
            mistakes_sum / episodes,
            float(bombed_total) / float(episodes))
if __name__ == "__main__":
  # Default configuration; each value may be overridden on the command line
  # and is coerced back to the default's type below.
  flags = {'players': 2, 'num_episodes': 1, 'agent_class': 'SimpleAgent'}
  options, arguments = getopt.getopt(sys.argv[1:], '',
                                      ['players=',
                                       'num_episodes=',
                                       'agent_class='])
  if arguments:
    sys.exit('usage: rl_env_example.py [options]\n'
             '--players       number of players in the game.\n'
             '--num_episodes  number of game episodes to run.\n'
             '--agent_class   {}'.format(' or '.join(AGENT_CLASSES.keys())))
  for flag, value in options:
    flag = flag[2:]  # Strip leading --.
    flags[flag] = type(flags[flag])(value)
  results = []
  # Round-robin: evaluate every ordered pairing of agents.
  # NOTE(review): AGENT_CLASSES must be a mapping of agent names defined
  # elsewhere in this module -- confirm it exists before running.
  for name1 in AGENT_CLASSES:
    for name2 in AGENT_CLASSES:
      # BUG FIX: the runner class defined above is BehavioralRunner (there is
      # no `Runner` in this module), and run() returns a 6-tuple whose FIRST
      # element is the list of per-episode rewards -- average only that list
      # instead of np.average() over the whole heterogeneous tuple.
      runner = BehavioralRunner(flags, name1, name2)
      rewards = runner.run()[0]
      results.append([name1, name2, np.average(rewards)])
  for r in results:
    print(r)
  print(len(results))
print(len(results)) | [
"getopt.getopt",
"numpy.zeros",
"run_paired_experiment.run_episode_behavioral",
"rl_env.make",
"run_paired_experiment.create_obs_stacker"
] | [((4980, 5058), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '""""""', "['players=', 'num_episodes=', 'agent_class=']"], {}), "(sys.argv[1:], '', ['players=', 'num_episodes=', 'agent_class='])\n", (4993, 5058), False, 'import getopt\n'), ((814, 870), 'rl_env.make', 'rl_env.make', (['"""Hanabi-Full"""'], {'num_players': "flags['players']"}), "('Hanabi-Full', num_players=flags['players'])\n", (825, 870), False, 'import rl_env\n'), ((992, 1050), 'run_paired_experiment.create_obs_stacker', 'run_paired_experiment.create_obs_stacker', (['self.environment'], {}), '(self.environment)\n', (1032, 1050), False, 'import run_paired_experiment\n'), ((1118, 1152), 'numpy.zeros', 'np.zeros', (['self.environment.players'], {}), '(self.environment.players)\n', (1126, 1152), True, 'import numpy as np\n'), ((1174, 1208), 'numpy.zeros', 'np.zeros', (['self.environment.players'], {}), '(self.environment.players)\n', (1182, 1208), True, 'import numpy as np\n'), ((1239, 1273), 'numpy.zeros', 'np.zeros', (['self.environment.players'], {}), '(self.environment.players)\n', (1247, 1273), True, 'import numpy as np\n'), ((1290, 1324), 'numpy.zeros', 'np.zeros', (['self.environment.players'], {}), '(self.environment.players)\n', (1298, 1324), True, 'import numpy as np\n'), ((1345, 1379), 'numpy.zeros', 'np.zeros', (['self.environment.players'], {}), '(self.environment.players)\n', (1353, 1379), True, 'import numpy as np\n'), ((1400, 1434), 'numpy.zeros', 'np.zeros', (['self.environment.players'], {}), '(self.environment.players)\n', (1408, 1434), True, 'import numpy as np\n'), ((1601, 1712), 'run_paired_experiment.run_episode_behavioral', 'run_paired_experiment.run_episode_behavioral', (['self.a1', 'self.a2', 'self.lenient', 'self.environment', 'obs_stacker'], {}), '(self.a1, self.a2, self.lenient,\n self.environment, obs_stacker)\n', (1645, 1712), False, 'import run_paired_experiment\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 19:20:27 2018
@author: <NAME>
@email: <EMAIL>
"""
import json
import os
import numpy
import pandas
import unittest
from research_engine.engine import ResearchEngine
from scipy.sparse.csr import csr_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
class TestResearchEngine(unittest.TestCase):
    """Unit tests for ResearchEngine: loading, preprocessing, indexing, search."""

    def setUp(self):
        # Two toy records with three text fields each, dumped to a JSON
        # descriptor file that the engine reads back.
        self.example_list = [{'A':'unbelievably not a','B':'kung fu','C':'pandas'},{'A':'tRoubles','B':'liTTle Chinatown','C':'Town'}]
        self.json_descriptor = 'descriptor.json'
        self.expected_frame = pandas.DataFrame.from_records(self.example_list)
        with open(self.json_descriptor, 'w') as fp:
            json.dump(self.example_list, fp)

    def tearDown(self):
        # Remove the descriptor file written by setUp.
        os.remove(self.json_descriptor)

    def _populated_engine(self):
        # Helper: engine built on the test descriptor with its frame loaded.
        engine = ResearchEngine(self.json_descriptor)
        engine.populate_data_frame()
        return engine

    def test_init(self):
        engine = ResearchEngine('Pluto')
        self.assertEqual('Pluto', engine.json_descriptor)
        self.assertIsNone(engine.vectorizers)
        self.assertIsNone(engine.inverse_indexes)

    def test_populate_data_frame(self):
        engine = self._populated_engine()
        pandas.testing.assert_frame_equal(self.expected_frame, engine.dataframe)

    def test_preprocess_corpus(self):
        engine = self._populated_engine()
        pandas.testing.assert_series_equal(
            pandas.Series(['unbeliev', 'troubl'], name='A'),
            engine._preprocess_corpus('A'))
        pandas.testing.assert_series_equal(
            pandas.Series(['panda', 'town'], name='C'),
            engine._preprocess_corpus('C'))

    def test_preprocess_query(self):
        engine = self._populated_engine()
        self.assertEqual('magic cat', engine._preprocess_query('I am a magical cat!'))

    def test_create_TFIDF_indexes(self):
        engine = self._populated_engine()
        keys = ['A', 'C']
        engine.create_TFIDF_indexes(keys)
        expected_matrices = [numpy.array([[0., 1.], [1., 0.]]),
                             numpy.array([[1., 0.], [0., 1.]])]
        for index, expected in enumerate(expected_matrices):
            self.assertTrue(isinstance(engine.vectorizers[index], TfidfVectorizer))
            self.assertTrue(isinstance(engine.inverse_indexes[index], csr_matrix))
            numpy.testing.assert_array_equal(
                engine.inverse_indexes[index].todense(), expected)

    def test_research_and_rank(self):
        engine = self._populated_engine()
        engine.create_TFIDF_indexes(['A', 'C'])
        result = engine.research_and_rank('panda', 1)
        pandas.testing.assert_series_equal(self.expected_frame.iloc[[0]].iloc[0], result.iloc[0])
        engine.create_TFIDF_indexes(['B'])
        result = engine.research_and_rank('little', 1)
        pandas.testing.assert_series_equal(self.expected_frame.iloc[[1]].iloc[0], result.iloc[0])

    def test_sanity_check(self):
        engine = self._populated_engine()
        # Valid keys pass through, unknown keys are dropped, and an empty
        # selection falls back to every column of the frame.
        self.assertCountEqual(['B'], engine._sanity_check(['B']))
        self.assertCountEqual(['A', 'B'], engine._sanity_check(['A', 'B']))
        self.assertCountEqual(['B', 'A', 'C'], engine._sanity_check(['B', 'A', 'C']))
        self.assertCountEqual(['A', 'B'], engine._sanity_check(['A', 'B', 'D']))
        self.assertCountEqual(['A', 'B', 'C'], engine._sanity_check([]))
"json.dump",
"pandas.testing.assert_frame_equal",
"os.remove",
"research_engine.engine.ResearchEngine",
"numpy.array",
"pandas.Series",
"pandas.DataFrame.from_records",
"pandas.testing.assert_series_equal"
] | [((624, 672), 'pandas.DataFrame.from_records', 'pandas.DataFrame.from_records', (['self.example_list'], {}), '(self.example_list)\n', (653, 672), False, 'import pandas\n'), ((803, 834), 'os.remove', 'os.remove', (['self.json_descriptor'], {}), '(self.json_descriptor)\n', (812, 834), False, 'import os\n'), ((878, 901), 'research_engine.engine.ResearchEngine', 'ResearchEngine', (['"""Pluto"""'], {}), "('Pluto')\n", (892, 901), False, 'from research_engine.engine import ResearchEngine\n'), ((1132, 1168), 'research_engine.engine.ResearchEngine', 'ResearchEngine', (['self.json_descriptor'], {}), '(self.json_descriptor)\n', (1146, 1168), False, 'from research_engine.engine import ResearchEngine\n'), ((1214, 1286), 'pandas.testing.assert_frame_equal', 'pandas.testing.assert_frame_equal', (['self.expected_frame', 'engine.dataframe'], {}), '(self.expected_frame, engine.dataframe)\n', (1247, 1286), False, 'import pandas\n'), ((1351, 1387), 'research_engine.engine.ResearchEngine', 'ResearchEngine', (['self.json_descriptor'], {}), '(self.json_descriptor)\n', (1365, 1387), False, 'from research_engine.engine import ResearchEngine\n'), ((1805, 1841), 'research_engine.engine.ResearchEngine', 'ResearchEngine', (['self.json_descriptor'], {}), '(self.json_descriptor)\n', (1819, 1841), False, 'from research_engine.engine import ResearchEngine\n'), ((2104, 2140), 'research_engine.engine.ResearchEngine', 'ResearchEngine', (['self.json_descriptor'], {}), '(self.json_descriptor)\n', (2118, 2140), False, 'from research_engine.engine import ResearchEngine\n'), ((2741, 2777), 'research_engine.engine.ResearchEngine', 'ResearchEngine', (['self.json_descriptor'], {}), '(self.json_descriptor)\n', (2755, 2777), False, 'from research_engine.engine import ResearchEngine\n'), ((2943, 3036), 'pandas.testing.assert_series_equal', 'pandas.testing.assert_series_equal', (['self.expected_frame.iloc[[0]].iloc[0]', 'result.iloc[0]'], {}), '(self.expected_frame.iloc[[0]].iloc[0],\n result.iloc[0])\n', 
(2977, 3036), False, 'import pandas\n'), ((3166, 3259), 'pandas.testing.assert_series_equal', 'pandas.testing.assert_series_equal', (['self.expected_frame.iloc[[1]].iloc[0]', 'result.iloc[0]'], {}), '(self.expected_frame.iloc[[1]].iloc[0],\n result.iloc[0])\n', (3200, 3259), False, 'import pandas\n'), ((3307, 3343), 'research_engine.engine.ResearchEngine', 'ResearchEngine', (['self.json_descriptor'], {}), '(self.json_descriptor)\n', (3321, 3343), False, 'from research_engine.engine import ResearchEngine\n'), ((737, 769), 'json.dump', 'json.dump', (['self.example_list', 'fp'], {}), '(self.example_list, fp)\n', (746, 769), False, 'import json\n'), ((1528, 1575), 'pandas.Series', 'pandas.Series', (["['unbeliev', 'troubl']"], {'name': '"""A"""'}), "(['unbeliev', 'troubl'], name='A')\n", (1541, 1575), False, 'import pandas\n'), ((1693, 1735), 'pandas.Series', 'pandas.Series', (["['panda', 'town']"], {'name': '"""C"""'}), "(['panda', 'town'], name='C')\n", (1706, 1735), False, 'import pandas\n'), ((2284, 2321), 'numpy.array', 'numpy.array', (['[[0.0, 1.0], [1.0, 0.0]]'], {}), '([[0.0, 1.0], [1.0, 0.0]])\n', (2295, 2321), False, 'import numpy\n'), ((2318, 2355), 'numpy.array', 'numpy.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (2329, 2355), False, 'import numpy\n')] |
#!/usr/bin/env python
#------------------------------------------------------------------------------
# plot_imsrg_flow.py
#
# author: <NAME>
# version: 1.0.0
# date: Dec 6, 2016
#
# tested with Python v2.7
#
#------------------------------------------------------------------------------
from sys import argv
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import SymLogNorm, Normalize
from mpl_toolkits.axes_grid1 import AxesGrid, make_axes_locatable
import numpy as np
from numpy import array, dot, diag, reshape
#------------------------------------------------------------------------------
# plot helpers
#------------------------------------------------------------------------------
# format tick labels using LaTeX-like math fonts
def myLabels(x, pos):
  """Tick formatter: wrap the tick value in $...$ so it renders in math font."""
  return '${}$'.format(x)
def myLogLabels(x, pos):
  """Tick formatter for log axes: render x as a power of ten, e.g. 100 -> $10^{2}$."""
  exponent = int(np.log10(x))  # truncate toward zero, matching '%d'
  return '$10^{{{}}}$'.format(exponent)
# save these settings for use in both following plots
def myPlotSettings(ax):
  """Apply the shared tick/spine styling used by every plot in this script."""
  ax.minorticks_on()
  # Heavier, longer ticks for readability on printed figures.
  ax.tick_params(axis='both', which='major', width=1.5, length=8)
  ax.tick_params(axis='both', which='minor', width=1.5, length=5)
  ax.tick_params(axis='both', width=2, length=10, labelsize=20)
  # Thicken the full axes frame.
  for side in ('left', 'right', 'top', 'bottom'):
    ax.spines[side].set_linewidth(2)
  return
#------------------------------------------------------------------------------
# plot flow
#------------------------------------------------------------------------------
def plot_energies(data, exact, filename):
  """Plot the flowing energy E(s) with its 2nd/3rd-order corrections against
  the exact result (horizontal dashed reference line), and save as PDF.
  """
  fig, ax = plt.subplots()
  # exact result as a horizontal dashed reference across the whole s range
  ax.semilogx([1.0e-8, 1.0e-4, 1.0, 100], [exact, exact, exact, exact],
              linewidth=2, color='black', linestyle='dashed', dashes=(10, 5))
  ax.semilogx(data[:, 0], data[:, 1], color='blue', marker='o',
              markersize=9, label='$E$')
  ax.semilogx(data[:, 0], data[:, 1] + data[:, 2], color='red', marker='s',
              markersize=9, label='$+\Delta E^{(2)}$')
  ax.semilogx(data[:, 0], data[:, 1] + data[:, 2] + data[:, 3], color='green',
              marker='D', markersize=9, label='$+\Delta E^{(3)}$')
  myPlotSettings(ax)
  ax.xaxis.set_major_formatter(FuncFormatter(myLogLabels))
  ax.yaxis.set_major_formatter(FuncFormatter(myLabels))
  ax.set_xlim([0.00006, 13])
  # pad the autoscaled y range slightly so markers are not clipped
  ymin, ymax = ax.get_ylim()
  ax.set_ylim(ymin - 0.005, ymax + 0.005)
  ax.set_xlabel('$s$', fontsize=20)
  ax.set_ylabel('$E\,\mathrm{[a.u.]}$', fontsize=20)
  ax.legend(loc=1, borderaxespad=0.5)
  fig.savefig("%s.pdf" % (filename), bbox_inches="tight", pad_inches=0.05)
  plt.show()
  plt.close()
  return
def plot_norms_loglog(data, filename):
  """Log-log plot of the generator norm and off-diagonal norm along the flow."""
  fig, ax = plt.subplots()
  ax.loglog(data[:, 0], data[:, 6], basex=10, color='blue', marker='o',
            markersize=9, label='$||\eta||$')
  ax.loglog(data[:, 0], data[:, 8], basex=10, color='red', marker='s',
            markersize=9, label='$||\Gamma_{od}||$')
  myPlotSettings(ax)
  ax.xaxis.set_major_formatter(FuncFormatter(myLogLabels))
  ax.yaxis.set_major_formatter(FuncFormatter(myLogLabels))
  ax.set_xlabel('$s$', fontsize=20)
  ax.set_ylabel('$||\eta||, ||\Gamma_{od}||\, [\mathrm{a.u.}]$', fontsize=20)
  ax.legend(bbox_to_anchor=(0.05, 0.05), loc=3, borderaxespad=0.5)
  # strip the extension from the data file name for the output PDF
  fig.savefig("%s.norms.pdf" % (filename.rsplit(".", 1)[0]),
              bbox_inches="tight", pad_inches=0.05)
  plt.show()
  plt.close()
  return
def plot_norms_semilog(data, filename):
  """Semi-log plot of the generator norm and off-diagonal norm along the flow."""
  fig, ax = plt.subplots()
  ax.semilogy(data[:, 0], data[:, 6], basey=10, color='blue', marker='o',
              markersize=9, label='$||\eta||$')
  ax.semilogy(data[:, 0], data[:, 8], basey=10, color='red', marker='s',
              markersize=9, label='$||\Gamma_{od}||$')
  myPlotSettings(ax)
  ax.xaxis.set_major_formatter(FuncFormatter(myLabels))
  ax.yaxis.set_major_formatter(FuncFormatter(myLogLabels))
  ax.set_xlabel('$s$', fontsize=20)
  ax.set_ylabel('$||\eta||, ||\Gamma_{od}||\, [\mathrm{a.u.}]$', fontsize=20)
  ax.legend(bbox_to_anchor=(0.05, 0.05), loc=3, borderaxespad=0.5)
  # strip the extension from the data file name for the output PDF
  fig.savefig("%s.norms.semilog.pdf" % (filename.rsplit(".", 1)[0]),
              bbox_inches="tight", pad_inches=0.05)
  plt.show()
  plt.close()
  return
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
def main():
  """CLI entry point: ``plot_imsrg_flow.py <datafile> <exact_energy>``.

  Reads the flow data file given as the first argument and produces the
  energy and norm plots, using the second argument as the exact reference
  energy.
  """
  filename = argv[1]
  # BUG FIX: command-line arguments are strings; the exact energy must be
  # numeric so plot_energies can draw the reference line and autoscale
  # against it (plotting a string y-value fails / misbehaves).
  exact = float(argv[2])
  # read data from file, skipping the two header rows
  data = np.loadtxt(filename, skiprows=2)
  plot_energies(data, exact, filename)
  plot_norms_loglog(data, filename)
  plot_norms_semilog(data, filename)
  return
#------------------------------------------------------------------------------
# make executable
#------------------------------------------------------------------------------
# run the CLI entry point only when executed as a script
if __name__ == "__main__":
  main()
| [
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots",
"numpy.log10",
"matplotlib.ticker.FuncFormatter",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.xlabel",
"m... | [((1560, 1574), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1572, 1574), True, 'import matplotlib.pyplot as plt\n'), ((1578, 1715), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['[1e-08, 0.0001, 1.0, 100]', '[exact, exact, exact, exact]'], {'linewidth': '(2)', 'color': '"""black"""', 'linestyle': '"""dashed"""', 'dashes': '(10, 5)'}), "([1e-08, 0.0001, 1.0, 100], [exact, exact, exact, exact],\n linewidth=2, color='black', linestyle='dashed', dashes=(10, 5))\n", (1590, 1715), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1807), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['data[:, 0]', 'data[:, 1]'], {'color': '"""blue"""', 'marker': '"""o"""', 'markersize': '(9)', 'label': '"""$E$"""'}), "(data[:, 0], data[:, 1], color='blue', marker='o', markersize=9,\n label='$E$')\n", (1726, 1807), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1924), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['data[:, 0]', '(data[:, 1] + data[:, 2])'], {'color': '"""red"""', 'marker': '"""s"""', 'markersize': '(9)', 'label': '"""$+\\\\Delta E^{(2)}$"""'}), "(data[:, 0], data[:, 1] + data[:, 2], color='red', marker='s',\n markersize=9, label='$+\\\\Delta E^{(2)}$')\n", (1816, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1917, 2053), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['data[:, 0]', '(data[:, 1] + data[:, 2] + data[:, 3])'], {'color': '"""green"""', 'marker': '"""D"""', 'markersize': '(9)', 'label': '"""$+\\\\Delta E^{(3)}$"""'}), "(data[:, 0], data[:, 1] + data[:, 2] + data[:, 3], color=\n 'green', marker='D', markersize=9, label='$+\\\\Delta E^{(3)}$')\n", (1929, 2053), True, 'import matplotlib.pyplot as plt\n'), ((2271, 2301), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$s$"""'], {'fontsize': '(20)'}), "('$s$', fontsize=20)\n", (2281, 2301), True, 'import matplotlib.pyplot as plt\n'), ((2304, 2353), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$E\\\\,\\\\mathrm{[a.u.]}$"""'], {'fontsize': '(20)'}), 
"('$E\\\\,\\\\mathrm{[a.u.]}$', fontsize=20)\n", (2314, 2353), True, 'import matplotlib.pyplot as plt\n'), ((2425, 2461), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)', 'borderaxespad': '(0.5)'}), '(loc=1, borderaxespad=0.5)\n', (2435, 2461), True, 'import matplotlib.pyplot as plt\n'), ((2465, 2535), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s.pdf' % filename)"], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.05)'}), "('%s.pdf' % filename, bbox_inches='tight', pad_inches=0.05)\n", (2476, 2535), True, 'import matplotlib.pyplot as plt\n'), ((2538, 2548), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2546, 2548), True, 'import matplotlib.pyplot as plt\n'), ((2551, 2562), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2560, 2562), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2688), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2686, 2688), True, 'import matplotlib.pyplot as plt\n'), ((2692, 2801), 'matplotlib.pyplot.loglog', 'plt.loglog', (['data[:, 0]', 'data[:, 6]'], {'basex': '(10)', 'color': '"""blue"""', 'marker': '"""o"""', 'markersize': '(9)', 'label': '"""$||\\\\eta||$"""'}), "(data[:, 0], data[:, 6], basex=10, color='blue', marker='o',\n markersize=9, label='$||\\\\eta||$')\n", (2702, 2801), True, 'import matplotlib.pyplot as plt\n'), ((2797, 2912), 'matplotlib.pyplot.loglog', 'plt.loglog', (['data[:, 0]', 'data[:, 8]'], {'basex': '(10)', 'color': '"""red"""', 'marker': '"""s"""', 'markersize': '(9)', 'label': '"""$||\\\\Gamma_{od}||$"""'}), "(data[:, 0], data[:, 8], basex=10, color='red', marker='s',\n markersize=9, label='$||\\\\Gamma_{od}||$')\n", (2807, 2912), True, 'import matplotlib.pyplot as plt\n'), ((3049, 3079), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$s$"""'], {'fontsize': '(20)'}), "('$s$', fontsize=20)\n", (3059, 3079), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3158), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$||\\\\eta||, ||\\\\Gamma_{od}||\\\\, 
[\\\\mathrm{a.u.}]$"""'], {'fontsize': '(20)'}), "('$||\\\\eta||, ||\\\\Gamma_{od}||\\\\, [\\\\mathrm{a.u.}]$', fontsize=20)\n", (3092, 3158), True, 'import matplotlib.pyplot as plt\n'), ((3158, 3223), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.05, 0.05)', 'loc': '(3)', 'borderaxespad': '(0.5)'}), '(bbox_to_anchor=(0.05, 0.05), loc=3, borderaxespad=0.5)\n', (3168, 3223), True, 'import matplotlib.pyplot as plt\n'), ((3325, 3335), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3333, 3335), True, 'import matplotlib.pyplot as plt\n'), ((3338, 3349), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3347, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3462, 3476), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3474, 3476), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3591), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['data[:, 0]', 'data[:, 6]'], {'basey': '(10)', 'color': '"""blue"""', 'marker': '"""o"""', 'markersize': '(9)', 'label': '"""$||\\\\eta||$"""'}), "(data[:, 0], data[:, 6], basey=10, color='blue', marker='o',\n markersize=9, label='$||\\\\eta||$')\n", (3492, 3591), True, 'import matplotlib.pyplot as plt\n'), ((3587, 3704), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['data[:, 0]', 'data[:, 8]'], {'basey': '(10)', 'color': '"""red"""', 'marker': '"""s"""', 'markersize': '(9)', 'label': '"""$||\\\\Gamma_{od}||$"""'}), "(data[:, 0], data[:, 8], basey=10, color='red', marker='s',\n markersize=9, label='$||\\\\Gamma_{od}||$')\n", (3599, 3704), True, 'import matplotlib.pyplot as plt\n'), ((3838, 3868), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$s$"""'], {'fontsize': '(20)'}), "('$s$', fontsize=20)\n", (3848, 3868), True, 'import matplotlib.pyplot as plt\n'), ((3871, 3947), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$||\\\\eta||, ||\\\\Gamma_{od}||\\\\, [\\\\mathrm{a.u.}]$"""'], {'fontsize': '(20)'}), "('$||\\\\eta||, ||\\\\Gamma_{od}||\\\\, [\\\\mathrm{a.u.}]$', 
fontsize=20)\n", (3881, 3947), True, 'import matplotlib.pyplot as plt\n'), ((3947, 4012), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.05, 0.05)', 'loc': '(3)', 'borderaxespad': '(0.5)'}), '(bbox_to_anchor=(0.05, 0.05), loc=3, borderaxespad=0.5)\n', (3957, 4012), True, 'import matplotlib.pyplot as plt\n'), ((4122, 4132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4130, 4132), True, 'import matplotlib.pyplot as plt\n'), ((4135, 4146), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4144, 4146), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4446), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'skiprows': '(2)'}), '(filename, skiprows=2)\n', (4424, 4446), True, 'import numpy as np\n'), ((894, 905), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (902, 905), True, 'import numpy as np\n'), ((2092, 2118), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['myLogLabels'], {}), '(myLogLabels)\n', (2105, 2118), False, 'from matplotlib.ticker import FuncFormatter\n'), ((2151, 2174), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['myLabels'], {}), '(myLabels)\n', (2164, 2174), False, 'from matplotlib.ticker import FuncFormatter\n'), ((2960, 2986), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['myLogLabels'], {}), '(myLogLabels)\n', (2973, 2986), False, 'from matplotlib.ticker import FuncFormatter\n'), ((3019, 3045), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['myLogLabels'], {}), '(myLogLabels)\n', (3032, 3045), False, 'from matplotlib.ticker import FuncFormatter\n'), ((3752, 3775), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['myLabels'], {}), '(myLabels)\n', (3765, 3775), False, 'from matplotlib.ticker import FuncFormatter\n'), ((3808, 3834), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['myLogLabels'], {}), '(myLogLabels)\n', (3821, 3834), False, 'from matplotlib.ticker import FuncFormatter\n')] |
from __future__ import division, print_function
import torch
import numpy as np
try:
import librosa
except ImportError:
librosa = None
class Compose(object):
    """Chain several transforms into a single callable.

    Each transform is applied in order to the output of the previous one.

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.
    Example:
        >>> transforms.Compose([
        >>>     transforms.Scale(),
        >>>     transforms.PadTrim(max_len=16000),
        >>> ])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, audio):
        result = audio
        for transform in self.transforms:
            result = transform(result)
        return result

    def __repr__(self):
        # One indented line per transform, wrapped in "Compose( ... )".
        header = self.__class__.__name__ + '('
        body = ''.join('\n    {0}'.format(t) for t in self.transforms)
        return header + body + '\n)'
class Scale(object):
    """Scale an integer-valued audio tensor (represented as a FloatTensor)
    to a floating point number between -1.0 and 1.0, by dividing by the
    maximum value representable at the input's bit depth ("precision",
    not to be confused with "bit rate").

    Args:
        factor (int): maximum value of input tensor. default: 2**31
            (i.e. 32-bit depth; pass 2**15 for 16-bit audio).
    """
    # NOTE: the original docstring claimed a 16-bit default, but the actual
    # default factor has always been 2**31 (32-bit). The doc now matches.
    def __init__(self, factor=2**31):
        self.factor = factor
    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor of audio of size (Samples x Channels)
        Returns:
            Tensor: Scaled by the scale factor. (default between -1.0 and 1.0)
        """
        # Promote integer tensors before dividing, otherwise the division
        # would truncate everything to zero.
        if isinstance(tensor, (torch.LongTensor, torch.IntTensor)):
            tensor = tensor.float()
        return tensor / self.factor
    def __repr__(self):
        return self.__class__.__name__ + '()'
class PadTrim(object):
    """Pad or trim a 1d-Tensor (Signal or Labels) to a fixed length.

    Args:
        max_len (int): Length to which the tensor will be padded/trimmed.
        fill_value: Constant used for the padded samples (default 0).
    """

    def __init__(self, max_len, fill_value=0):
        self.max_len = max_len
        self.fill_value = fill_value

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor of audio of size (Samples x Channels)
        Returns:
            Tensor: (max_len x Channels)
        """
        n_samples = tensor.size(0)
        if n_samples < self.max_len:
            # Append constant-valued rows until the target length is reached.
            padding = self.fill_value * torch.ones(
                (self.max_len - n_samples, tensor.size(1)))
            tensor = torch.cat((tensor, padding.type_as(tensor)), dim=0)
        elif n_samples > self.max_len:
            # Keep only the first max_len samples.
            tensor = tensor[:self.max_len, :]
        return tensor

    def __repr__(self):
        return self.__class__.__name__ + '(max_len={0})'.format(self.max_len)
class DownmixMono(object):
    """Downmix any stereo signal to mono by averaging the channels.

    Inputs:
        tensor (Tensor): Tensor of audio of size (Samples x Channels)
    Returns:
        tensor (Tensor) (Samples x 1):
    """

    def __init__(self):
        pass

    def __call__(self, tensor):
        # Integer tensors are promoted so the mean is not truncated.
        if isinstance(tensor, (torch.LongTensor, torch.IntTensor)):
            tensor = tensor.float()
        if tensor.size(1) > 1:
            # Average over the channel axis, keeping it as a size-1 dim.
            tensor = torch.mean(tensor.float(), 1, True)
        return tensor

    def __repr__(self):
        return self.__class__.__name__ + '()'
class LC2CL(object):
    """Permute a 2d tensor from samples (Length) x Channels to Channels x
    samples (Length)
    """

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor of audio of shape (Samples x Channels)
        Returns:
            tensor (Tensor): Transposed tensor of shape (Channels x Samples)
        """
        # t() is the 2-D transpose; contiguous() gives a compact copy.
        return tensor.t().contiguous()

    def __repr__(self):
        return self.__class__.__name__ + '()'
class MEL(object):
    """Create MEL Spectrograms from a raw audio signal. Relatively pretty slow.

    Keyword arguments are forwarded verbatim to
    ``librosa.feature.melspectrogram``.
    Usage (see librosa.feature.melspectrogram docs):
        MEL(sr=16000, n_fft=1600, hop_length=800, n_mels=64)
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor of audio of size (samples x channels)
        Returns:
            tensor (Tensor): n_mels x hops x channels (BxLxC), where n_mels is
                the number of mel bins, hops is the number of hops, and
                channels is unchanged.
        """
        # librosa is an optional dependency; degrade to a no-op without it.
        if librosa is None:
            print("librosa not installed, cannot create spectrograms")
            return tensor
        # One (n_mels, hops) spectrogram per channel.
        spectrograms = [
            librosa.feature.melspectrogram(
                tensor[:, channel].numpy(), **self.kwargs)
            for channel in range(tensor.size(1))
        ]
        stacked = np.stack(spectrograms, 2)  # (n_mels, hops, channels)
        return torch.from_numpy(stacked).type_as(tensor)

    def __repr__(self):
        return self.__class__.__name__ + '()'
class BLC2CBL(object):
    """Permute a 3d tensor from Bands x samples (Length) x Channels to Channels x
    Bands x samples (Length)
    """

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor of spectrogram with shape (BxLxC)
        Returns:
            tensor (Tensor): Tensor of spectrogram with shape (CxBxL)
        """
        # Two pairwise transposes: (B,L,C) -> (C,L,B) -> (C,B,L),
        # equivalent to permute(2, 0, 1).
        return tensor.transpose(0, 2).transpose(1, 2).contiguous()

    def __repr__(self):
        return self.__class__.__name__ + '()'
class MuLawEncoding(object):
    """Encode signal based on mu-law companding. For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    The input is assumed to have been scaled to [-1, 1]; the output is an
    integer signal with values in [0, quantization_channels - 1].

    Args:
        quantization_channels (int): Number of channels. default: 256
    """

    def __init__(self, quantization_channels=256):
        self.qc = quantization_channels

    def __call__(self, x):
        """
        Args:
            x (FloatTensor/LongTensor or ndarray)
        Returns:
            x_mu (LongTensor or ndarray)
        """
        mu = self.qc - 1.
        if isinstance(x, np.ndarray):
            # Logarithmic compression, then quantization to integer bins.
            compressed = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
            x_mu = ((compressed + 1) / 2 * mu + 0.5).astype(int)
        elif isinstance(x, (torch.Tensor, torch.LongTensor)):
            if isinstance(x, torch.LongTensor):
                x = x.float()
            mu = torch.FloatTensor([mu])
            compressed = (torch.sign(x) * torch.log1p(mu * torch.abs(x))
                          / torch.log1p(mu))
            x_mu = ((compressed + 1) / 2 * mu + 0.5).long()
        return x_mu

    def __repr__(self):
        return self.__class__.__name__ + '()'
class MuLawExpanding(object):
    """Decode mu-law encoded signal. For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    The input is expected to hold values in [0, quantization_channels - 1];
    the output is a signal scaled to [-1, 1].

    Args:
        quantization_channels (int): Number of channels. default: 256
    """

    def __init__(self, quantization_channels=256):
        self.qc = quantization_channels

    def __call__(self, x_mu):
        """
        Args:
            x_mu (FloatTensor/LongTensor or ndarray)
        Returns:
            x (FloatTensor or ndarray)
        """
        mu = self.qc - 1.
        if isinstance(x_mu, np.ndarray):
            # Map bins back to [-1, 1], then invert the log compression.
            signal = (x_mu / mu) * 2 - 1.
            x = np.sign(signal) * (
                np.exp(np.abs(signal) * np.log1p(mu)) - 1.) / mu
        elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
            if isinstance(x_mu, torch.LongTensor):
                x_mu = x_mu.float()
            mu = torch.FloatTensor([mu])
            signal = (x_mu / mu) * 2 - 1.
            x = (torch.sign(signal) *
                 (torch.exp(torch.abs(signal) * torch.log1p(mu)) - 1.) / mu)
        return x

    def __repr__(self):
        return self.__class__.__name__ + '()'
| [
"numpy.stack",
"torch.from_numpy",
"numpy.abs",
"torch.FloatTensor",
"torch.cat",
"torch.log1p",
"torch.sign",
"torch.abs",
"numpy.sign",
"librosa.feature.melspectrogram",
"numpy.log1p"
] | [((4813, 4827), 'numpy.stack', 'np.stack', (['L', '(2)'], {}), '(L, 2)\n', (4821, 4827), True, 'import numpy as np\n'), ((2477, 2508), 'torch.cat', 'torch.cat', (['(tensor, pad)'], {'dim': '(0)'}), '((tensor, pad), dim=0)\n', (2486, 2508), False, 'import torch\n'), ((4685, 4737), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['nparr'], {}), '(nparr, **self.kwargs)\n', (4715, 4737), False, 'import librosa\n'), ((4873, 4892), 'torch.from_numpy', 'torch.from_numpy', (['L'], {}), '(L)\n', (4889, 4892), False, 'import torch\n'), ((6322, 6334), 'numpy.log1p', 'np.log1p', (['mu'], {}), '(mu)\n', (6330, 6334), True, 'import numpy as np\n'), ((6551, 6574), 'torch.FloatTensor', 'torch.FloatTensor', (['[mu]'], {}), '([mu])\n', (6568, 6574), False, 'import torch\n'), ((7860, 7883), 'torch.FloatTensor', 'torch.FloatTensor', (['[mu]'], {}), '([mu])\n', (7877, 7883), False, 'import torch\n'), ((6282, 6292), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (6289, 6292), True, 'import numpy as np\n'), ((6690, 6705), 'torch.log1p', 'torch.log1p', (['mu'], {}), '(mu)\n', (6701, 6705), False, 'import torch\n'), ((7633, 7643), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (7640, 7643), True, 'import numpy as np\n'), ((6594, 6607), 'torch.sign', 'torch.sign', (['x'], {}), '(x)\n', (6604, 6607), False, 'import torch\n'), ((7939, 7952), 'torch.sign', 'torch.sign', (['x'], {}), '(x)\n', (7949, 7952), False, 'import torch\n'), ((6309, 6318), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (6315, 6318), True, 'import numpy as np\n'), ((6674, 6686), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (6683, 6686), False, 'import torch\n'), ((7654, 7663), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (7660, 7663), True, 'import numpy as np\n'), ((7666, 7678), 'numpy.log1p', 'np.log1p', (['mu'], {}), '(mu)\n', (7674, 7678), True, 'import numpy as np\n'), ((7966, 7978), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (7975, 7978), False, 'import torch\n'), ((7981, 7996), 
'torch.log1p', 'torch.log1p', (['mu'], {}), '(mu)\n', (7992, 7996), False, 'import torch\n')] |
"""Network models and submodels.
The :class:`Model` class is used to encapsulate a set of Theano shared
variables (model parameters), and can create symbolic expressions for model
outputs and loss functions.
This module also contains subclasses, such as :class:`Linear`, that function
as building blocks for more complex networks.
"""
from collections import OrderedDict
import pickle
import sys
import numpy as np
import theano
from theano.ifelse import ifelse
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano import tensor as T
from . import init
from . import search
from .fun import train_mode, function
from .utils import expand_to_batch, softmax_masked, softmax_3d, softmax_4d
class Model:
    """Base class for neural network models.
    Attributes
    ----------
    name : str
        Name of the model.
    params : OrderedDict of str -> :class:`theano.compile.sharedvalue.SharedVariable`
        Mapping from parameter names to Theano shared variables. Note that
        submodel parameters are not included, so this should normally not be
        accessed directly, rather use `self.parameters()`.
    regularization : list of Theano symbolic expressions
        These expressions should all be added to the loss function when
        optimizing. Use `self.regularize()` to modify.
    """
    def __init__(self, name):
        """Initialize an empty model.
        Parameters
        ----------
        name : str
            Name of the model.
        """
        self.name = name
        self.params = OrderedDict()
        self.regularization = []
        self.submodels = OrderedDict()
    def loss(self):
        """Part of the loss function that is independent of inputs."""
        # Regularization terms of this model plus (recursively) those of
        # every submodel; summed into a single symbolic scalar.
        terms = [submodel.loss() for submodel in self.submodels.values()] \
                + self.regularization
        return sum(terms, T.as_tensor_variable(0.0))
    def parameters(self, include_submodels=True):
        """Iterate over the parameters of this model and its submodels.
        Each value produced by the iterator is a tuple (name, value), where
        the name is a tuple of strings describing the hierarchy of submodels,
        e.g. ('hidden', 'b'), and the value is a Theano shared variable.
        Parameters
        ----------
        include_submodels : bool
            If ``True`` (default), also iterate over submodel parameters.
        """
        for name, p in self.params.items():
            yield ((name,), p)
        if include_submodels:
            for submodel in self.submodels.values():
                for name, p in submodel.parameters():
                    # Prefix the submodel name to build the full path tuple.
                    yield ((submodel.name,) + name, p)
    def summarize(self, grads, f=sys.stdout):
        """Print norm/max/min statistics for each parameter and its gradient.

        `grads` must be aligned with `self.parameters()` (same order and
        length); each entry is a numpy array.
        """
        def tensor_stats(m):
            # Compact one-line summary of a numpy array.
            return ', '.join([
                'norm = %g' % np.sqrt((m*m).sum()),
                'maxabs = %g' % np.abs(m).max(),
                'minabs = %g' % np.abs(m).min()])
        def summarize_parameter(name, p, g):
            p_stats = tensor_stats(p)
            g_stats = tensor_stats(g)
            print('%s\n    parameter %s\n    gradient %s' % (
                name, p_stats, g_stats),
                file=f)
        params = list(self.parameters())
        assert len(grads) == len(params)
        for (name, p), grad in zip(params, grads):
            summarize_parameter('.'.join(name), p.get_value(), grad)
        f.flush()
    def parameters_list(self, include_submodels=True):
        """Return a list with parameters, without their names."""
        return list(p for name, p in
                self.parameters(include_submodels=include_submodels))
    def parameter(self, name):
        """Return the parameter with the given name.
        Parameters
        ----------
        name : tuple of str
            Path to variable, e.g. ('hidden', 'b') to find the parameter 'b'
            in the submodel 'hidden'.
        Returns
        -------
        value : :class:`theano.compile.sharedvalue.SharedVariable`
        """
        if not isinstance(name, tuple):
            raise TypeError('Expected tuple, got %s' % type(name))
        if len(name) == 1:
            return self.params[name[0]]
        elif len(name) >= 2:
            # Recurse into the named submodel with the remaining path.
            return self.submodels[name[0]].parameter(name[1:])
        else:
            raise ValueError('Name tuple must not be empty!')
    def parameter_count(self):
        """Return the total number of parameters of the model."""
        return sum(p.get_value(borrow=True).size for _,p in self.parameters())
    def param(self, name, dims, init_f=None,
              value=None, dtype=theano.config.floatX):
        """Create a new parameter, or share an existing one.
        Parameters
        ----------
        name : str
            Name of parameter, this will be used directly in `self.params`
            and used to create `self._name`.
        dims : tuple
            Shape of the parameter vector.
        value : :class:`theano.compile.sharedvalue.SharedVariable`, optional
            If this parameter should be shared, a SharedVariable instance can
            be passed here.
        init_f : (tuple => numpy.ndarray)
            Function used to initialize the parameter vector.
        dtype : str or numpy.dtype
            Data type (default is `theano.config.floatX`)
        Returns
        -------
        p : :class:`theano.compile.sharedvalue.SharedVariable`
        """
        if name in self.params:
            # Re-declaring an existing parameter is only OK when no value is
            # supplied; otherwise it would silently shadow the old one.
            if not value is None:
                raise ValueError('Trying to add a shared parameter (%s), '
                                 'but a parameter with the same name already '
                                 'exists in %s!' % (name, self.name))
            return self.params[name]
        if value is None:
            if init_f is None:
                raise ValueError('Creating new parameter, but no '
                                 'initialization specified!')
            p = theano.shared(init_f(dims, dtype=dtype), name=name)
            self.params[name] = p
        else:
            # Shared parameters are *not* added to self.params, so they are
            # only saved/updated by their owning model.
            p = value
        # Also expose the parameter as an attribute, e.g. self._w.
        setattr(self, '_'+name, p)
        return p
    def regularize(self, p, regularizer):
        """Add regularization to a parameter.
        Parameters
        ----------
        p : :class:`theano.compile.sharedvalue.SharedVariable`
            Parameter to apply regularization
        regularizer : function
            Regularization function, which should return a symbolic
            expression.
        """
        if not regularizer is None:
            self.regularization.append(regularizer(p))
    def add(self, submodel):
        """Import parameters from a submodel.
        If a submodel named "hidden" has a parameter "b", it will be imported
        as "hidden_b", also accessible as `self._hidden_b`.
        Parameters
        ----------
        submodel : :class:`.Model`
        Returns
        -------
        submodel : :class:`.Model`
            Equal to the parameter, for convenience.
        """
        if submodel.name in self.submodels:
            raise ValueError('Submodel with name %s already exists in %s!' % (
                submodel.name, self.name))
        self.submodels[submodel.name] = submodel
        # Expose the submodel as an attribute, e.g. self.hidden.
        setattr(self, submodel.name, submodel)
        return submodel
    def save(self, f, include_submodels=True):
        """Save the parameter values of this model to a file object.
        Parameters
        ----------
        f : file
            File object to write to, assumed to be opened in 'wb' mode.
        include_submodels : bool
            If ``True`` (default), also save submodel parameters.
        """
        # Keys are the hierarchical name tuples from parameters(); values
        # are raw numpy arrays, pickled with the highest protocol (-1).
        pickle.dump({name: p.get_value(borrow=True)
                     for name, p in self.parameters(
                         include_submodels=include_submodels)},
                    f, -1)
    def load(self, f, allow_incomplete=False, allow_unused=False):
        """Load (some) weights of this model from a file object.
        Parameters
        ----------
        f : file
            File object to read from, assumed to be opened in 'rb' mode.
        allow_incomplete : bool
            If ``False``, throw a `ValueError` if some model parameters are
            missing in the file.
        allow_unused : bool
            If ``False``, throw a `ValueError` if the file contains model
            parameters that are not used in this model.
        """
        data = pickle.load(f)
        parameters = dict(self.parameters())
        names = frozenset(data.keys()) & frozenset(parameters.keys())
        if not allow_incomplete and len(names) < len(parameters):
            diff = sorted(frozenset(parameters.keys()) - names)
            raise ValueError(
                    'The following parameters are missing: %s' % ', '.join(
                        '.'.join(t) for t in diff))
        if not allow_unused and len(names) < len(data):
            diff = sorted(frozenset(data.keys()) - names)
            raise ValueError(
                    'The following parameters are unused: %s' % ', '.join(
                        '.'.join(t) for t in diff))
        for name in names:
            value = data[name]
            old_value = parameters[name].get_value(borrow=True)
            # Refuse shape mismatches rather than silently corrupting state.
            if value.shape != old_value.shape:
                raise ValueError(
                        'Loaded shape is %s but %s expected' % (
                            value.shape, old_value.shape))
            parameters[name].set_value(value)
    def compile(self, *args):
        """Compile a function from the given symbolic inputs to this model's
        output expression (subclasses define `__call__`)."""
        return function(list(args), self(*args))
class Linear(Model):
    """Fully connected linear layer.

    Creates a shared weight matrix `w` of shape
    `(input_dims, output_dims)`, plus a bias vector `b` of shape
    `output_dims` when `use_bias` is ``True``.

    Parameters
    ----------
    name : str
        Name of layer.
    input_dims : int
        Number of inputs.
    output_dims : int
        Number of outputs.
    w : :class:`theano.compile.sharedvalue.SharedVariable`
        Weight vector to use, or pass ``None`` (default) to create a new
        one.
    w_init : :class:`.init.InitializationFunction`
        Initialization for weight vector, in case `w` is ``None``.
    w_regularizer : :class:`.regularize.Regularizer`, optional
        Regularization for weight matrix.
    b : :class:`theano.compile.sharedvalue.SharedVariable`
        Bias vector to use, or pass ``None`` (default) to create a new
        one.
    b_init : :class:`.init.InitializationFunction`
        Initialization for bias vector, in case `b` is ``None``.
    b_regularizer : :class:`.regularize.Regularizer`, optional
        Regularization for biases.
    use_bias : bool
        If ``False``, no bias is used and the `b` and `b_init` parameters
        are ignored.
    dropout : float
        Dropout factor (the default value of 0 means dropout is not used).
    layernorm : bool
        If ``True``, layer normalization is used on the activations.
    """
    def __init__(self, name, input_dims, output_dims,
                 w=None, w_init=None, w_regularizer=None,
                 b=None, b_init=None, b_regularizer=None,
                 use_bias=True, dropout=0, layernorm=False):
        super().__init__(name)
        self.input_dims = input_dims
        self.output_dims = output_dims
        self.use_bias = use_bias
        self.dropout = dropout
        self.layernorm = layernorm
        # Defaults: fan-in scaled Gaussian weights, zero biases.
        if w_init is None:
            w_init = init.Gaussian(fan_in=input_dims)
        if b_init is None:
            b_init = init.Constant(0.0)
        self.param('w', (input_dims, output_dims), init_f=w_init, value=w)
        self.regularize(self._w, w_regularizer)
        if use_bias:
            self.param('b', (output_dims,), init_f=b_init, value=b)
            self.regularize(self._b, b_regularizer)
        # add() rebinds self.dropout / self.ln to the submodel instances.
        if dropout:
            self.add(Dropout('dropout', dropout))
        if layernorm:
            self.add(LayerNormalization('ln', (None, output_dims)))

    def __call__(self, inputs):
        # Affine map, with optional layer norm, bias and dropout applied
        # in that order.
        activations = T.dot(inputs, self._w)
        if self.layernorm:
            activations = self.ln(activations)
        if self.use_bias:
            activations = activations + self._b
        if self.dropout:
            activations = self.dropout(activations)
        return activations
class Embeddings(Model):
    """Embeddings layer.

    Creates one shared parameter, `w`, of shape
    `(alphabet_size, embedding_dims)`, mapping symbol indexes to vectors.

    Parameters
    ----------
    name : str
        Name of layer.
    alphabet_size : int
        Size of symbol alphabet.
    embedding_dims : int
        Dimensionality of embeddings.
    w : :class:`theano.compile.sharedvalue.SharedVariable`
        Weight vector to use, or pass ``None`` (default) to create a new
        one.
    w_init : :class:`.init.InitializationFunction`
        Initialization for weight vector, in case `w` is ``None``.
    w_regularizer : :class:`.regularize.Regularizer`, optional
        Regularization for weight matrix.
    dropout : float
        Dropout factor (the default value of 0 means dropout is not used).
    """
    def __init__(self, name, alphabet_size, embedding_dims,
                 w=None, w_init=None, w_regularizer=None,
                 dropout=0):
        super().__init__(name)
        self.embedding_dims = embedding_dims
        self.alphabet_size = alphabet_size
        self.dropout = dropout
        if w_init is None:
            w_init = init.Gaussian(fan_in=embedding_dims)
        self.param('w',
                   (alphabet_size, embedding_dims), init_f=w_init, value=w)
        self.regularize(self._w, w_regularizer)
        # add() rebinds self.dropout to the Dropout submodel instance.
        if dropout:
            self.add(Dropout('dropout', dropout, sequence=True))

    def __call__(self, inputs):
        # Advanced indexing performs the embedding lookup.
        vectors = self._w[inputs]
        if self.dropout:
            vectors = self.dropout(vectors)
        return vectors
class Conv1D(Model):
    """1D convolution layer with linear activations.
    The input shape is assumed to be (batch_size, length, dims).
    """
    def __init__(self, name, input_dims, output_dims,
                 filter_dims=3, stride=1,
                 f=None, f_init=None, f_regularizer=None,
                 b=None, b_init=None, b_regularizer=None):
        super().__init__(name)
        if f_init is None:
            f_init = init.Gaussian(fan_in=filter_dims*input_dims)
        if b_init is None:
            b_init = init.Constant(0.0)
        self.stride = stride
        self.input_dims = input_dims
        # Filter layout (output channels, input channels, width, 1): the
        # sequence is treated as a 2D image of height 1.
        self.f_shape = (output_dims, input_dims, filter_dims, 1)
        self.param('f', self.f_shape, init_f=f_init)
        self.param('b', (output_dims,), init_f=b_init)
    def __call__(self, inputs, inputs_mask):
        """Apply the convolution.

        Parameters
        ----------
        inputs : symbolic tensor3, shape (batch_size, length, input_dims)
        inputs_mask : symbolic matrix, shape (batch_size, length),
            zero at padded positions (masked before convolving)

        Returns
        -------
        Symbolic tensor3 of shape (batch_size, out_length, output_dims),
        where out_length depends on `stride`.
        """
        x = T.nnet.conv2d(
                (inputs * inputs_mask.dimshuffle(0,1,'x')
                    ).dimshuffle(0,2,1,'x'),
                self._f,
                input_shape=(None, self.input_dims, None, 1),
                filter_shape=self.f_shape,
                border_mode='half',
                subsample=(self.stride, 1),
                filter_flip=True)
        # x now has shape (batch_size, output_dims, out_length, 1).
        # BUG FIX: the original code reshaped with the *input* channel count
        # and *input* length, which is wrong whenever
        # output_dims != input_dims or stride > 1. Dropping the trailing
        # dummy axis and moving channels last needs no reshape at all.
        x = x[:, :, :, 0].dimshuffle(0, 2, 1)
        return x + self._b.dimshuffle('x','x',0)
class LSTM(Model):
    """Long Short-Term Memory.

    Parameters
    ----------
    name : str
        Name of layer.
    input_dims : int
        Length of each vector in the input sequence.
    state_dims : int
        Size of internal states. An LSTM contains two states, each of them
        will be of size state_dims.
    attention_dims : int
        If specified, use attention and let this be the size of the hidden
        attention state.
    attended_dims : int
        Dimensionality of the sequence to have attention on.
    layernorm : str
        One of `'ba1'` (eq 20--22 of Ba et al.), `'ba2'` (eq 29--31) or
        `False` (no layer normalization).
    contextgate : bool
        If ``True``, mix the attention context and the input through a
        learned gate instead of concatenating them.
    """
    def __init__(self, name, input_dims, state_dims,
                 w=None, w_init=None, w_regularizer=None,
                 u=None, u_init=None, u_regularizer=None,
                 b=None, b_init=None, b_regularizer=None,
                 attention_dims=None, attended_dims=None,
                 layernorm=False, contextgate=False):
        super().__init__(name)
        assert layernorm in (False, 'ba1', 'ba2')
        assert (attention_dims is None) == (attended_dims is None)
        assert not (contextgate and (attention_dims is None))
        self.n_states = 2
        if attended_dims is not None:
            if not contextgate:
                # Without a context gate the attention context vector is
                # simply concatenated to the input at each step.
                input_dims += attended_dims
        self.input_dims = input_dims
        self.state_dims = state_dims
        self.layernorm = layernorm
        self.attention_dims = attention_dims
        self.attended_dims = attended_dims
        self.use_attention = attention_dims is not None
        self.use_contextgate = contextgate
        # Default initializations: Gaussian input weights, orthogonal
        # recurrent weights, and a forget-gate bias of 1 (the four gate
        # blocks are i, f, o, c in that order).
        if w_init is None: w_init = init.Gaussian(fan_in=input_dims)
        if u_init is None: u_init = init.Concatenated(
                [init.Orthogonal()]*4, axis=1)
        if b_init is None: b_init = init.Concatenated(
                [init.Constant(x) for x in [0.0, 1.0, 0.0, 0.0]])
        if self.use_contextgate:
            # Gate parameters mixing input (wzg/uzg/bzg) with the attention
            # context (czs/bs); the main w then acts on the mixed vector.
            self.param('wzg', (input_dims, state_dims*2),
                       init_f=init.Gaussian(fan_in=input_dims))
            self.param('uzg', (state_dims, state_dims*2),
                       init_f=init.Concatenated([init.Orthogonal()]*2, axis=1))
            self.param('bzg', (state_dims*2,), init_f=init.Constant(0.0))
            self.param('czs', (attended_dims, state_dims*2),
                       init_f=init.Gaussian(fan_in=attended_dims))
            self.param('bs', (state_dims,), init_f=init.Constant(0.0))
            self.param('w', (state_dims, state_dims*4), init_f=w_init, value=w)
            self.param('u', (state_dims, state_dims*4), init_f=u_init, value=u)
            self.param('b', (state_dims*4,), init_f=b_init, value=b)
        else:
            self.param('w', (input_dims, state_dims*4), init_f=w_init, value=w)
            self.param('u', (state_dims, state_dims*4), init_f=u_init, value=u)
            self.param('b', (state_dims*4,), init_f=b_init, value=b)
        if self.use_attention:
            self.add(Linear('attention_u', attended_dims, attention_dims))
            self.param('attention_w', (state_dims, attention_dims),
                       init_f=init.Gaussian(fan_in=state_dims))
            self.param('attention_v', (attention_dims,),
                       init_f=init.Gaussian(fan_in=attention_dims))
            self.regularize(self._attention_w, w_regularizer)
            if layernorm == 'ba1':
                self.add(LayerNormalization('ln_a', (None, attention_dims)))
        self.regularize(self._w, w_regularizer)
        self.regularize(self._u, u_regularizer)
        self.regularize(self._b, b_regularizer)
        if layernorm == 'ba1':
            self.add(LayerNormalization('ln_1', (None, state_dims*4)))
            self.add(LayerNormalization('ln_2', (None, state_dims*4)))
        if layernorm:
            self.add(LayerNormalization('ln_h', (None, state_dims)))
    def __call__(self, inputs, h_tm1, c_tm1,
                 attended=None, attended_dot_u=None, attention_mask=None):
        """One step of the LSTM.

        Parameters
        ----------
        inputs : symbolic matrix, (batch_size, input_dims)
        h_tm1, c_tm1 : symbolic matrices
            Hidden and cell state from the previous time step.
        attended, attended_dot_u, attention_mask
            Attention inputs (required when built with attention; see
            LSTMSequence.step for their meaning).

        Returns
        -------
        (h_t, c_t), plus attention.T when attention is used.
        """
        if self.use_attention:
            # Non-precomputed part of the attention vector for this time step
            # _ x batch_size x attention_dims
            h_dot_w = T.dot(h_tm1, self._attention_w)
            if self.layernorm == 'ba1': h_dot_w = self.ln_a(h_dot_w)
            h_dot_w = h_dot_w.dimshuffle('x',0,1)
            # Attention vector, with distributions over the positions in
            # attended. Elements that fall outside the sentence in each batch
            # are set to zero.
            # sequence_length x batch_size
            # Note that attention.T is returned
            attention = softmax_masked(
                    T.dot(
                        T.tanh(attended_dot_u + h_dot_w),
                        self._attention_v).T,
                    attention_mask.T).T
            # Compressed attended vector, weighted by the attention vector
            # batch_size x attended_dims
            compressed = (attended * attention.dimshuffle(0,1,'x')).sum(axis=0)
            # Append the compressed vector to the inputs and continue as usual
            if not self.use_contextgate:
                inputs = T.concatenate([inputs, compressed], axis=1)
            else:
                # Gated mixture of the input projection (g) and the context
                # projection (s), weighted by z.
                zg = (T.dot(inputs, self._wzg) + T.dot(h_tm1, self._uzg) +
                      self._bzg.dimshuffle('x', 0))
                zs = T.dot(compressed, self._czs)
                def part(m,i):
                    return m[:, i*self.state_dims:(i+1)*self.state_dims]
                z = T.nnet.sigmoid(part(zg,0) + part(zs,0))
                g = part(zg,1)
                s = part(zs,1) + self._bs.dimshuffle('x', 0)
                inputs = z*s + (1-z)*g
        if self.layernorm == 'ba1':
            x = (self.ln_1(T.dot(inputs, self._w)) +
                 self.ln_2(T.dot(h_tm1, self._u)))
        else:
            x = T.dot(inputs, self._w) + T.dot(h_tm1, self._u)
        x = x + self._b.dimshuffle('x', 0)
        # The four gate pre-activations are stored side by side in x:
        # input gate, forget gate, output gate, candidate cell.
        def x_part(i): return x[:, i*self.state_dims:(i+1)*self.state_dims]
        i = T.nnet.sigmoid(x_part(0))
        f = T.nnet.sigmoid(x_part(1))
        o = T.nnet.sigmoid(x_part(2))
        c = T.tanh(   x_part(3))
        c_t = f*c_tm1 + i*c
        h_t = o*T.tanh(self.ln_h(c_t) if self.layernorm else c_t)
        if self.use_attention:
            return h_t, c_t, attention.T
        else:
            return h_t, c_t
class LSTMSequence(Model):
    """Run an LSTM over a sequence with theano.scan.

    Wraps an :class:`LSTM` gate plus dropout; optionally processes the
    sequence backwards, and can learn the initial states (h_0, c_0).
    """
    def __init__(self, name, backwards, *args,
                 dropout=0, trainable_initial=False, offset=0, **kwargs):
        super().__init__(name)
        self.backwards = backwards
        self.trainable_initial = trainable_initial
        self.offset = offset
        # Compiled helper functions, built lazily on first use.
        self._step_fun = None
        self._attention_u_fun = None
        self.add(Dropout('dropout', dropout))
        # Remaining positional/keyword arguments configure the LSTM gate.
        self.add(LSTM('gate', *args, **kwargs))
        if self.trainable_initial:
            self.param('h_0', (self.gate.state_dims,),
                       init_f=init.Gaussian(fan_in=self.gate.state_dims))
            self.param('c_0', (self.gate.state_dims,),
                       init_f=init.Gaussian(fan_in=self.gate.state_dims))
    def step(self, inputs, inputs_mask, h_tm1, c_tm1, h_mask, *non_sequences):
        """One scan step; carries (h, c) forward and freezes masked items.

        h_mask is a (constant per sequence) dropout mask applied to h_tm1.
        """
        if self.gate.use_attention:
            # attended is the
            #   src_sequence_length x batch_size x attention_dims
            # matrix which we have attention on.
            #
            # attended_dot_u is the h_t-independent part of the final
            # attention vectors, which is precomputed for efficiency.
            #
            # attention_mask is a binary mask over the valid elements of
            # attended, which in practice is the same as the mask passed to
            # the encoder that created attended. Size
            #   src_sequence_length x batch_size
            h_t, c_t, attention = self.gate(
                    inputs, h_tm1 * h_mask.astype(theano.config.floatX), c_tm1,
                    attended=non_sequences[0],
                    attended_dot_u=non_sequences[1],
                    attention_mask=non_sequences[2])
            # Where inputs_mask is zero, keep the previous states instead.
            return (T.switch(inputs_mask.dimshuffle(0, 'x'), h_t, h_tm1),
                    T.switch(inputs_mask.dimshuffle(0, 'x'), c_t, c_tm1),
                    attention)
        else:
            h_t, c_t = self.gate(
                    inputs, h_tm1 * h_mask.astype(theano.config.floatX), c_tm1)
            return (T.switch(inputs_mask.dimshuffle(0, 'x'), h_t, h_tm1),
                    T.switch(inputs_mask.dimshuffle(0, 'x'), c_t, c_tm1))
    def step_fun(self):
        """Compile (lazily) and return a single-step function, used by
        beam search at decoding time (no dropout: masks are all-ones)."""
        if self._step_fun is None:
            inputs = T.matrix('inputs')
            h_tm1 = T.matrix('h_tm1')
            c_tm1 = T.matrix('c_tm1')
            if self.gate.use_attention:
                attended=T.tensor3('attended')
                attended_dot_u=T.tensor3('attended_dot_u')
                attention_mask=T.matrix('attention_mask')
                self._step_fun = function(
                        [inputs, h_tm1, c_tm1,
                         attended, attended_dot_u, attention_mask],
                        self.step(inputs, T.ones(inputs.shape[:-1]),
                                  h_tm1, c_tm1, T.ones_like(h_tm1),
                                  attended, attended_dot_u, attention_mask),
                        name='%s_step_fun'%self.name)
            else:
                self._step_fun = function(
                        [inputs, h_tm1, c_tm1],
                        self.step(inputs, T.ones(inputs.shape[:-1]),
                                  h_tm1, c_tm1, T.ones_like(h_tm1)),
                        name='%s_step_fun'%self.name)
        return self._step_fun
    def attention_u_fun(self):
        """Compile (lazily) and return a function computing the
        h_t-independent part of the attention, attended_dot_u."""
        assert self.gate.use_attention
        if self._attention_u_fun is None:
            attended = T.tensor3('attended')
            self._attention_u_fun = function(
                    [attended], self.gate.attention_u(attended),
                    name='%s_attention_u_fun'%self.name)
        return self._attention_u_fun
    def search(self, predict_fun, embeddings,
               start_symbol, stop_symbol, max_length,
               h_0=None, c_0=None, attended=None, attention_mask=None,
               beam_size=4):
        """Beam search decoding using the compiled step function.

        predict_fun maps a hidden state matrix to symbol probabilities;
        embeddings is indexed by the previously emitted symbols.
        """
        if self.gate.use_attention:
            attended_dot_u = self.attention_u_fun()(attended)
        if self.trainable_initial:
            if h_0 is None:
                h_0 = self._h_0.get_value()[None,:]
            if c_0 is None:
                c_0 = self._c_0.get_value()[None,:]
        def step(i, states, outputs, outputs_mask):
            if self.gate.use_attention:
                result = self.step_fun()(
                        embeddings[outputs[-1]], states[0], states[1],
                        attended, attended_dot_u, attention_mask)
            else:
                result = self.step_fun()(
                        embeddings[outputs[-1]], states[0], states[1])
            h_t, c_t = result[:2]
            return [h_t, c_t], predict_fun(h_t)
        return search.beam(
                step, [h_0, c_0], h_0.shape[0], start_symbol, stop_symbol,
                max_length, beam_size=beam_size)
    def __call__(self, inputs, inputs_mask, h_0=None, c_0=None,
                 attended=None, attention_mask=None):
        """Run the LSTM over a whole (masked) input sequence with scan."""
        if self.trainable_initial:
            batch_size = inputs.shape[1]
            if h_0 is None:
                h_0 = expand_to_batch(self._h_0, batch_size)
            if c_0 is None:
                c_0 = expand_to_batch(self._c_0, batch_size)
        attention_info = []
        if self.gate.use_attention:
            attention_info = [attended, self.gate.attention_u(attended),
                              attention_mask]
        # One dropout mask per sequence, constant across time steps.
        dropout_masks = [self.dropout.mask(h_0.shape)]
        seqs, _ = theano.scan(
                fn=self.step,
                go_backwards=self.backwards,
                sequences=[{'input': inputs, 'taps': [self.offset]},
                           {'input': inputs_mask, 'taps': [self.offset]}],
                outputs_info=[h_0, c_0] + \
                        [None]*(1 if self.gate.use_attention else 0),
                non_sequences=dropout_masks + attention_info + \
                        self.gate.parameters_list())
        if self.backwards:
            # Undo scan's reversed traversal so outputs align with inputs.
            return tuple(seq[::-1] for seq in seqs)
        else:
            return seqs
class Sequence(Model):
    def __init__(self, name, gate_type, backwards, *args,
                 dropout=0, trainable_initial=False, offset=0, **kwargs):
        # Generalization of LSTMSequence: gate_type is the recurrent cell
        # class; it must expose n_states, state_dims and use_attention.
        super().__init__(name)
        self.backwards = backwards
        self.trainable_initial = trainable_initial
        self.offset = offset
        # Compiled helper functions, built lazily on first use.
        self._step_fun = None
        self._attention_u_fun = None
        self.add(Dropout('dropout', dropout))
        self.add(gate_type('gate', *args, **kwargs))
        if self.trainable_initial:
            # One learnable initial vector per recurrent state.
            for state in range(self.gate.n_states):
                self.param('state_%d_0' % state, (self.gate.state_dims,),
                           init_f=init.Gaussian(fan_in=self.gate.state_dims))
    def step(self, inputs, inputs_mask, *args):
        """One scan step over an arbitrary number of recurrent states.

        args is laid out as: n_states previous states, then the dropout
        mask h_mask, then the non-sequence arguments.
        """
        states_tm1 = args[:self.gate.n_states]
        h_mask = args[self.gate.n_states]
        non_sequences = args[self.gate.n_states+1:]
        # TODO: currently assume that dropout is applied only to states[0]
        # through h_mask (which is passed through non_sequences and
        # constant at each time step)
        if self.gate.use_attention:
            # attended is the
            #   src_sequence_length x batch_size x attention_dims
            # matrix which we have attention on.
            #
            # attended_dot_u is the h_t-independent part of the final
            # attention vectors, which is precomputed for efficiency.
            #
            # attention_mask is a binary mask over the valid elements of
            # attended, which in practice is the same as the mask passed to
            # the encoder that created attended. Size
            #   src_sequence_length x batch_size
            states_attention = self.gate(
                    inputs,
                    *((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
                      states_tm1[1:]),
                    attended=non_sequences[0],
                    attended_dot_u=non_sequences[1],
                    attention_mask=non_sequences[2])
            # The gate returns all new states followed by the attention.
            states_t = states_attention[:-1]
            attention = states_attention[-1]
            # Where inputs_mask is zero, keep the previous states instead.
            return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
                         for s_t, s_tm1 in zip(states_t, states_tm1)
                        ) + (attention,)
        else:
            states_t = self.gate(
                    inputs,
                    *((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
                      states_tm1[1:]))
            return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
                         for s_t, s_tm1 in zip(states_t, states_tm1))
def step_fun(self):
if self._step_fun is None:
inputs = T.matrix('inputs')
states_tm1 = [T.matrix('state_%d_tm1' % state)
for state in range(self.gate.n_states)]
if self.gate.use_attention:
attended=T.tensor3('attended')
attended_dot_u=T.tensor3('attended_dot_u')
attention_mask=T.matrix('attention_mask')
self._step_fun = function(
[inputs] + states_tm1 + [
attended, attended_dot_u, attention_mask],
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0]),
attended, attended_dot_u,
attention_mask])),
name='%s_step_fun'%self.name)
else:
self._step_fun = function(
[inputs] + states_tm1,
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0])])),
name='%s_step_fun'%self.name)
return self._step_fun
def attention_u_fun(self):
assert self.gate.use_attention
if self._attention_u_fun is None:
attended = T.tensor3('attended')
self._attention_u_fun = function(
[attended], self.gate.attention_u(attended),
name='%s_attention_u_fun'%self.name)
return self._attention_u_fun
def search(self, predict_fun, embeddings,
start_symbol, stop_symbol, max_length,
states_0=None, attended=None, attention_mask=None,
fixed=None,
beam_size=4):
if self.gate.use_attention:
attended_dot_u = self.attention_u_fun()(attended)
if self.trainable_initial:
if states_0 is None:
states_0 = [
getattr(self, '_state_%d_0' % state).get_value()[None,:]
for state in range(self.gate.n_states)]
def step(i, states, outputs, outputs_mask):
inputs = embeddings[outputs[-1]]
# TODO: is this the best way to add extra arguments?
if fixed is not None:
inputs = np.concatenate(
[inputs, fixed[None,:].repeat(0, axis=-1)],
axis=-1)
if self.gate.use_attention:
result = self.step_fun()(
*([inputs] + states + [
attended, attended_dot_u, attention_mask]))
else:
result = self.step_fun()(
*([inputs] + states))
states = result[:self.gate.n_states]
# NOTE: state[0] hard-coded
return states, predict_fun(states[0])
return search.beam(
step, states_0, states_0[0].shape[0],
start_symbol, stop_symbol,
max_length, beam_size=beam_size)
def __call__(self, inputs, inputs_mask, states_0=None,
attended=None, attention_mask=None):
if self.trainable_initial:
batch_size = inputs.shape[1]
if states_0 is None:
states_0 = [
expand_to_batch(getattr(self, '_state_%d_0' % state),
batch_size)
for state in range(self.gate.n_states)]
attention_info = []
if self.gate.use_attention:
attention_info = [attended, self.gate.attention_u(attended),
attention_mask]
dropout_masks = [self.dropout.mask(states_0[0].shape)]
seqs, _ = theano.scan(
fn=self.step,
go_backwards=self.backwards,
sequences=[{'input': inputs, 'taps': [self.offset]},
{'input': inputs_mask, 'taps': [self.offset]}],
outputs_info=list(states_0) + \
[None]*(1 if self.gate.use_attention else 0),
non_sequences=dropout_masks + attention_info + \
self.gate.parameters_list())
if self.backwards:
return tuple(seq[::-1] for seq in seqs)
else:
return seqs
# TODO: need to re-think how to handle attention in stacked models
class StackedSequence(Model):
    """Stack of `n_layers` recurrent gated layers applied with theano.scan().

    Layer 0 consumes the external input sequence; every higher layer
    consumes state 0 of the layer below. Attention is not implemented for
    stacked models (NotImplementedError is raised where it would be used).
    """
    def __init__(self, name, gate_type, backwards, n_layers,
                 input_dims, state_dims, *args,
                 dropout=0, trainable_initial=False, offset=0,
                 use_attention=False,
                 layer_fixed_size=None, **kwargs):
        super().__init__(name)
        self.backwards = backwards
        self.trainable_initial = trainable_initial
        self.offset = offset
        self.n_layers = n_layers
        self.layer_fixed_size = layer_fixed_size
        # Compiled-on-demand helper functions.
        self._step_fun = None
        self._attention_u_fun = None
        self.add(Dropout('dropout', dropout))
        self.gates = []
        for layer in range(n_layers):
            # Each layer reads state_dims recurrent inputs; layer 0 also
            # reads the external inputs, and any layer may additionally get
            # fixed (non-recurrent) inputs of size layer_fixed_size[layer].
            total_input_dims = state_dims
            if layer == 0:
                total_input_dims += input_dims
            if layer_fixed_size is not None:
                total_input_dims += layer_fixed_size[layer]
            gate = gate_type(
                'gate%d' % layer,
                total_input_dims,
                state_dims,
                *args,
                **kwargs)
            self.add(gate)
            self.gates.append(gate)
            if self.trainable_initial:
                for state in range(self.gate0.n_states):
                    self.param('state_%d_%d_0' % (layer, state),
                               (self.gate0.state_dims,),
                               init_f=init.Gaussian(
                                   fan_in=self.gate0.state_dims))
    def step(self, inputs, inputs_mask, *args):
        """One scan step: advance every layer a single time step.

        `args` layout: n_layers * n_states previous states, then the
        recurrent dropout mask, then (optionally) per-layer fixed inputs,
        then the non-sequences (gate parameters).
        """
        total_states = self.gate0.n_states*self.n_layers
        layer_states_tm1 = [
            args[layer*self.gate0.n_states:(layer+1)*self.gate0.n_states]
            for layer in range(self.n_layers)]
        n = total_states
        h_mask = args[n]
        n += 1
        layer_fixed = None
        if self.layer_fixed_size is not None:
            # NOTE(review): the slice length n_layers+1 looks one longer than
            # the number of layers -- confirm against the callers.
            layer_fixed = args[n:n+self.n_layers+1]
            n += self.n_layers+1
        non_sequences = args[n:]
        layer_states_t = []
        # TODO: currently assume that dropout is applied only to states[0]
        # through h_mask (which is passed through non_sequences and
        # constant at each time step)
        if self.gates[-1].use_attention:
            # (Unreachable attention code that followed this raise has been
            # removed: it referenced an undefined `states_tm1`.)
            raise NotImplementedError('Stacked RNN with attention')
        for layer in range(self.n_layers):
            states_tm1 = layer_states_tm1[layer]
            # Layer 0 sees the external inputs; higher layers see state 0 of
            # the layer below, computed in this very time step.
            total_inputs = inputs if layer == 0 else layer_states_t[-1][0]
            if layer_fixed is not None:
                total_inputs = T.concatenate(
                    [total_inputs, layer_fixed[layer].repeat(
                        inputs.shape[0], axis=0)],
                    axis=-1)
            states_t = getattr(self, 'gate%d' % layer)(
                total_inputs,
                *((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
                  states_tm1[1:]))
            layer_states_t.append(states_t)
        # Masked-out time steps carry the previous states through unchanged.
        return tuple(
            T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
            for states_t, states_tm1 in zip(
                layer_states_t,
                layer_states_tm1)
            for s_t, s_tm1 in zip(states_t, states_tm1))
    def step_fun(self):
        """Compile (once) and return a callable evaluating a single step."""
        if self._step_fun is None:
            inputs = T.matrix('inputs')
            states_tm1 = [T.matrix('state_%d_%d_tm1' % (layer, state))
                          for layer in range(self.n_layers)
                          for state in range(self.gate0.n_states)]
            if self.gates[-1].use_attention:
                # (Unreachable attention compilation code removed.)
                raise NotImplementedError('Stacked RNN with attention')
            self._step_fun = function(
                    [inputs] + states_tm1,
                    self.step(*([inputs, T.ones(inputs.shape[:-1])] +
                                states_tm1 + [T.ones_like(states_tm1[0])])),
                    name='%s_step_fun'%self.name)
        return self._step_fun
    def attention_u_fun(self):
        """Compile (once) and return the function precomputing attention_u."""
        assert self.gates[-1].use_attention
        if self._attention_u_fun is None:
            attended = T.tensor3('attended')
            self._attention_u_fun = function(
                    [attended], self.gates[-1].attention_u(attended),
                    name='%s_attention_u_fun'%self.name)
        return self._attention_u_fun
    def search(self, predict_fun, embeddings,
               start_symbol, stop_symbol, max_length,
               layer_states_0=None, attended=None, attention_mask=None,
               layer_fixed=None,
               beam_size=4):
        """Beam-search decoding driven by the compiled single-step function."""
        if self.gates[-1].use_attention:
            attended_dot_u = self.attention_u_fun()(attended)
        if self.trainable_initial:
            if layer_states_0 is None:
                # BUG FIX: the format string has two placeholders, so it must
                # be fed the (layer, state) pair; the previous '% state'
                # raised TypeError as soon as this branch was taken.
                layer_states_0 = [
                    getattr(
                        self, '_state_%d_%d_0' % (layer, state)
                    ).get_value()[None,:]
                    for layer in range(self.n_layers)
                    for state in range(self.gate0.n_states)]
        def step(i, states, outputs, outputs_mask):
            # Embed the most recently produced symbols as the next inputs.
            inputs = embeddings[outputs[-1]]
            # TODO: need to give sizes of fixed arguments ...
            # TODO: is this the best way to add extra arguments?
            if layer_fixed is not None and layer_fixed[0] is not None:
                # TODO: wasn't this buggy anyway? Why repeat(0, ...) ?
                inputs = np.concatenate(
                    [inputs, layer_fixed[0][None,:]],
                    axis=-1)
            if self.gates[-1].use_attention:
                # (Unreachable attention call removed.)
                raise NotImplementedError('Stacked RNN with attention')
            result = self.step_fun()(
                *([inputs] + states))
            states = result[:self.n_layers*self.gate0.n_states]
            # NOTE: state[0] of the last layer hard-coded
            return states, predict_fun(
                states[(self.n_layers-1)*self.gate0.n_states])
        return search.beam(
            step, layer_states_0, layer_states_0[0][0].shape[0],
            start_symbol, stop_symbol,
            max_length, beam_size=beam_size)
    def __call__(self, inputs, inputs_mask, layer_states_0=None,
                 attended=None, attention_mask=None):
        """Apply the stack to a whole masked sequence via theano.scan()."""
        if self.trainable_initial:
            batch_size = inputs.shape[1]
            if layer_states_0 is None:
                layer_states_0 = [
                    expand_to_batch(getattr(self, '_state_%d_%d_0' % (
                        layer, state)),
                        batch_size)
                    for layer in range(self.n_layers)
                    for state in range(self.gate0.n_states)]
        attention_info = []
        if self.gates[-1].use_attention:
            attention_info = [attended, self.gates[-1].attention_u(attended),
                              attention_mask]
        # The recurrent dropout mask is fixed over time (a scan non-sequence).
        dropout_masks = [self.dropout.mask(layer_states_0[0].shape)]
        seqs, _ = theano.scan(
            fn=self.step,
            go_backwards=self.backwards,
            sequences=[{'input': inputs, 'taps': [self.offset]},
                       {'input': inputs_mask, 'taps': [self.offset]}],
            outputs_info=list(layer_states_0) + \
                    [None]*(1 if self.gate0.use_attention else 0),
            non_sequences=dropout_masks + attention_info + \
                    sum([gate.parameters_list()
                         for gate in self.gates], []))
        if self.backwards:
            # scan iterated in reverse; flip outputs back to forward order.
            return tuple(seq[::-1] for seq in seqs)
        else:
            return seqs
class Dropout(Model):
    """Dropout layer.

    name : str
        Name of layer.
    dropout : float
        Dropout factor (equivalent to 1 - retention probability)
    sequence : bool
        If True, dropout is not performed on the last dimension, so an
        embedded symbol is either kept intact or zeroed out completely.
    """
    def __init__(self, name, dropout, sequence=False):
        super().__init__(name)
        self.p = 1.0 - dropout  # retention probability
        self.rng = RandomStreams()
        self.sequence = sequence
    def mask(self, shape):
        """Return a scaled dropout mask for a (symbolic) shape.

        Useful for recurrent layers, where a fixed mask is passed through
        the non_sequences argument to theano.scan().
        """
        if self.p == 1:
            # No dropout configured: an all-ones mask is a no-op.
            return T.ones(shape)
        if self.sequence:
            keep = self.rng.binomial(shape[:-1], p=self.p)
            scaled = T.shape_padright(keep).astype(theano.config.floatX)
        else:
            scaled = self.rng.binomial(shape, p=self.p).astype(
                theano.config.floatX)
        # Inverted dropout: scale up so the expected activation is unchanged.
        return scaled / self.p
    def __call__(self, inputs):
        if self.p == 1:
            return inputs
        drop_mask = self.mask(inputs.shape)
        # Only drop units while training; pass inputs through at test time.
        return ifelse(train_mode, inputs * drop_mask, inputs)
class LayerNormalization(Model):
    """Layer Normalization (Ba, Kiros and Hinton 2016)."""
    def __init__(self, name, inputs_shape, g_init=None, axis=-1, epsilon=1e-6):
        super().__init__(name)
        self.inputs_shape = inputs_shape
        self.axis = axis
        self.epsilon = epsilon
        if g_init is None:
            g_init = init.Constant(1.0)
        # One gain parameter per unit along the normalized axis.
        self.param('g', (inputs_shape[self.axis],), init_f=g_init)
    def __call__(self, inputs):
        # Broadcast the gain vector over every axis except self.axis.
        pattern = ['x'] * len(self.inputs_shape)
        pattern[self.axis] = 0
        mu = inputs.mean(axis=self.axis, keepdims=True).astype(
            theano.config.floatX)
        sigma = inputs.std(axis=self.axis, keepdims=True).astype(
            theano.config.floatX)
        return ((inputs - mu) / (sigma + self.epsilon)) * \
                self._g.dimshuffle(*pattern)
class LinearSelection(Model):
    """Linear layer mixing `parallel_dims` parallel projections per output.

    The inputs are projected to output_dims*parallel_dims units; a softmax
    over selector-derived weights then mixes the parallel projections into
    output_dims units.
    """
    def __init__(self, name, input_dims, output_dims, selector_dims,
                 parallel_dims,
                 w=None, w_init=None, w_regularizer=None,
                 b=None, b_init=None, b_regularizer=None,
                 sw=None, sw_init=None,
                 sb=None, sb_init=None,
                 input_select=False,
                 use_bias=True, dropout=0, layernorm=False):
        super().__init__(name)
        self.input_dims = input_dims
        self.output_dims = output_dims
        self.selector_dims = selector_dims
        self.parallel_dims = parallel_dims
        self.use_bias = use_bias
        # NOTE(review): self.dropout is first the float, then (if nonzero)
        # rebound by add() below; `if self.dropout:` in __call__ relies on
        # this -- presumably intended, confirm against Model.add().
        self.dropout = dropout
        self.layernorm = layernorm
        self.input_select = input_select
        # If input_select, the inputs are concatenated onto the selector.
        s_dims = selector_dims + (input_dims if input_select else 0)
        if w_init is None: w_init = init.Gaussian(fan_in=input_dims)
        if b_init is None: b_init = init.Constant(0.0)
        if sw_init is None: sw_init = init.Gaussian(fan_in=s_dims)
        if sb_init is None: sb_init = init.Constant(0.0)
        self.param('w', (input_dims, output_dims*parallel_dims),
                   init_f=w_init, value=w)
        self.regularize(self._w, w_regularizer)
        if use_bias:
            self.param('b', (output_dims*parallel_dims,),
                       init_f=b_init, value=b)
            self.regularize(self._b, b_regularizer)
        self.param('sw', (s_dims, output_dims*parallel_dims),
                   init_f=sw_init)
        self.param('sb', (output_dims*parallel_dims,),
                   init_f=sb_init)
        if dropout:
            self.add(Dropout('dropout', dropout))
        if layernorm:
            self.add(LayerNormalization('ln', (None, output_dims)))
    def __call__(self, inputs, selector, sequence=False):
        # Parallel projections: ... x (output_dims * parallel_dims).
        par = T.dot(inputs, self._w)
        if self.use_bias: par = par + self._b
        if sequence:
            par = par.reshape((par.shape[0], par.shape[1],
                               self.output_dims, self.parallel_dims))
        else:
            par = par.reshape((par.shape[0],
                               self.output_dims, self.parallel_dims))
        # Note that par might be a 3D or 4D tensor, while sel is always 3D
        if self.input_select and sequence:
            # ...except if we condition on the input
            selector = T.concatenate([
                inputs,
                T.repeat(selector.dimshuffle('x',0,1), inputs.shape[0],
                         axis=0)],
                axis=-1)
            sel = T.dot(selector, self._sw) + self._sb
            sel = sel.reshape(
                    (sel.shape[0], sel.shape[1],
                     self.output_dims, self.parallel_dims))
            sel = softmax_4d(sel)
            # Mix the parallel projections with the softmax weights.
            outputs = (par * sel).sum(axis=-1)
        else:
            if self.input_select:
                selector = T.concatenate([inputs, selector], axis=-1)
            sel = T.dot(selector, self._sw) + self._sb
            sel = sel.reshape(
                    (sel.shape[0], self.output_dims, self.parallel_dims))
            sel = softmax_3d(sel)
            if sequence:
                outputs = (par * sel.dimshuffle('x',0,1,2)).sum(axis=-1)
            else:
                outputs = (par * sel).sum(axis=-1)
        if self.layernorm: outputs = self.ln(outputs)
        if self.dropout: outputs = self.dropout(outputs)
        return outputs
| [
"theano.tensor.tanh",
"theano.ifelse.ifelse",
"theano.tensor.as_tensor_variable",
"theano.tensor.tensor3",
"theano.tensor.concatenate",
"theano.tensor.ones_like",
"numpy.abs",
"theano.tensor.dot",
"theano.sandbox.rng_mrg.MRG_RandomStreams",
"pickle.load",
"theano.tensor.ones",
"collections.Ord... | [((1561, 1574), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1572, 1574), False, 'from collections import OrderedDict\n'), ((1633, 1646), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1644, 1646), False, 'from collections import OrderedDict\n'), ((8480, 8494), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8491, 8494), False, 'import pickle\n'), ((12112, 12134), 'theano.tensor.dot', 'T.dot', (['inputs', 'self._w'], {}), '(inputs, self._w)\n', (12117, 12134), True, 'from theano import tensor as T\n'), ((45715, 45730), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', ([], {}), '()\n', (45728, 45730), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((46446, 46484), 'theano.ifelse.ifelse', 'ifelse', (['train_mode', '(inputs * m)', 'inputs'], {}), '(train_mode, inputs * m, inputs)\n', (46452, 46484), False, 'from theano.ifelse import ifelse\n'), ((49190, 49212), 'theano.tensor.dot', 'T.dot', (['inputs', 'self._w'], {}), '(inputs, self._w)\n', (49195, 49212), True, 'from theano import tensor as T\n'), ((1877, 1902), 'theano.tensor.as_tensor_variable', 'T.as_tensor_variable', (['(0.0)'], {}), '(0.0)\n', (1897, 1902), True, 'from theano import tensor as T\n'), ((19538, 19569), 'theano.tensor.dot', 'T.dot', (['h_tm1', 'self._attention_w'], {}), '(h_tm1, self._attention_w)\n', (19543, 19569), True, 'from theano import tensor as T\n'), ((23998, 24016), 'theano.tensor.matrix', 'T.matrix', (['"""inputs"""'], {}), "('inputs')\n", (24006, 24016), True, 'from theano import tensor as T\n'), ((24037, 24054), 'theano.tensor.matrix', 'T.matrix', (['"""h_tm1"""'], {}), "('h_tm1')\n", (24045, 24054), True, 'from theano import tensor as T\n'), ((24075, 24092), 'theano.tensor.matrix', 'T.matrix', (['"""c_tm1"""'], {}), "('c_tm1')\n", (24083, 24092), True, 'from theano import tensor as T\n'), ((25193, 25214), 'theano.tensor.tensor3', 'T.tensor3', (['"""attended"""'], {}), 
"('attended')\n", (25202, 25214), True, 'from theano import tensor as T\n'), ((30511, 30529), 'theano.tensor.matrix', 'T.matrix', (['"""inputs"""'], {}), "('inputs')\n", (30519, 30529), True, 'from theano import tensor as T\n'), ((31827, 31848), 'theano.tensor.tensor3', 'T.tensor3', (['"""attended"""'], {}), "('attended')\n", (31836, 31848), True, 'from theano import tensor as T\n'), ((40022, 40040), 'theano.tensor.matrix', 'T.matrix', (['"""inputs"""'], {}), "('inputs')\n", (40030, 40040), True, 'from theano import tensor as T\n'), ((41493, 41514), 'theano.tensor.tensor3', 'T.tensor3', (['"""attended"""'], {}), "('attended')\n", (41502, 41514), True, 'from theano import tensor as T\n'), ((46040, 46053), 'theano.tensor.ones', 'T.ones', (['shape'], {}), '(shape)\n', (46046, 46053), True, 'from theano import tensor as T\n'), ((20518, 20561), 'theano.tensor.concatenate', 'T.concatenate', (['[inputs, compressed]'], {'axis': '(1)'}), '([inputs, compressed], axis=1)\n', (20531, 20561), True, 'from theano import tensor as T\n'), ((20728, 20756), 'theano.tensor.dot', 'T.dot', (['compressed', 'self._czs'], {}), '(compressed, self._czs)\n', (20733, 20756), True, 'from theano import tensor as T\n'), ((21226, 21248), 'theano.tensor.dot', 'T.dot', (['inputs', 'self._w'], {}), '(inputs, self._w)\n', (21231, 21248), True, 'from theano import tensor as T\n'), ((21251, 21272), 'theano.tensor.dot', 'T.dot', (['h_tm1', 'self._u'], {}), '(h_tm1, self._u)\n', (21256, 21272), True, 'from theano import tensor as T\n'), ((24158, 24179), 'theano.tensor.tensor3', 'T.tensor3', (['"""attended"""'], {}), "('attended')\n", (24167, 24179), True, 'from theano import tensor as T\n'), ((24211, 24238), 'theano.tensor.tensor3', 'T.tensor3', (['"""attended_dot_u"""'], {}), "('attended_dot_u')\n", (24220, 24238), True, 'from theano import tensor as T\n'), ((24270, 24296), 'theano.tensor.matrix', 'T.matrix', (['"""attention_mask"""'], {}), "('attention_mask')\n", (24278, 24296), True, 'from theano 
import tensor as T\n'), ((30556, 30588), 'theano.tensor.matrix', 'T.matrix', (["('state_%d_tm1' % state)"], {}), "('state_%d_tm1' % state)\n", (30564, 30588), True, 'from theano import tensor as T\n'), ((30720, 30741), 'theano.tensor.tensor3', 'T.tensor3', (['"""attended"""'], {}), "('attended')\n", (30729, 30741), True, 'from theano import tensor as T\n'), ((30773, 30800), 'theano.tensor.tensor3', 'T.tensor3', (['"""attended_dot_u"""'], {}), "('attended_dot_u')\n", (30782, 30800), True, 'from theano import tensor as T\n'), ((30832, 30858), 'theano.tensor.matrix', 'T.matrix', (['"""attention_mask"""'], {}), "('attention_mask')\n", (30840, 30858), True, 'from theano import tensor as T\n'), ((40067, 40111), 'theano.tensor.matrix', 'T.matrix', (["('state_%d_%d_tm1' % (layer, state))"], {}), "('state_%d_%d_tm1' % (layer, state))\n", (40075, 40111), True, 'from theano import tensor as T\n'), ((40381, 40402), 'theano.tensor.tensor3', 'T.tensor3', (['"""attended"""'], {}), "('attended')\n", (40390, 40402), True, 'from theano import tensor as T\n'), ((40434, 40461), 'theano.tensor.tensor3', 'T.tensor3', (['"""attended_dot_u"""'], {}), "('attended_dot_u')\n", (40443, 40461), True, 'from theano import tensor as T\n'), ((40493, 40519), 'theano.tensor.matrix', 'T.matrix', (['"""attention_mask"""'], {}), "('attention_mask')\n", (40501, 40519), True, 'from theano import tensor as T\n'), ((42759, 42817), 'numpy.concatenate', 'np.concatenate', (['[inputs, layer_fixed[0][None, :]]'], {'axis': '(-1)'}), '([inputs, layer_fixed[0][None, :]], axis=-1)\n', (42773, 42817), True, 'import numpy as np\n'), ((49935, 49960), 'theano.tensor.dot', 'T.dot', (['selector', 'self._sw'], {}), '(selector, self._sw)\n', (49940, 49960), True, 'from theano import tensor as T\n'), ((50268, 50310), 'theano.tensor.concatenate', 'T.concatenate', (['[inputs, selector]'], {'axis': '(-1)'}), '([inputs, selector], axis=-1)\n', (50281, 50310), True, 'from theano import tensor as T\n'), ((50329, 50354), 
'theano.tensor.dot', 'T.dot', (['selector', 'self._sw'], {}), '(selector, self._sw)\n', (50334, 50354), True, 'from theano import tensor as T\n'), ((21119, 21141), 'theano.tensor.dot', 'T.dot', (['inputs', 'self._w'], {}), '(inputs, self._w)\n', (21124, 21141), True, 'from theano import tensor as T\n'), ((21172, 21193), 'theano.tensor.dot', 'T.dot', (['h_tm1', 'self._u'], {}), '(h_tm1, self._u)\n', (21177, 21193), True, 'from theano import tensor as T\n'), ((20602, 20626), 'theano.tensor.dot', 'T.dot', (['inputs', 'self._wzg'], {}), '(inputs, self._wzg)\n', (20607, 20626), True, 'from theano import tensor as T\n'), ((20629, 20652), 'theano.tensor.dot', 'T.dot', (['h_tm1', 'self._uzg'], {}), '(h_tm1, self._uzg)\n', (20634, 20652), True, 'from theano import tensor as T\n'), ((24500, 24525), 'theano.tensor.ones', 'T.ones', (['inputs.shape[:-1]'], {}), '(inputs.shape[:-1])\n', (24506, 24525), True, 'from theano import tensor as T\n'), ((24575, 24593), 'theano.tensor.ones_like', 'T.ones_like', (['h_tm1'], {}), '(h_tm1)\n', (24586, 24593), True, 'from theano import tensor as T\n'), ((24877, 24902), 'theano.tensor.ones', 'T.ones', (['inputs.shape[:-1]'], {}), '(inputs.shape[:-1])\n', (24883, 24902), True, 'from theano import tensor as T\n'), ((24952, 24970), 'theano.tensor.ones_like', 'T.ones_like', (['h_tm1'], {}), '(h_tm1)\n', (24963, 24970), True, 'from theano import tensor as T\n'), ((20055, 20087), 'theano.tensor.tanh', 'T.tanh', (['(attended_dot_u + h_dot_w)'], {}), '(attended_dot_u + h_dot_w)\n', (20061, 20087), True, 'from theano import tensor as T\n'), ((2879, 2888), 'numpy.abs', 'np.abs', (['m'], {}), '(m)\n', (2885, 2888), True, 'import numpy as np\n'), ((2928, 2937), 'numpy.abs', 'np.abs', (['m'], {}), '(m)\n', (2934, 2937), True, 'import numpy as np\n'), ((31147, 31173), 'theano.tensor.ones_like', 'T.ones_like', (['states_tm1[0]'], {}), '(states_tm1[0])\n', (31158, 31173), True, 'from theano import tensor as T\n'), ((31576, 31602), 'theano.tensor.ones_like', 
'T.ones_like', (['states_tm1[0]'], {}), '(states_tm1[0])\n', (31587, 31602), True, 'from theano import tensor as T\n'), ((40808, 40834), 'theano.tensor.ones_like', 'T.ones_like', (['states_tm1[0]'], {}), '(states_tm1[0])\n', (40819, 40834), True, 'from theano import tensor as T\n'), ((41237, 41263), 'theano.tensor.ones_like', 'T.ones_like', (['states_tm1[0]'], {}), '(states_tm1[0])\n', (41248, 41263), True, 'from theano import tensor as T\n'), ((31068, 31093), 'theano.tensor.ones', 'T.ones', (['inputs.shape[:-1]'], {}), '(inputs.shape[:-1])\n', (31074, 31093), True, 'from theano import tensor as T\n'), ((31499, 31524), 'theano.tensor.ones', 'T.ones', (['inputs.shape[:-1]'], {}), '(inputs.shape[:-1])\n', (31505, 31524), True, 'from theano import tensor as T\n'), ((40729, 40754), 'theano.tensor.ones', 'T.ones', (['inputs.shape[:-1]'], {}), '(inputs.shape[:-1])\n', (40735, 40754), True, 'from theano import tensor as T\n'), ((41160, 41185), 'theano.tensor.ones', 'T.ones', (['inputs.shape[:-1]'], {}), '(inputs.shape[:-1])\n', (41166, 41185), True, 'from theano import tensor as T\n')] |
# -*- coding: utf-8 -*-
displayFull = False
import numpy as np
from pandas import read_csv as importDB
import pandas as pd
database = r'\\UBSPROD.MSAD.UBS.NET\UserData\ozsanos\RF\Desktop\Black\stockData.csv'
tickers = ['AAPL','ADBE','ADI','AMD','AXP','BRCM','C','GLD','GOOG','GS','HNZ','HPQ','IBM','MSFT','TXN','XOM']
dateRange = [("2010-01-01","2010-12-31"),("2011-01-01","2011-12-31")]
# dateRange = pd.date_range(startDate, endDate)
'''
Pre-weightings permutations
'''
# Enumerate every 4-asset weighting (a, b, c, d) on a 0.1 grid whose
# components sum to 1.0: the first three weights range freely over the
# grid and the fourth takes the remainder.
points = range(0, 11, 1)
schemes = tuple(
    (round(i/10.0, 1), round(j/10.0, 1), round(k/10.0, 1),
     round(1.0 - (i + j + k)/10.0, 1))
    for i in points
    for j in points
    for k in points
    if i + j + k <= 10)
'''
*** Code Body ***
'''
def getData(startDate, endDate, symbolSet):
    """Load Close prices for `symbolSet` between startDate and endDate."""
    table = importDB(database, usecols=['Close'] + symbolSet, index_col='Close')
    return table.loc[startDate:endDate]
def simulate(startDate, endDate, symbolSet, weights):
    """Backtest a buy-and-hold portfolio over the given period.

    Returns [Sharpe ratio, mean daily return, std of daily returns,
    cumulative return], each rounded to 6 decimals.
    """
    prices = getData(startDate, endDate, symbolSet).values
    days = len(prices)
    portfolio = np.zeros(days)
    # Normalise each asset to its first close, apply its weight, and
    # accumulate the weighted series into the portfolio value.
    for col in range(len(prices[0])):
        prices[:, col] = weights[col] * prices[:, col] / prices[0, col]
        portfolio += prices[:, col]
    # Daily returns; day 0 has no predecessor and stays at 0.
    returns = np.zeros(days)
    returns[1:] = portfolio[1:] / portfolio[:-1] - 1
    meanDailyReturn = np.average(returns)
    stdDailyReturn = np.std(returns)
    cummDailyReturn = portfolio[-1]
    SharpeRatio = (days**0.5) * (meanDailyReturn / stdDailyReturn)
    return [round(SharpeRatio, 6), round(meanDailyReturn, 6),
            round(stdDailyReturn, 6), round(cummDailyReturn, 6)]
def optimise(symbolSet, dateFlag):
    """Grid-search the weighting schemes and print the best portfolio.

    Args:
        symbolSet: list of ticker symbols to allocate between.
        dateFlag: index into dateRange selecting the backtest period.
    """
    print('\n+ - + - +')
    # BUG FIX: starting from 0.0 left `metrics` empty -- and crashed below on
    # metrics[1] -- whenever no scheme achieved a positive Sharpe ratio.
    # Starting from -inf guarantees the first evaluated scheme seeds it,
    # while leaving the result unchanged whenever any Sharpe is positive.
    maxSharpe = float('-inf')
    metrics = []
    for weights in schemes:
        s = simulate(dateRange[dateFlag][0], dateRange[dateFlag][1],
                     symbolSet, weights)
        if displayFull:
            # The old `print(s[0]),` was a Python 2 remnant (the trailing
            # comma no longer suppresses the newline); print both together.
            print(s[0], weights)
        if s[0] > maxSharpe:
            maxSharpe = s[0]
            metrics = [s, weights]
    print("\nPortfolio:")
    print(tuple(symbolSet))
    print("\nOptimal Weights:")
    print(metrics[1])
    print("\nPerformance Metrics:")
    print(tuple(metrics[0]))
    print('\n+ - + - +')
| [
"numpy.std",
"numpy.average",
"numpy.zeros",
"pandas.read_csv"
] | [((1113, 1127), 'numpy.zeros', 'np.zeros', (['days'], {}), '(days)\n', (1121, 1127), True, 'import numpy as np\n'), ((1422, 1441), 'numpy.average', 'np.average', (['returns'], {}), '(returns)\n', (1432, 1441), True, 'import numpy as np\n'), ((1463, 1478), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (1469, 1478), True, 'import numpy as np\n'), ((850, 918), 'pandas.read_csv', 'importDB', (['database'], {'usecols': "(['Close'] + symbolSet)", 'index_col': '"""Close"""'}), "(database, usecols=['Close'] + symbolSet, index_col='Close')\n", (858, 918), True, 'from pandas import read_csv as importDB\n')] |
# coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Writes prediction to a csv file."""
import collections
import copy
import csv
from typing import Iterable, Mapping, Text, Tuple
import numpy as np
import tensorflow as tf
from absl import logging
from tapas.utils import text_utils
def _get_question_id(features):
  """Restores question id from int sequence."""
  if "question_id_ints" in features:
    restored = text_utils.ints_to_str(features["question_id_ints"].numpy()[0])
    if restored:
      return restored
  # TODO Remove once the data has been updated.
  return features["question_id"][0, 0].numpy().decode("utf-8")
def get_cell_token_probs(prediction):
  """Yields (token_index, probability) for tokens lying inside table cells."""
  probs = prediction["probabilities"][0].numpy()
  segment_ids = prediction["segment_ids"][0]
  column_ids = prediction["column_ids"][0]
  row_ids = prediction["row_ids"][0]
  for index in range(len(probs)):
    # Ids are 1-based; 0 marks padding/non-table tokens. Segment 1 is the
    # table part of the input.
    col = column_ids[index].numpy() - 1
    row = row_ids[index].numpy() - 1
    if col >= 0 and row >= 0 and segment_ids[index].numpy() == 1:
      yield index, probs[index]
def get_mean_cell_probs(prediction):
  """Computes average probability per cell, aggregating over tokens."""
  coords_to_probs = collections.defaultdict(list)
  column_ids = prediction["column_ids"][0]
  row_ids = prediction["row_ids"][0]
  # Group per-token probabilities by their (column, row) cell coordinate.
  for token_index, prob in get_cell_token_probs(prediction):
    coords = (column_ids[token_index].numpy() - 1,
              row_ids[token_index].numpy() - 1)
    coords_to_probs[coords].append(prob)
  return {
      coords: np.array(cell_probs).mean()
      for coords, cell_probs in coords_to_probs.items()
  }
def get_answer_indexes(
    prediction,
    cell_classification_threshold,
):
  """Computes answer indexes."""
  input_ids = prediction["input_ids"][0].numpy()
  span_indexes = prediction.get("span_indexes")
  span_logits = prediction.get("span_logits")
  # Prefer explicit span predictions when the model produced them.
  if span_indexes is not None and span_logits is not None:
    best_logit, best_span = max(zip(span_logits, span_indexes.tolist()))
    logging.log_every_n(
        logging.INFO,
        "best_span: %s, score: %s",
        500,
        best_span,
        best_logit,
    )
    start, end = best_span
    return [input_ids[i] for i in range(start, end + 1)]
  # Otherwise fall back to thresholding per-token cell probabilities.
  return [
      input_ids[i]
      for i, prob in get_cell_token_probs(prediction)
      if prob > cell_classification_threshold
  ]
def get_predictions(
    predictions,
    do_model_aggregation,
    do_model_classification,
    cell_classification_threshold,
):
  """Converts model predictions into one result dict per example.

  NOTE(review): despite the original "writes to an output TSV file" wording,
  this function does not write a file; it returns the rows as dicts with
  keys [question_id, id, annotator, position, answer_coordinates, answer,
  answer_probablities] plus aggregation/classification fields when enabled.

  Args:
    predictions: model predictions
    do_model_aggregation: Indicates whther to write predicted aggregations.
    do_model_classification: Indicates whther to write predicted classes.
    cell_classification_threshold: Threshold for selecting a cell.

  Returns:
    List of per-example prediction dicts.
  """
  results = []
  # NOTE(review): `header` is assembled but never used below; kept as-is.
  header = [
      "question_id",
      "id",
      "annotator",
      "position",
      "answer_coordinates",
      "answer",
  ]
  if do_model_aggregation:
    header.extend(["gold_aggr", "pred_aggr"])
  if do_model_classification:
    header.extend(["gold_cls", "pred_cls", "logits_cls"])
  for prediction in predictions:
    question_id = _get_question_id(prediction)
    max_width = prediction["column_ids"][0].numpy().max()
    max_height = prediction["row_ids"][0].numpy().max()
    # Padded examples have an empty table and the reserved question id.
    if (max_width == 0 and max_height == 0 and
        question_id == text_utils.get_padded_question_id()):
      logging.info("Removing padded example: %s", question_id)
      continue
    cell_coords_to_prob = get_mean_cell_probs(prediction)
    answer_indexes = get_answer_indexes(
        prediction,
        cell_classification_threshold,
    )
    # Select the answers above a classification threshold.
    answer_coordinates = []
    answer_probablities = []
    for col in range(max_width):
      for row in range(max_height):
        cell_prob = cell_coords_to_prob.get((col, row), None)
        if cell_prob is not None:
          if cell_prob > cell_classification_threshold:
            answer_coordinates.append(str((row, col)))
            answer_probablities.append(cell_prob)
    try:
      example_id, annotator, position = text_utils.parse_question_id(
          question_id)
      position = str(position)
    except ValueError:
      # Question id did not follow the <example>-<annotator>-<position>
      # convention; emit placeholders instead.
      example_id = "_"
      annotator = "_"
      position = "_"
    prediction_to_write = {
        "question_id": question_id,
        "id": example_id,
        "annotator": annotator,
        "position": position,
        "answer_coordinates": str(answer_coordinates),
        "answer": str(answer_indexes),
        "answer_probablities": answer_probablities if len(answer_probablities) else [0.0]
    }
    if do_model_aggregation:
      prediction_to_write["pred_aggr"] = str(prediction["pred_aggr"][0].numpy())
    if do_model_classification:
      prediction_to_write["pred_cls"] = str(prediction["pred_cls"])
      prediction_to_write["logits_cls"] = str(
          prediction["logits_cls"])
    results.append(prediction_to_write)
  return results
"tapas.utils.text_utils.parse_question_id",
"absl.logging.log_every_n",
"collections.defaultdict",
"absl.logging.info",
"numpy.array",
"tapas.utils.text_utils.get_padded_question_id"
] | [((1788, 1817), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1811, 1817), False, 'import collections\n'), ((2564, 2657), 'absl.logging.log_every_n', 'logging.log_every_n', (['logging.INFO', '"""best_span: %s, score: %s"""', '(500)', 'best_span', 'best_logit'], {}), "(logging.INFO, 'best_span: %s, score: %s', 500,\n best_span, best_logit)\n", (2583, 2657), False, 'from absl import logging\n'), ((4219, 4275), 'absl.logging.info', 'logging.info', (['"""Removing padded example: %s"""', 'question_id'], {}), "('Removing padded example: %s', question_id)\n", (4231, 4275), False, 'from absl import logging\n'), ((5058, 5099), 'tapas.utils.text_utils.parse_question_id', 'text_utils.parse_question_id', (['question_id'], {}), '(question_id)\n', (5086, 5099), False, 'from tapas.utils import text_utils\n'), ((2060, 2080), 'numpy.array', 'np.array', (['cell_probs'], {}), '(cell_probs)\n', (2068, 2080), True, 'import numpy as np\n'), ((4169, 4204), 'tapas.utils.text_utils.get_padded_question_id', 'text_utils.get_padded_question_id', ([], {}), '()\n', (4202, 4204), False, 'from tapas.utils import text_utils\n')] |
from .general import *
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from easydict import EasyDict
from tqdm import tqdm_notebook
import shutil
import datetime
import pickle
from collections import Counter
import random
import subprocess
import yaml
import re
## File utilities
def ensure_folder(folder):
    """Create `folder` (including missing parents) if it does not exist yet."""
    target = Path(folder)
    target.mkdir(exist_ok=True, parents=True)
def ensure_delete(folder_or_file):
    """Remove a file, symlink, or whole directory tree; silently do nothing if absent."""
    target = Path(folder_or_file)
    if target.is_dir() and not target.is_symlink():
        # Real directory: remove the whole tree.
        shutil.rmtree(str(folder_or_file))
        return
    if target.exists() or target.is_symlink():
        # Regular file or (possibly dangling) symlink.
        target.unlink()
def copy_file(src, dst):
    """Copy a single regular file from `src` to `dst`."""
    source = Path(src)
    assert source.is_file()
    shutil.copy(str(src), str(dst))
def _copy_any(src, dst, symlinks):
if Path(src).is_dir():
if Path(dst).is_dir():
dst = Path(dst)/Path(src).name
assert not Path(dst).exists()
shutil.copytree(src, dst, symlinks=symlinks)
else:
copy_file(src, dst)
def copy_any(src, dst, symlinks=True):
"""Copy any file or folder recursively.
Source file can be list/array of files.
"""
do_list_item(_copy_any, src, dst, symlinks)
def do_list_item(func, src, *prms):
if is_array_like(src):
result = True
for element in src:
result = do_list_item(func, element, *prms) and result
return result
else:
return func(src, *prms)
def _move_file(src, dst):
shutil.move(str(src), str(dst))
def move_file(src, dst):
"""Move source file to destination file/folder.
Source file can be list/array of files.
"""
do_list_item(_move_file, src, dst)
def symlink_file(fromfile, tofile):
"""Make fromfile's symlink as tofile."""
Path(tofile).symlink_to(fromfile)
def make_copy_to(dest_folder, files, n_sample=None, operation=copy_file):
"""Do file copy like operation from files to dest_folder.
If n_sample is set, it creates symlinks up to number of n_sample files.
If n_sample is greater than len(files), symlinks are repeated twice or more until it reaches to n_sample.
If n_sample is less than len(files), n_sample symlinks are created for the top n_sample samples in files."""
dest_folder.mkdir(exist_ok=True, parents=True)
if n_sample is None:
n_sample = len(files)
_done = False
_dup = 0
_count = 0
while not _done: # yet
for f in files:
f = Path(f)
name = f.stem+('_%d'%_dup)+f.suffix if 0 < _dup else f.name
to_file = dest_folder / name
operation(f, to_file)
_count += 1
_done = n_sample <= _count
if _done: break
_dup += 1
print('Now', dest_folder, 'has', len(list(dest_folder.glob('*'))), 'files.')
def expand_path(path):
"""Performs `ls` like operation.
Lists contents in a folder if path is a folder.
Expands wildcard if path contains wildcard in its name part."""
path = Path(path)
d, n = path.parent, path.name
return list(Path(d).glob(n))
def _copy_with_prefix(file, dest_folder, prefix, symlinks):
assert file.is_file()
new_filename = prefix + file.name
if symlinks:
symlink_file(file, dest_folder/new_filename)
else:
copy_file(file, dest_folder/new_filename)
def copy_with_prefix(files, dest_folder, prefix, symlinks=False):
"""Copy all files to destination folder,
and new file names will have prefix+original_filename."""
if not Path(dest_folder).is_dir():
raise Exception(f'{dest_folder} has to be an existing folder.')
# ensure files as array-like object
files = files if is_array_like(files) else [files]
# expand wild card
files = [
f.absolute() for could_be_wild in files for f in expand_path(could_be_wild)
]
# test all files are actually file
for f in files:
if not f.is_file(): raise Exception(f'Error: {f} is not a file.')
# do it
do_list_item(_copy_with_prefix, files, dest_folder, prefix, symlinks)
def load_yaml_config(path_to_config, default):
    """Load yaml configuration, or create default.
    Args:
        path_to_config (str or Path): path to the config file.
        default (dict): default values.
    Returns:
        config (EasyDict): read/default configuration key/values.
    """
    # Fix: the original used a bare `except:` (which swallows even
    # KeyboardInterrupt/SystemExit) and `assert` for validation (stripped
    # under -O). Now only read/parse failures trigger the default path.
    try:
        if not path_to_config.is_file():
            raise FileNotFoundError(path_to_config)
        with open(path_to_config) as f:
            yaml_contents = yaml.safe_load(f)
    except (OSError, yaml.YAMLError):
        # cannot read or parse, create one with the defaults
        yaml_contents = default
        with open(path_to_config, 'w') as f:
            f.write(yaml.dump(default))
        print(f' {path_to_config} cannot be read correctly, then created with default contents. Pls edit.')
    cfg = EasyDict(yaml_contents)
    return cfg
def tgz_all(base_dir, files, dest_tgz_path=None, test=True, logger=None):
"""Make .tgz of file/folders relative from base folder.
This just does:
cd base_dir && tar czf dest_tgz_path files
mkdir /tmp/dest_tgz_path.stem && cd /tmp/dest_tgz_path.stem && tar xf dest_tgz_path.absolute()
cd base_dir && for f in files: diff /tmp/dest_tgz_path.stem/f f
"""
logger = get_logger() if logger is None else logger
if len(files) == 0: return None
if dest_tgz_path is None:
dest_tgz_path = base_dir/Path(files[0]).with_suffix('.tgz').name
# zip them
files = [str(f) for f in files]
commands = f'cd {base_dir} && tar czf {dest_tgz_path.absolute()} {" ".join(files)}'
ret, out = exec_commands(commands)
if ret != 0:
logger.error(f'Failed with commands: {commands}\n"{out}"')
return None
# test zip
if test:
test_folder = Path('/tmp')/('dummy_'+dest_tgz_path.stem)
ensure_delete(test_folder)
commands = f'mkdir {test_folder} && cd {test_folder} && tar xf {dest_tgz_path.absolute()}'
ret, out = exec_commands(commands)
if ret != 0:
logger.error(f'Test failed with commands: {commands}\n"{out}"\n* {dest_tgz_path} still exists.')
return None
failed = False
for f in files:
commands = f'cd {base_dir} && diff {test_folder/f} {f}'
ret, out = exec_commands(commands)
if ret != 0:
logger.error(f'Test failed: {commands}\n{out}\n* {dest_tgz_path} still exists.')
failed = True
ensure_delete(test_folder)
if failed:
return None
return dest_tgz_path
def chmod_tree_all(tree_root, mode=0o775):
    """Change permission for all the files or directories under the tree_root."""
    for parent, dirnames, filenames in os.walk(tree_root):
        # Directories first, then files — same order as before.
        for entry in dirnames + filenames:
            os.chmod(os.path.join(parent, entry), mode)
def subsample_files_in_tree(root, filename_pattern, size):
"""
Sub-sample list of filenames under root folder.
This ensures to keep sample balance among folders.
Arguments:
root: Root folder to search files from.
filename_pattern: Wildcard pattern like: '*.png'.
size:
(0, 1): size to sub-sample; 0.5 for 50%.
1 or 1.: 100%.
integer > 1: Number of samples.
Returns:
List of sub-sampled files.
Note that number of files in a folder could be less than size,
if original number of files is less than size. No oversampling.
"""
files = []
folders = [f for f in root.glob('**') if f.is_dir()]
for folder in folders:
candidates = [str(f) for f in folder.glob(filename_pattern)]
n_sample = int(len(candidates) * size) if size < 1. else \
len(candidates) if int(size) == 1 else min(size, len(candidates))
if n_sample <= 0: continue
files.extend(random.sample(candidates, n_sample))
return files
def copy_subsampled_files(root, dest, wildcard, size, symlinks=False):
"""
Copy all files that match wildcard under root folder, to the dest folder.
Note that all files in the sub tree of root folder will be copied.
Latter found file among the same name files will survive,
all others will be overwritten.
Arguments:
root: Root source folder.
dest: destination folder.
wildcard: Wildcard to find files.
size: Size to subsample, see subsample_files_in_tree() for the detail.
symlinks: Keeps symbolic links or makes new copy. See shutil.copytree() for the detail.
"""
files = subsample_files_in_tree(root, wildcard, size=size)
ensure_folder(dest)
for f in files:
copy_any(Path(f).absolute(), dest, symlinks=symlinks)
def save_as_pkl_binary(obj, filename):
    """Serialize `obj` into `filename` as a pickle binary file,
    using the highest protocol available.
    """
    with open(filename, 'wb') as fp:
        pickle.dump(obj, fp, pickle.HIGHEST_PROTOCOL)
def load_pkl(filename):
    """Deserialize and return the pickled object stored in `filename`."""
    with open(filename, 'rb') as fp:
        obj = pickle.load(fp)
    return obj
def read_yaml(file_name, fix_none=True):
"""Read yaml file and set None if str is 'None'."""
def fix_dict_none(dict_):
"""Fix dict item 'None' as None"""
for k in dict_:
if isinstance(dict_[k], dict):
fix_dict_none(dict_[k])
elif isinstance(dict_[k], str) and dict_[k] == 'None':
dict_[k] = None
with open(file_name) as f:
yaml_data = yaml.safe_load(f)
if fix_none:
fix_dict_none(yaml_data)
return yaml_data
## Log utilities
import logging
_loggers = {}
def get_logger(name=None, level=logging.DEBUG, format=None, print=True, output_file=None):
"""One liner to get logger.
See test_log.py for example.
"""
name = name or __name__
if _loggers.get(name):
return _loggers.get(name)
else:
log = logging.getLogger(name)
formatter = logging.Formatter(format or '%(asctime)s %(name)s %(funcName)s [%(levelname)s]: %(message)s')
def add_handler(handler):
handler.setFormatter(formatter)
handler.setLevel(level)
log.addHandler(handler)
if print:
add_handler(logging.StreamHandler())
if output_file:
ensure_folder(Path(output_file).parent)
add_handler(logging.FileHandler(output_file))
log.setLevel(level)
log.propagate = False
_loggers[name] = log
return log
## Multi process utilities
def caller_func_name(level=2):
"""Return caller function name."""
return sys._getframe(level).f_code.co_name
def _file_mutex_filename(filename):
return filename or '/tmp/'+Path(caller_func_name(level=3)).stem+'.lock'
def lock_file_mutex(filename=None):
"""Lock file mutex (usually placed under /tmp).
Note that filename will be created based on caller function name.
"""
filename = _file_mutex_filename(filename)
with open(filename, 'w') as f:
f.write('locked at {}'.format(datetime.datetime.now()))
def release_file_mutex(filename=None):
"""Release file mutex."""
filename = _file_mutex_filename(filename)
ensure_delete(filename)
def is_file_mutex_locked(filename=None):
"""Check if file mutex is locked or not."""
filename = _file_mutex_filename(filename)
return Path(filename).exists()
def exec_commands(commands):
    """Execute commands with subprocess.Popen.
    Returns:
        Return code, console output as str.
    """
    # NOTE(review): shell=True runs the string through the shell — pass only
    # trusted, internally-built command strings (as tgz_all does).
    # stderr is merged into stdout so one stream captures everything.
    p = subprocess.Popen(commands, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    retval = p.wait()
    # The conditional applies to the second tuple element only: (retval, out-or-'').
    return retval, p.stdout.read().decode() if p.stdout else ''
## Date utilities
def str_to_date(text):
    """Parse 'YYYY/MM/DD' or 'YYYY-MM-DD' text into a datetime.date."""
    fmt = '%Y/%m/%d' if '/' in text else '%Y-%m-%d'
    parsed = datetime.datetime.strptime(text, fmt)
    return datetime.date(parsed.year, parsed.month, parsed.day)
def get_week_start_end_dates(week_no:int, year=None) -> [datetime.datetime, datetime.datetime]:
    """Get start and end date of an ISO calendar week.
    ISO week starts on Monday, and ends on Sunday.
    Arguments:
        week_no: ISO calendar week number
        year: Year to calculate, None will set this year
    Returns:
        [start_date:datetime, end_date:datetime]
    """
    if not year:
        # Use the ISO year of today when no year is given.
        year = datetime.datetime.today().isocalendar()[0]
    iso_fmt = "%G-W%V-%u"
    start_date = datetime.datetime.strptime(f'{year}-W{week_no:02d}-1', iso_fmt).date()
    end_date = datetime.datetime.strptime(f'{year}-W{week_no:02d}-7', iso_fmt).date()
    return [start_date, end_date]
def get_this_week_no(date=None):
    """Get ISO calendar week no of given date.
    If date is not given, get for today."""
    target = datetime.date.today() if date is None else date
    return target.isocalendar()[1]
def get_num_of_weeks(year):
    """Returns number of weeks in a given year.
    Per ISO 8601 (see Wikipedia): the number of weeks equals the
    ISO week number of 28 December of that year.
    """
    dec28 = datetime.date(year=year, month=12, day=28)
    return get_this_week_no(date=dec28)
def daterange(start_date, end_date, inclusive=False):
    """Yield each date from start_date up to end_date.
    end_date itself is yielded only when inclusive=True.
    """
    total = int((end_date - start_date).days)
    if inclusive:
        total += 1
    current = start_date
    for _ in range(total):
        yield current
        current = current + datetime.timedelta(1)
def add_days_to_date(one_date, days, not_exceed='today'):
    """Add number of days to one_date, clamped so it doesn't exceed not_exceed.
    Arguments:
        one_date: datetime.datetime date to add days.
        days: Adding number of days, int.
        not_exceed:
            - datetime.datetime date if limiting resulting date,
            - None if nothing to limit,
            - 'today' if limiting to today.
    Returns:
        datetime.datetime date.
    """
    result = one_date + datetime.timedelta(days)
    if not_exceed == 'today':
        limit = datetime.datetime.today().date()
    else:
        limit = not_exceed
    if limit is not None and limit < result:
        result = limit
    return result
## List utilities
def write_text_list(textfile, a_list):
    """Write list of str to a file, one item per line, with a trailing newline."""
    content = '\n'.join(a_list) + '\n'
    with open(textfile, 'w') as fp:
        fp.write(content)
def read_text_list(filename) -> list:
    """Read a text file as a list of lines, each stripped of surrounding whitespace."""
    with open(filename) as fp:
        content = fp.read()
    return [line.strip() for line in content.splitlines()]
from itertools import chain
def flatten_list(lists):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for sub in lists for item in sub]
def is_array_like(item):
    """Check if item is an array-like object (list, set, tuple, or numpy array)."""
    array_types = (list, set, tuple, np.ndarray)
    return isinstance(item, array_types)
def is_flat_array(array):
    """Check that no element of `array` is itself an array-like object."""
    return all(not is_array_like(item) for item in array)
# Thanks to https://stackoverflow.com/questions/3844801/check-if-all-elements-in-a-list-are-identical
def all_elements_are_identical(iterator):
    """Check all elements in an iterable compare equal to the first element.
    An empty iterable counts as identical (True)."""
    it = iter(iterator)
    sentinel = object()
    first = next(it, sentinel)
    if first is sentinel:
        return True
    return all(first == element for element in it)
## Text utilities
# Thanks to https://github.com/dsindex/blog/wiki/%5Bpython%5D-difflib,-show-differences-between-two-strings
import difflib
def show_text_diff(text, n_text):
    """Return a markup string describing the edits turning `text` into `n_text`.
    Insertions, deletions and replacements are wrapped in <INS>/<DEL>/<REPL>
    tags; equal spans are omitted. Based on difflib.SequenceMatcher opcodes.
    """
    matcher = difflib.SequenceMatcher(None, text, n_text)
    pieces = []
    for tag, a0, a1, b0, b1 in matcher.get_opcodes():
        if tag == 'equal':
            continue
        if tag == 'insert':
            pieces.append("<INS>" + matcher.b[b0:b1] + "</INS>")
        elif tag == 'delete':
            pieces.append("<DEL>" + matcher.a[a0:a1] + "</DEL>")
        elif tag == 'replace':
            pieces.append("<REPL>" + matcher.b[b0:b1] + "</REPL>")
        else:
            raise RuntimeError
    return ''.join(pieces)
import unicodedata
def unicode_visible_width(unistr):
    """Returns the number of printed characters in a Unicode string.
    Narrow characters (east-asian width 'N'/'Na') count as 1, all others as 2."""
    width = 0
    for ch in unistr:
        width += 1 if unicodedata.east_asian_width(ch) in ('N', 'Na') else 2
    return width
def int_from_text(text, default=0):
    """Extract the leftmost int number found in `text`, or `default` if none."""
    match = re.search(r'\d+', str(text))
    if match is None:
        return default
    return int(match.group(0))
## Pandas utilities
def df_to_csv_excel_friendly(df, filename, **args):
"""df.to_csv() to be excel friendly UTF-8 handling."""
df.to_csv(filename, encoding='utf_8_sig', **args)
def df_merge_update(df_list):
    """Merge data frames; rows with a duplicated index are updated by the later frame.
    Usages:
        - df_merge_update([df1, df2, ...]) merges dataframes on the list.
    """
    merged = df_list[0]
    for other in df_list[1:]:
        combined = pd.concat([merged, other])
        # Keep the last occurrence of each index value, then restore order.
        merged = combined[~combined.index.duplicated(keep='last')].sort_index()
    return merged
from functools import reduce
def df_merge_simply(dataframes):
    """Merge a list of data frames (all with identical columns) into one.
    Uses an outer merge, pairwise from left to right.
    """
    # All frames must share the same columns.
    first = dataframes[0]
    for other in dataframes[1:]:
        assert np.all(first.columns == other.columns)
    # Fold them together with outer merges.
    merged = dataframes[0]
    for right in dataframes[1:]:
        merged = pd.merge(merged, right, how='outer')
    return merged
def df_select_by_keyword(source_df, keyword, search_columns=None, as_mask=False):
"""Select data frame rows by a search keyword.
Any row will be selected if any of its search columns contain the keyword.
Returns:
New data frame where rows have the keyword,
or mask if as_mask is True.
"""
search_columns = search_columns or source_df.columns
masks = np.column_stack([source_df[col].str.contains(keyword, na=False) for col in search_columns])
mask = masks.any(axis=1)
if as_mask:
return mask
return source_df.loc[mask]
def df_select_by_keywords(source_df, keys_cols, and_or='or', as_mask=False):
"""Multi keyword version of df_select_by_keyword.
Arguments:
key_cols: dict defined as `{'keyword1': [search columns] or None, ...}`
"""
masks = []
for keyword in keys_cols:
columns = keys_cols[keyword]
mask = df_select_by_keyword(source_df, keyword, search_columns=columns, as_mask=True)
masks.append(mask)
mask = np.column_stack(masks).any(axis=1) if and_or == 'or' else \
np.column_stack(masks).all(axis=1)
if as_mask:
return mask
return source_df.loc[mask]
def df_mask_by_str_or_list(df, column, keys):
    """Find str match and make mask of dataframe.
    If multiple keys are fed, mask will be AND-calculated among keys.
    Args:
        df: Source DataFrame.
        column: Column name to search in (must be a str column).
        keys: Single key string, or list of key strings.
    Returns:
        Boolean Series mask (None if keys is empty).
    """
    mask = None
    # Fix: `type(keys) == str` is the non-idiomatic form and fails for
    # str subclasses; isinstance handles both.
    if isinstance(keys, str):
        keys = [keys]
    for key in keys:
        this_mask = df[column].str.find(key) >= 0
        mask = this_mask if mask is None else (mask & this_mask)
    return mask
def df_mask_by_str_conditions(df, conditions):
"""Find dataframe rows that matches condition of str search.
Returns:
Aggregated mask from masks calculated from sub conditions recursively.
"""
col_or_op, key_or_conds = conditions
if is_array_like(key_or_conds):
if col_or_op not in ['and', 'or']:
raise Exception(f'unknown condition: {col_or_op}')
masks = [df_mask_by_str_conditions(df, sub_conds) for sub_conds in key_or_conds]
mask = np.column_stack(masks).any(axis=1) if col_or_op == 'or' else \
np.column_stack(masks).all(axis=1)
return mask
else:
return df_mask_by_str_or_list(df, col_or_op, key_or_conds)
def df_str_replace(df, from_strs, to_str, regex=True):
    """Apply str.replace to the entire DataFrame in place.
    - Only string columns (dtype == 'object') are touched.
    - All other dtype columns are left untouched.
    """
    for col in df.columns:
        if df[col].dtype == 'object':
            df[col] = df[col].str.replace(from_strs, to_str, regex=regex)
def df_cell_str_replace(df, from_str, to_str):
    """Replace cell strings in place, substituting from_str with to_str.
    Delegates to df_str_replace with regex disabled (literal replacement)
    on all string (object dtype) columns.
    Fix: the body referenced undefined `from_strs` (parameter is `from_str`),
    so every call raised NameError.
    """
    df_str_replace(df, from_str, to_str, regex=False)
def df_print_differences(df1, df2):
"""Print all difference between two dataframes."""
if df1.shape != df2.shape:
print(f'Error: df1.shape={df1.shape} != df2.shape{df2.shape}')
return
rows, cols = np.where(df1 != df2)
for r, c in zip(rows, cols):
print(f'at[{r},{c}] "{df1.iat[r, c]}" != "{df2.iat[r, c]}"')
_EXCEL_LIKE = ['.csv', '.xls', '.xlsx', '.xlsm']
def is_excel_file(filename):
    """True if filename has an Excel-like suffix (.csv/.xls/.xlsx/.xlsm), case-insensitive."""
    suffix = Path(filename).suffix.lower()
    return suffix in _EXCEL_LIKE
def is_csv_file(filename):
    """True if the file name's suffix is .csv (case-insensitive)."""
    suffix = Path(filename).suffix
    return suffix.lower() == '.csv'
def pd_read_excel_keep_dtype(io, **args):
"""pd.read_excel() wrapper to do as described in pandas document:
'... preserve data as stored in Excel and not interpret dtype'
Details:
- String '1' might be loaded as int 1 by pd.read_excel(file).
- By setting `dtype=object` it will preserve it as string '1'.
"""
return pd.read_excel(io, dtype=object, **args)
def pd_read_csv_as_str(filename, **args):
"""pd.read_csv() wrapper to preserve data type = str"""
return pd.read_csv(filename, dtype=object, **args)
def df_load_excel_like(filename, preserve_dtype=True, **args):
"""Load Excel like files. (csv, xlsx, ...)"""
if is_csv_file(filename):
if preserve_dtype:
return pd_read_csv_as_str(filename, **args)
return pd.read_csv(filename, **args)
if preserve_dtype:
return pd_read_excel_keep_dtype(filename, **args)
return pd.read_excel(filename, **args)
import codecs
def df_read_sjis_csv(filename, **args):
"""Read shift jis Japanese csv file.
Thanks to https://qiita.com/niwaringo/items/d2a30e04e08da8eaa643
"""
with codecs.open(filename, 'r', 'Shift-JIS', 'ignore') as file:
return pd.read_table(file, delimiter=',', **args)
def df_highlight_max(df, color='yellow', axis=1):
"""Highlight max valued cell with color.
Thanks to https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
"""
def highlight_max(s):
'''Highlight the maximum in a Series yellow or any color.'''
is_max = s == s.max()
return [f'background-color: {color}' if v else '' for v in is_max]
df = df.copy()
return df.style.apply(highlight_max, axis=axis)
def df_apply_sns_color_map(df, color='red', **kwargs):
"""Set color map to a dataframe.
Thanks to https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
"""
import seaborn as sns
cm = sns.light_palette(color, as_cmap=True, **kwargs)
df = df.copy()
return df.style.background_gradient(cmap=cm)
## Dataset utilities
def flatten_y_if_onehot(y):
    """De-one-hot y: [[0,1],[1,0],...] becomes [1,0,...]; flat y is passed through unchanged."""
    if len(np.array(y).shape) == 1:
        return y
    return np.argmax(y, axis=-1)
def get_class_distribution(y):
    """Calculate number of samples per class.
    y can be one-hot labels, class indices, or class-name strings."""
    # Convert one-hot to class indices if needed.
    y_cls = flatten_y_if_onehot(y)
    # Count occurrences per class, keyed in sorted class order.
    classset = sorted(list(set(y_cls)))
    sample_distribution = {}
    for cur_cls in classset:
        sample_distribution[cur_cls] = sum(1 for one in y_cls if one == cur_cls)
    return sample_distribution
def get_class_distribution_list(y, num_classes):
"""Calculate number of samples per class as list"""
dist = get_class_distribution(y)
assert(y[0].__class__ != str) # class index or class OH label only
list_dist = np.zeros((num_classes))
for i in range(num_classes):
if i in dist:
list_dist[i] = dist[i]
return list_dist
def _balance_class(X, y, min_or_max, sampler_class, random_state):
"""Balance class distribution with sampler_class."""
y_cls = flatten_y_if_onehot(y)
distribution = get_class_distribution(y_cls)
classes = list(distribution.keys())
counts = list(distribution.values())
nsamples = np.max(counts) if min_or_max == 'max' \
else np.min(counts)
flat_ratio = {cls:nsamples for cls in classes}
Xidx = [[xidx] for xidx in range(len(X))]
sampler_instance = sampler_class(ratio=flat_ratio, random_state=random_state)
Xidx_resampled, y_cls_resampled = sampler_instance.fit_sample(Xidx, y_cls)
sampled_index = [idx[0] for idx in Xidx_resampled]
return np.array([X[idx] for idx in sampled_index]), np.array([y[idx] for idx in sampled_index])
def balance_class_by_over_sampling(X, y, random_state=42):
"""Balance class distribution with imbalanced-learn RandomOverSampler."""
from imblearn.over_sampling import RandomOverSampler
return _balance_class(X, y, 'max', RandomOverSampler, random_state)
def balance_class_by_under_sampling(X, y, random_state=42):
"""Balance class distribution with imbalanced-learn RandomUnderSampler."""
from imblearn.under_sampling import RandomUnderSampler
return _balance_class(X, y, 'min', RandomUnderSampler, random_state)
def df_balance_class_by_over_sampling(df, label_column, random_state=42):
"""Balance class distribution in DataFrame with imbalanced-learn RandomOverSampler."""
X, y = list(range(len(df))), list(df[label_column])
X, _ = balance_class_by_over_sampling(X, y, random_state=random_state)
return df.iloc[X].sort_index()
def df_balance_class_by_under_sampling(df, label_column, random_state=42):
"""Balance class distribution in DataFrame with imbalanced-learn RandomUnderSampler."""
X, y = list(range(len(df))), list(df[label_column])
X, _ = balance_class_by_under_sampling(X, y, random_state=random_state)
return df.iloc[X].sort_index()
def balance_class_by_limited_over_sampling(X, y, max_sample_per_class=None, multiply_limit=2., random_state=42):
    """Balance class distribution basically by oversampling but limited duplication.
    Args:
        X: Data samples, only size of samples is used here.
        y: Class labels to be balanced.
        max_sample_per_class: Number of maximum samples per class, large class will be limitd to this number.
        multiply_limit: Small size class samples will be duplicated, but limited to multiple of this number.
    """
    assert len(X) == len(y), f'Length of X({len(X)}) and y({len(y)}) is different, supposed to be the same.'
    y_count = Counter(y)
    # Default cap: the size of the largest class.
    max_sample_per_class = max_sample_per_class or np.max(list(y_count.values()))
    resampled_idxes = []
    random.seed(random_state)  # deterministic oversampling choices
    for cur_y, count in y_count.items():
        # Target size for this class: min(multiply_limit * count, cap).
        this_samples = np.min([multiply_limit * count, max_sample_per_class]).astype(int)
        idxes = np.where(y == cur_y)[0]
        # Add all class samples first
        resampled_idxes += list(idxes)
        # Add oversampled
        idxes = random.choices(idxes, k=this_samples-len(idxes))
        resampled_idxes += list(idxes)
    # NOTE(review): fancy-indexing with a list and `np.where(y == cur_y)`
    # both assume X and y are numpy arrays (as passed by
    # df_balance_class_by_limited_over_sampling); plain lists would fail here
    # — confirm other callers.
    return X[resampled_idxes], y[resampled_idxes]
def df_balance_class_by_limited_over_sampling(df, label_column,
max_sample_per_class=None, multiply_limit=2.,
random_state=42):
"""Balance class distribution in DataFrame with balance_class_by_limited_over_sampling."""
X, y = np.array(range(len(df))), df[label_column].values
X, _ = balance_class_by_limited_over_sampling(X, y, max_sample_per_class=max_sample_per_class,
multiply_limit=multiply_limit, random_state=random_state)
return df.iloc[X].sort_index()
from sklearn.model_selection import train_test_split
def subsample_stratified(X, y, size=0.1):
"""Stratified subsampling."""
_, X_test, _, y_test = train_test_split(X, y, test_size=size, stratify=y)
return X_test, y_test
def all_same_classes(y_a, y_b, delimiter=None):
"""Test if all classes in y_a is also in y_b or not.
If y_a is a single dimension array, test as single labeled.
If y_a is a two dimension array, test as multi-labeled.
Args:
y_a: One list of labels.
y_b: Another list of labels.
delimiter: Set a character if multi-label text is given.
Returns:
True or False.
"""
if is_array_like(y_a[0]):
# binary matrix multi-label table, test that class existance is the same.
y_a, y_b = y_a.sum(axis=0), y_b.sum(axis=0)
classes_a, classes_b = y_a > 0, y_b > 0
return np.all(classes_a == classes_b)
# test: classes contained in both array is consistent.
if delimiter is not None:
y_a = flatten_list([y.split(delimiter) for y in y_a])
y_b = flatten_list([y.split(delimiter) for y in y_b])
classes_a, classes_b = list(set(y_a)), list(set(y_b))
return len(classes_a) == len(classes_b)
def train_test_sure_split(X, y, n_attempt=100, return_last=False, debug=False, **kwargs):
    """Variant of train_test_split that makes validation for sure.
    Returned y_test should contain all class samples at least one.
    Simply try train_test_split repeatedly until the result satisfies this condition.
    Args:
        n_attempt: Number of attempts to satisfy class coverage.
        return_last: Return last attempt results if all attempts didn't satisfy.
    Returns:
        X_train, X_test, y_train, y_test if satisfied;
        or None, None, None, None.
    """
    last_split = None
    for _ in range(n_attempt):
        X_trn, X_val, y_trn, y_val = train_test_split(X, y, **kwargs)
        if all_same_classes(y, y_val):
            return X_trn, X_val, y_trn, y_val
        last_split = (X_trn, X_val, y_trn, y_val)
        if debug:
            print('.', end='')
    # Fix: previously, return_last with n_attempt <= 0 raised
    # UnboundLocalError because X_trn was never assigned.
    if return_last and last_split is not None:
        return last_split
    return None, None, None, None
## Visualization utilities
def _expand_labels_from_y(y, labels):
"""Make sure y is index of label set."""
if labels is None:
labels = sorted(list(set(y)))
y = [labels.index(_y) for _y in y]
return y, labels
def visualize_class_balance(title, y, labels=None, sorted=False):
y, labels = _expand_labels_from_y(y, labels)
sample_dist_list = get_class_distribution_list(y, len(labels))
if sorted:
items = list(zip(labels, sample_dist_list))
items.sort(key=lambda x:x[1], reverse=True)
sample_dist_list = [x[1] for x in items]
labels = [x[0] for x in items]
index = range(len(labels))
fig, ax = plt.subplots(1, 1, figsize = (16, 5))
ax.bar(index, sample_dist_list)
ax.set_xlabel('Label')
ax.set_xticks(index)
ax.set_xticklabels(labels, rotation='vertical')
ax.set_ylabel('Number of Samples')
ax.set_title(title)
fig.show()
from collections import OrderedDict
def print_class_balance(title, y, labels=None, sorted=False):
y, labels = _expand_labels_from_y(y, labels)
distributions = get_class_distribution(y)
dist_dic = {labels[cls]:distributions[cls] for cls in distributions}
if sorted:
items = list(dist_dic.items())
items.sort(key=lambda x:x[1], reverse=True)
dist_dic = OrderedDict(items) # sorted(dist_dic.items(), key=...) didn't work for some reason...
print(title, '=', dist_dic)
zeroclasses = [label for i, label in enumerate(labels) if i not in distributions.keys()]
if 0 < len(zeroclasses):
print(' 0 sample classes:', zeroclasses)
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
def calculate_clf_metrics(y_true, y_pred, average='weighted'):
    """Calculate metrics: f1/recall/precision/accuracy.
    Args:
        y_true: GT, an index of label or one-hot encoding format.
        y_pred: Prediction output, index or one-hot.
        average: `average` parameter passed to sklearn.metrics functions.
    Returns:
        Four metrics: f1, recall, precision, accuracy.
    """
    y_true = flatten_y_if_onehot(y_true)
    y_pred = flatten_y_if_onehot(y_pred)
    # If labels only go up to 1 on both sides, treat it as a binary task so
    # sklearn reports metrics for the positive class.
    if np.max(y_true) < 2 and np.max(y_pred) < 2:
        average = 'binary'
    f1 = f1_score(y_true, y_pred, average=average)
    recall = recall_score(y_true, y_pred, average=average)
    precision = precision_score(y_true, y_pred, average=average)
    accuracy = accuracy_score(y_true, y_pred)
    return f1, recall, precision, accuracy
def skew_bin_clf_preds(y_pred, binary_bias=None, logger=None):
    """Apply bias to prediction results for binary classification.
    Calculated as follows.
        p(y=1) := p(y=1) ^ binary_bias
        p(y=0) := 1 - p(y=0)
    0 < binary_bias < 1 will be optimistic with result=1.
    Inversely, 1 < binary_bias will make results pesimistic.
    """
    # Copy so the caller's array is never modified.
    _preds = np.array(y_pred.copy())
    if binary_bias is not None:
        # Raise class-1 probabilities to the bias power, keep columns summing to 1.
        ps = np.power(_preds[:, 1], binary_bias)
        _preds[:, 1] = ps
        _preds[:, 0] = 1 - ps
        logger = get_logger() if logger is None else logger
        logger.info(f' @skew{"+" if binary_bias >= 0 else ""}{binary_bias}')
    return _preds
def print_clf_metrics(y_true, y_pred, average='weighted', binary_bias=None, title_prefix='', logger=None):
"""Calculate and print metrics: f1/recall/precision/accuracy.
See calculate_clf_metrics() and skew_bin_clf_preds() for more detail.
"""
# Add bias if binary_bias is set
_preds = skew_bin_clf_preds(y_pred, binary_bias, logger=logger)
# Calculate metrics
f1, recall, precision, acc = calculate_clf_metrics(y_true, _preds, average=average)
logger = get_logger() if logger is None else logger
logger.info('{0:s}F1/Recall/Precision/Accuracy = {1:.4f}/{2:.4f}/{3:.4f}/{4:.4f}' \
.format(title_prefix, f1, recall, precision, acc))
# Thanks to https://qiita.com/knknkn1162/items/be87cba14e38e2c0f656
def plt_japanese_font_ready():
"""Set font family with Japanese fonts.
# How to install fonts:
wget https://ipafont.ipa.go.jp/IPAfont/IPAfont00303.zip
unzip -q IPAfont00303.zip
sudo cp IPAfont00303/*.ttf /usr/share/fonts/truetype/
"""
plt.rcParams['font.family'] = 'IPAPGothic'
def plt_looks_good():
"""Plots will be looks good (at least to me)."""
plt.rcParams["figure.figsize"] = [16, 10]
plt.rcParams['font.size'] = 14
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
def pd_display_more(max_cols=100, max_rows=500):
"""Set max cols/rows of pandas display."""
pd.options.display.max_columns = max_cols
pd.options.display.max_rows = max_rows
# Thanks to http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(y_test, y_pred, classes,
normalize=True,
title=None,
cmap=plt.cm.Blues):
"""Plot confusion matrix."""
po = np.get_printoptions()
np.set_printoptions(precision=2)
y_test = flatten_y_if_onehot(y_test)
y_pred = flatten_y_if_onehot(y_pred)
cm = confusion_matrix(y_test, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
if title is None: title = 'Normalized confusion matrix'
else:
if title is None: title = 'Confusion matrix (not normalized)'
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
np.set_printoptions(**po)
def deterministic_everything(seed=42, pytorch=True, tf=False):
    """Set pseudo random everything deterministic. a.k.a. `seed_everything`

    Universal to major frameworks.

    Args:
        seed: seed value fed to every RNG.
        pytorch: also seed PyTorch and force deterministic cuDNN.
        tf: also seed TensorFlow (both 1.x and 2.x APIs supported).

    Thanks to https://docs.fast.ai/dev/test.html#getting-reproducible-results
    Thanks to https://pytorch.org/docs/stable/notes/randomness.html
    """
    # Python RNG
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Numpy RNG
    import numpy as np
    np.random.seed(seed)
    # Pytorch RNGs
    if pytorch:
        import torch
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    # TensorFlow RNG
    if tf:
        # BUG FIX: `tf.set_random_seed` exists only in TF 1.x; TF 2.x renamed
        # it to `tf.random.set_seed`. Also avoid shadowing the `tf` parameter
        # with the imported module.
        import tensorflow as tf_module
        if hasattr(tf_module.random, 'set_seed'):  # TF 2.x
            tf_module.random.set_seed(seed)
        else:  # TF 1.x fallback
            tf_module.set_random_seed(seed)
def simply_ignore(warnings_):
    """Silence every warning category on the given `warnings` module.

    Usage:
        import warnings; simply_ignore(warnings)
    """
    warnings_.simplefilter('ignore')
| [
"matplotlib.pyplot.title",
"pickle.dump",
"numpy.random.seed",
"numpy.argmax",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"seaborn.light_palette",
"random.sample",
"yaml.dump",
"logging.Formatter",
"sklearn.metrics.f1_score",
"pickle.load... | [((4846, 4869), 'easydict.EasyDict', 'EasyDict', (['yaml_contents'], {}), '(yaml_contents)\n', (4854, 4869), False, 'from easydict import EasyDict\n'), ((10090, 10187), 'logging.Formatter', 'logging.Formatter', (["(format or '%(asctime)s %(name)s %(funcName)s [%(levelname)s]: %(message)s')"], {}), "(format or\n '%(asctime)s %(name)s %(funcName)s [%(levelname)s]: %(message)s')\n", (10107, 10187), False, 'import logging\n'), ((11632, 11725), 'subprocess.Popen', 'subprocess.Popen', (['commands'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(commands, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n', (11648, 11725), False, 'import subprocess\n'), ((12018, 12073), 'datetime.date', 'datetime.date', (['temp_dt.year', 'temp_dt.month', 'temp_dt.day'], {}), '(temp_dt.year, temp_dt.month, temp_dt.day)\n', (12031, 12073), False, 'import datetime\n'), ((15951, 15994), 'difflib.SequenceMatcher', 'difflib.SequenceMatcher', (['None', 'text', 'n_text'], {}), '(None, text, n_text)\n', (15974, 15994), False, 'import difflib\n'), ((21184, 21204), 'numpy.where', 'np.where', (['(df1 != df2)'], {}), '(df1 != df2)\n', (21192, 21204), True, 'import numpy as np\n'), ((21930, 21969), 'pandas.read_excel', 'pd.read_excel', (['io'], {'dtype': 'object'}), '(io, dtype=object, **args)\n', (21943, 21969), True, 'import pandas as pd\n'), ((22085, 22128), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'dtype': 'object'}), '(filename, dtype=object, **args)\n', (22096, 22128), True, 'import pandas as pd\n'), ((22494, 22525), 'pandas.read_excel', 'pd.read_excel', (['filename'], {}), '(filename, **args)\n', (22507, 22525), True, 'import pandas as pd\n'), ((23501, 23549), 'seaborn.light_palette', 'sns.light_palette', (['color'], {'as_cmap': '(True)'}), '(color, as_cmap=True, **kwargs)\n', (23518, 23549), True, 'import seaborn as sns\n'), ((24501, 24522), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', 
(24509, 24522), True, 'import numpy as np\n'), ((27299, 27309), 'collections.Counter', 'Counter', (['y'], {}), '(y)\n', (27306, 27309), False, 'from collections import Counter\n'), ((27421, 27446), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (27432, 27446), False, 'import random\n'), ((28654, 28704), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'size', 'stratify': 'y'}), '(X, y, test_size=size, stratify=y)\n', (28670, 28704), False, 'from sklearn.model_selection import train_test_split\n'), ((31314, 31349), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(16, 5)'}), '(1, 1, figsize=(16, 5))\n', (31326, 31349), True, 'import matplotlib.pyplot as plt\n'), ((32908, 32949), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': 'average'}), '(y_true, y_pred, average=average)\n', (32916, 32949), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\n'), ((32963, 33008), 'sklearn.metrics.recall_score', 'recall_score', (['y_true', 'y_pred'], {'average': 'average'}), '(y_true, y_pred, average=average)\n', (32975, 33008), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\n'), ((33025, 33073), 'sklearn.metrics.precision_score', 'precision_score', (['y_true', 'y_pred'], {'average': 'average'}), '(y_true, y_pred, average=average)\n', (33040, 33073), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\n'), ((33089, 33119), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (33103, 33119), False, 'from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\n'), ((35793, 35814), 'numpy.get_printoptions', 'np.get_printoptions', ([], {}), '()\n', (35812, 35814), True, 'import numpy as np\n'), ((35819, 35851), 'numpy.set_printoptions', 'np.set_printoptions', ([], 
{'precision': '(2)'}), '(precision=2)\n', (35838, 35851), True, 'import numpy as np\n'), ((35944, 35976), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (35960, 35976), False, 'from sklearn.metrics import confusion_matrix\n'), ((36209, 36259), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (36219, 36259), True, 'import matplotlib.pyplot as plt\n'), ((36264, 36280), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (36273, 36280), True, 'import matplotlib.pyplot as plt\n'), ((36285, 36299), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (36297, 36299), True, 'import matplotlib.pyplot as plt\n'), ((36345, 36389), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (36355, 36389), True, 'import matplotlib.pyplot as plt\n'), ((36394, 36425), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (36404, 36425), True, 'import matplotlib.pyplot as plt\n'), ((36731, 36755), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (36741, 36755), True, 'import matplotlib.pyplot as plt\n'), ((36760, 36789), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (36770, 36789), True, 'import matplotlib.pyplot as plt\n'), ((36794, 36812), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (36810, 36812), True, 'import matplotlib.pyplot as plt\n'), ((36817, 36842), 'numpy.set_printoptions', 'np.set_printoptions', ([], {}), '(**po)\n', (36836, 36842), True, 'import numpy as np\n'), ((37196, 37213), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (37207, 37213), False, 'import random\n'), ((37303, 37323), 'numpy.random.seed', 'np.random.seed', 
(['seed'], {}), '(seed)\n', (37317, 37323), True, 'import numpy as np\n'), ((999, 1043), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {'symlinks': 'symlinks'}), '(src, dst, symlinks=symlinks)\n', (1014, 1043), False, 'import shutil\n'), ((9028, 9072), 'pickle.dump', 'pickle.dump', (['obj', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(obj, f, pickle.HIGHEST_PROTOCOL)\n', (9039, 9072), False, 'import pickle\n'), ((9190, 9204), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9201, 9204), False, 'import pickle\n'), ((9635, 9652), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (9649, 9652), False, 'import yaml\n'), ((10050, 10073), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (10067, 10073), False, 'import logging\n'), ((11889, 11933), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['text', '"""%Y/%m/%d"""'], {}), "(text, '%Y/%m/%d')\n", (11915, 11933), False, 'import datetime\n'), ((11962, 12006), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['text', '"""%Y-%m-%d"""'], {}), "(text, '%Y-%m-%d')\n", (11988, 12006), False, 'import datetime\n'), ((12939, 12960), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (12958, 12960), False, 'import datetime\n'), ((14188, 14212), 'datetime.timedelta', 'datetime.timedelta', (['days'], {}), '(days)\n', (14206, 14212), False, 'import datetime\n'), ((14874, 14900), 'itertools.chain.from_iterable', 'chain.from_iterable', (['lists'], {}), '(lists)\n', (14893, 14900), False, 'from itertools import chain\n'), ((17423, 17446), 'pandas.concat', 'pd.concat', (['[master, df]'], {}), '([master, df])\n', (17432, 17446), True, 'import pandas as pd\n'), ((17948, 17981), 'numpy.all', 'np.all', (['(df0.columns == df.columns)'], {}), '(df0.columns == df.columns)\n', (17954, 17981), True, 'import numpy as np\n'), ((22372, 22401), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename, **args)\n', (22383, 22401), True, 'import pandas as pd\n'), ((22709, 
22758), 'codecs.open', 'codecs.open', (['filename', '"""r"""', '"""Shift-JIS"""', '"""ignore"""'], {}), "(filename, 'r', 'Shift-JIS', 'ignore')\n", (22720, 22758), False, 'import codecs\n'), ((22783, 22825), 'pandas.read_table', 'pd.read_table', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',', **args)\n", (22796, 22825), True, 'import pandas as pd\n'), ((23778, 23799), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (23787, 23799), True, 'import numpy as np\n'), ((24943, 24957), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (24949, 24957), True, 'import numpy as np\n'), ((24998, 25012), 'numpy.min', 'np.min', (['counts'], {}), '(counts)\n', (25004, 25012), True, 'import numpy as np\n'), ((25337, 25380), 'numpy.array', 'np.array', (['[X[idx] for idx in sampled_index]'], {}), '([X[idx] for idx in sampled_index])\n', (25345, 25380), True, 'import numpy as np\n'), ((25382, 25425), 'numpy.array', 'np.array', (['[y[idx] for idx in sampled_index]'], {}), '([y[idx] for idx in sampled_index])\n', (25390, 25425), True, 'import numpy as np\n'), ((29380, 29410), 'numpy.all', 'np.all', (['(classes_a == classes_b)'], {}), '(classes_a == classes_b)\n', (29386, 29410), True, 'import numpy as np\n'), ((30377, 30409), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {}), '(X, y, **kwargs)\n', (30393, 30409), False, 'from sklearn.model_selection import train_test_split\n'), ((31963, 31981), 'collections.OrderedDict', 'OrderedDict', (['items'], {}), '(items)\n', (31974, 31981), False, 'from collections import OrderedDict\n'), ((33599, 33634), 'numpy.power', 'np.power', (['_preds[:, 1]', 'binary_bias'], {}), '(_preds[:, 1], binary_bias)\n', (33607, 33634), True, 'import numpy as np\n'), ((37389, 37412), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (37406, 37412), False, 'import torch\n'), ((37583, 37607), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', 
(37601, 37607), True, 'import tensorflow as tf\n'), ((4547, 4564), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (4561, 4564), False, 'import yaml\n'), ((7930, 7965), 'random.sample', 'random.sample', (['candidates', 'n_sample'], {}), '(candidates, n_sample)\n', (7943, 7965), False, 'import random\n'), ((10352, 10375), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (10373, 10375), False, 'import logging\n'), ((10465, 10497), 'logging.FileHandler', 'logging.FileHandler', (['output_file'], {}), '(output_file)\n', (10484, 10497), False, 'import logging\n'), ((12578, 12645), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['f"""{year}-W{week_no:02d}-1"""', '"""%G-W%V-%u"""'], {}), "(f'{year}-W{week_no:02d}-1', '%G-W%V-%u')\n", (12604, 12645), False, 'import datetime\n'), ((12668, 12735), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['f"""{year}-W{week_no:02d}-7"""', '"""%G-W%V-%u"""'], {}), "(f'{year}-W{week_no:02d}-7', '%G-W%V-%u')\n", (12694, 12735), False, 'import datetime\n'), ((13236, 13278), 'datetime.date', 'datetime.date', ([], {'year': 'year', 'month': '(12)', 'day': '(28)'}), '(year=year, month=12, day=28)\n', (13249, 13278), False, 'import datetime\n'), ((18040, 18074), 'pandas.merge', 'pd.merge', (['left', 'right'], {'how': '"""outer"""'}), "(left, right, how='outer')\n", (18048, 18074), True, 'import pandas as pd\n'), ((27594, 27614), 'numpy.where', 'np.where', (['(y == cur_y)'], {}), '(y == cur_y)\n', (27602, 27614), True, 'import numpy as np\n'), ((32828, 32842), 'numpy.max', 'np.max', (['y_true'], {}), '(y_true)\n', (32834, 32842), True, 'import numpy as np\n'), ((32851, 32865), 'numpy.max', 'np.max', (['y_pred'], {}), '(y_pred)\n', (32857, 32865), True, 'import numpy as np\n'), ((11137, 11160), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11158, 11160), False, 'import datetime\n'), ((12521, 12546), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), 
'()\n', (12544, 12546), False, 'import datetime\n'), ((13678, 13699), 'datetime.timedelta', 'datetime.timedelta', (['n'], {}), '(n)\n', (13696, 13699), False, 'import datetime\n'), ((14230, 14255), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (14253, 14255), False, 'import datetime\n'), ((19145, 19167), 'numpy.column_stack', 'np.column_stack', (['masks'], {}), '(masks)\n', (19160, 19167), True, 'import numpy as np\n'), ((19216, 19238), 'numpy.column_stack', 'np.column_stack', (['masks'], {}), '(masks)\n', (19231, 19238), True, 'import numpy as np\n'), ((27511, 27565), 'numpy.min', 'np.min', (['[multiply_limit * count, max_sample_per_class]'], {}), '([multiply_limit * count, max_sample_per_class])\n', (27517, 27565), True, 'import numpy as np\n'), ((4708, 4726), 'yaml.dump', 'yaml.dump', (['default'], {}), '(default)\n', (4717, 4726), False, 'import yaml\n'), ((16696, 16730), 'unicodedata.east_asian_width', 'unicodedata.east_asian_width', (['char'], {}), '(char)\n', (16724, 16730), False, 'import unicodedata\n'), ((20203, 20225), 'numpy.column_stack', 'np.column_stack', (['masks'], {}), '(masks)\n', (20218, 20225), True, 'import numpy as np\n'), ((20281, 20303), 'numpy.column_stack', 'np.column_stack', (['masks'], {}), '(masks)\n', (20296, 20303), True, 'import numpy as np\n'), ((23749, 23760), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (23757, 23760), True, 'import numpy as np\n')] |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from extensions.ops.slice_like import SliceLike
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph
# Graph node attributes for the SliceLike shape-inference tests below:
# a 3x4 input tensor, a 2x3 "shape like" reference tensor, the slice_like
# op under test, and an output node whose shape is filled in by inference.
nodes_attributes = {
    'input_data': {'kind': 'data', 'shape': int64_array([3, 4]), 'value': None},
    'shape_like_data': {'kind': 'data', 'shape': int64_array([2, 3]), 'value': None},
    'slice_like': {'kind': 'op', 'op': 'slice_data'},
    'out_data': {'kind': 'data', 'shape': None, 'value': None}
}
# Wiring: both data nodes feed slice_like (input ports 0 and 1), which
# produces out_data.
edges = [
    ('input_data', 'slice_like', {'in': 0}),
    ('shape_like_data', 'slice_like', {'in': 1}),
    ('slice_like', 'out_data')
]
class SliceLikeTest(unittest.TestCase):
    """Shape inference tests for the SliceLike operation."""

    def _infer_and_check(self, axes, ref_shape):
        # Build a graph with the given `axes` attribute, run shape inference
        # on the slice_like node and assert the inferred output shape.
        graph = build_graph(nodes_attributes, edges, {'slice_like': {'axes': axes}})
        slice_like = Node(graph, 'slice_like')
        SliceLike.infer(slice_like)
        res_shape = graph.node['out_data']['shape']
        self.assertTrue(np.array_equal(res_shape, int64_array(ref_shape)))

    def test_1(self):
        # No axes: every dimension follows the shape-like input.
        self._infer_and_check(None, [2, 3])

    def test_2(self):
        # Explicitly listing all axes behaves like axes=None.
        self._infer_and_check((0, 1), [2, 3])

    def test_3(self):
        # Only axis 0 is sliced; axis 1 keeps the original size 4.
        self._infer_and_check((0,), [2, 4])

    def test_4(self):
        # Negative axes count from the end: only the last axis is sliced.
        self._infer_and_check((-1,), [3, 3])
| [
"mo.graph.graph.Node",
"mo.utils.unittest.graph.build_graph",
"mo.front.common.partial_infer.utils.int64_array",
"numpy.array_equal",
"extensions.ops.slice_like.SliceLike.infer"
] | [((877, 896), 'mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[3, 4]'], {}), '([3, 4])\n', (888, 896), False, 'from mo.front.common.partial_infer.utils import int64_array\n'), ((963, 982), 'mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[2, 3]'], {}), '([2, 3])\n', (974, 982), False, 'from mo.front.common.partial_infer.utils import int64_array\n'), ((1339, 1407), 'mo.utils.unittest.graph.build_graph', 'build_graph', (['nodes_attributes', 'edges', "{'slice_like': {'axes': None}}"], {}), "(nodes_attributes, edges, {'slice_like': {'axes': None}})\n", (1350, 1407), False, 'from mo.utils.unittest.graph import build_graph\n'), ((1429, 1454), 'mo.graph.graph.Node', 'Node', (['graph', '"""slice_like"""'], {}), "(graph, 'slice_like')\n", (1433, 1454), False, 'from mo.graph.graph import Node\n'), ((1463, 1490), 'extensions.ops.slice_like.SliceLike.infer', 'SliceLike.infer', (['slice_like'], {}), '(slice_like)\n', (1478, 1490), False, 'from extensions.ops.slice_like import SliceLike\n'), ((1511, 1530), 'mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[2, 3]'], {}), '([2, 3])\n', (1522, 1530), False, 'from mo.front.common.partial_infer.utils import int64_array\n'), ((1684, 1754), 'mo.utils.unittest.graph.build_graph', 'build_graph', (['nodes_attributes', 'edges', "{'slice_like': {'axes': (0, 1)}}"], {}), "(nodes_attributes, edges, {'slice_like': {'axes': (0, 1)}})\n", (1695, 1754), False, 'from mo.utils.unittest.graph import build_graph\n'), ((1776, 1801), 'mo.graph.graph.Node', 'Node', (['graph', '"""slice_like"""'], {}), "(graph, 'slice_like')\n", (1780, 1801), False, 'from mo.graph.graph import Node\n'), ((1810, 1837), 'extensions.ops.slice_like.SliceLike.infer', 'SliceLike.infer', (['slice_like'], {}), '(slice_like)\n', (1825, 1837), False, 'from extensions.ops.slice_like import SliceLike\n'), ((1858, 1877), 'mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[2, 3]'], {}), '([2, 3])\n', 
(1869, 1877), False, 'from mo.front.common.partial_infer.utils import int64_array\n'), ((2031, 2099), 'mo.utils.unittest.graph.build_graph', 'build_graph', (['nodes_attributes', 'edges', "{'slice_like': {'axes': (0,)}}"], {}), "(nodes_attributes, edges, {'slice_like': {'axes': (0,)}})\n", (2042, 2099), False, 'from mo.utils.unittest.graph import build_graph\n'), ((2121, 2146), 'mo.graph.graph.Node', 'Node', (['graph', '"""slice_like"""'], {}), "(graph, 'slice_like')\n", (2125, 2146), False, 'from mo.graph.graph import Node\n'), ((2155, 2182), 'extensions.ops.slice_like.SliceLike.infer', 'SliceLike.infer', (['slice_like'], {}), '(slice_like)\n', (2170, 2182), False, 'from extensions.ops.slice_like import SliceLike\n'), ((2203, 2222), 'mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[2, 4]'], {}), '([2, 4])\n', (2214, 2222), False, 'from mo.front.common.partial_infer.utils import int64_array\n'), ((2376, 2445), 'mo.utils.unittest.graph.build_graph', 'build_graph', (['nodes_attributes', 'edges', "{'slice_like': {'axes': (-1,)}}"], {}), "(nodes_attributes, edges, {'slice_like': {'axes': (-1,)}})\n", (2387, 2445), False, 'from mo.utils.unittest.graph import build_graph\n'), ((2467, 2492), 'mo.graph.graph.Node', 'Node', (['graph', '"""slice_like"""'], {}), "(graph, 'slice_like')\n", (2471, 2492), False, 'from mo.graph.graph import Node\n'), ((2501, 2528), 'extensions.ops.slice_like.SliceLike.infer', 'SliceLike.infer', (['slice_like'], {}), '(slice_like)\n', (2516, 2528), False, 'from extensions.ops.slice_like import SliceLike\n'), ((2549, 2568), 'mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[3, 3]'], {}), '([3, 3])\n', (2560, 2568), False, 'from mo.front.common.partial_infer.utils import int64_array\n'), ((1607, 1643), 'numpy.array_equal', 'np.array_equal', (['res_shape', 'ref_shape'], {}), '(res_shape, ref_shape)\n', (1621, 1643), True, 'import numpy as np\n'), ((1954, 1990), 'numpy.array_equal', 'np.array_equal', 
(['res_shape', 'ref_shape'], {}), '(res_shape, ref_shape)\n', (1968, 1990), True, 'import numpy as np\n'), ((2299, 2335), 'numpy.array_equal', 'np.array_equal', (['res_shape', 'ref_shape'], {}), '(res_shape, ref_shape)\n', (2313, 2335), True, 'import numpy as np\n'), ((2645, 2681), 'numpy.array_equal', 'np.array_equal', (['res_shape', 'ref_shape'], {}), '(res_shape, ref_shape)\n', (2659, 2681), True, 'import numpy as np\n')] |
"""!
\ingroup lammpstools
Produces a histogram in a more intuitive way than numpy does.
"""
import numpy as np
def make_histogram(yy, y0, y1, Nbins):
    """! Produces a histogram from data in yy.
    @param yy     Data to histogram
    @param y0     Lower bound of histogram
    @param y1     Upper bound of histogram
    @param Nbins  Number of bins.

    The number of bins, y0 and y1 together implicitly define
    the resolution dy = (y1 - y0) / (Nbins-1). Note that data
    outside of the bracket [y0,y1] is discarded.

    @return tuple (bins, hist, mean, modal): bin positions, normalized
            counts, the arithmetic mean of the in-range data, and the
            position of the most populated bin.
    """
    count = 0.0
    hist = np.zeros(Nbins, dtype=float)
    dx = (y1 - y0) / (Nbins - 1.0)
    # Per-sample increment so the histogram is normalized over the range.
    a = 1.0 / (dx * Nbins)
    bins = np.zeros(Nbins, dtype=float)
    for i in range(0, Nbins):
        bins[i] = y0 + i * dx
    misses = 0
    mean = 0.0
    modal = 0.0
    for y in yy:
        ibin = int((y - y0) / dx)
        if ibin >= Nbins or ibin < 0:
            # BUG FIX: was `++misses`, which in Python is a double unary
            # plus (a no-op) and never incremented the counter.
            misses += 1
        else:
            hist[ibin] += a
            count += 1.0
            mean += y
    if count > 0:
        mean /= count
    # Modal value is computed unconditionally (matches original behavior:
    # with no in-range data it is simply bins[0]).
    modal_bin = np.argmax(hist)
    modal = bins[modal_bin]
    return bins, hist, mean, modal
| [
"numpy.zeros",
"numpy.argmax"
] | [((571, 599), 'numpy.zeros', 'np.zeros', (['Nbins'], {'dtype': 'float'}), '(Nbins, dtype=float)\n', (579, 599), True, 'import numpy as np\n'), ((669, 697), 'numpy.zeros', 'np.zeros', (['Nbins'], {'dtype': 'float'}), '(Nbins, dtype=float)\n', (677, 697), True, 'import numpy as np\n'), ((1076, 1091), 'numpy.argmax', 'np.argmax', (['hist'], {}), '(hist)\n', (1085, 1091), True, 'import numpy as np\n')] |
from Control import Control
import numpy as np
class UpdateGraph:
    """Mixin with the periodic plot-update and mouse-tracking callbacks.

    Relies on attributes set up elsewhere on the host widget (board,
    analogPort, graphWidget, y/temp buffers, PID controls, ...).
    """

    def update_plot_data(self):
        """Sample the Arduino analog input, update the plot buffers/curves
        and, when PID mode is selected, drive the PWM output."""
        # Still filling the buffer: append until the visible graph range
        # (graphRange seconds at sampleTimeSec per sample) is covered.
        if self.y.size < (self.graphRange / self.sampleTimeSec):
            # Analog reading scaled by the Arduino 5 V reference voltage.
            self.mv_value = 5 * self.board.analog[self.analogPort].read()
            self.y = np.append(self.y, self.mv_value)
            if self.temp.size == 0:
                # First sample: time axis starts at 0.
                self.temp = np.append(self.temp, 0)
            else:
                # Subsequent samples: advance time by one sample period.
                self.temp = np.append(self.temp, (self.temp[-1] + self.sampleTimeSec))
            #self.y2[-1] = self.input_disturbance
        # Buffer full: scroll the graph instead of growing the buffer.
        else:
            # Shift readings left by one to make room for the newest value.
            self.y[:-1] = self.y[1:]
            # Analog reading scaled by the Arduino 5 V reference voltage.
            self.mv_value = 5 * self.board.analog[self.analogPort].read()
            self.y[-1] = float(self.mv_value)
            self.ptr += self.sampleTimeSec
            # Slide the visible x-window along with the data.
            self.graphWidget.setRange(padding=0, xRange=[self.ptr, self.ptr + self.graphRange])
            # Move graph: drop the oldest timestamp, append the newest.
            self.temp = np.delete(self.temp, 0)
            self.temp = np.append(self.temp, (self.temp[-1] + self.sampleTimeSec))
        self.label_18.setText("MV: " + str(self.mv_value))
        self.curve.setData(self.temp, self.y)
        # NOTE(review): PID handling arguably belongs outside this plot
        # callback ("if not good placed" in the original).
        # Only run the PID loop when the PID radio button is selected.
        if self.radioButton_2.isChecked():
            Control.PID_calc(self)
            # Controller output is a percentage; scale to pyfirmata's 0..1 PWM.
            self.pwmValue = self.output / 100
            self.board.digital[int(self.pwmPort)].write(self.pwmValue)
            if self.got_csv:
                # Draw the CSV disturbance curve only when using PID.
                self.y2 = np.append(self.y2, self.csv_y[self.time_index])
                self.curve2.setData(self.temp, self.y2)
    def mouse_update(self, e):
        """Track the mouse over the plot: move the crosshair lines and show
        the data coordinates under the cursor."""
        pos = e[0]
        if self.graphWidget.sceneBoundingRect().contains(pos):
            # Convert from scene (pixel) coordinates to data coordinates.
            mousePoint = self.graphWidget.getPlotItem().vb.mapSceneToView(pos)
            self.y_line.setPos(mousePoint.x())
            self.x_line.setPos(mousePoint.y())
            self.coordinates.setText(f'Coordenadas X: {str(round(mousePoint.x(), 4))} Y: {str(round(mousePoint.y(), 4))}' )
"numpy.append",
"Control.Control.PID_calc",
"numpy.delete"
] | [((351, 383), 'numpy.append', 'np.append', (['self.y', 'self.mv_value'], {}), '(self.y, self.mv_value)\n', (360, 383), True, 'import numpy as np\n'), ((1085, 1108), 'numpy.delete', 'np.delete', (['self.temp', '(0)'], {}), '(self.temp, 0)\n', (1094, 1108), True, 'import numpy as np\n'), ((1133, 1189), 'numpy.append', 'np.append', (['self.temp', '(self.temp[-1] + self.sampleTimeSec)'], {}), '(self.temp, self.temp[-1] + self.sampleTimeSec)\n', (1142, 1189), True, 'import numpy as np\n'), ((1413, 1435), 'Control.Control.PID_calc', 'Control.PID_calc', (['self'], {}), '(self)\n', (1429, 1435), False, 'from Control import Control\n'), ((448, 471), 'numpy.append', 'np.append', (['self.temp', '(0)'], {}), '(self.temp, 0)\n', (457, 471), True, 'import numpy as np\n'), ((518, 574), 'numpy.append', 'np.append', (['self.temp', '(self.temp[-1] + self.sampleTimeSec)'], {}), '(self.temp, self.temp[-1] + self.sampleTimeSec)\n', (527, 574), True, 'import numpy as np\n'), ((1663, 1710), 'numpy.append', 'np.append', (['self.y2', 'self.csv_y[self.time_index]'], {}), '(self.y2, self.csv_y[self.time_index])\n', (1672, 1710), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import torch
from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead
def test_bottom_up_simple_head():
    """Test BottomUpSimpleHead: constructor validation, final-layer padding
    and output shapes for the ae-loss / tag_per_joint combinations."""

    def _forward_shape(wrap=False, **head_kwargs):
        # Build a head, init its weights and return the shape of the first
        # output for a (1, 512, 32, 32) dummy input. `wrap` feeds the input
        # inside a list to exercise the list-input code path.
        head = BottomUpSimpleHead(in_channels=512, num_joints=17, **head_kwargs)
        head.init_weights()
        inputs = _demo_inputs((1, 512, 32, 32))
        out = head([inputs] if wrap else inputs)
        return out[0].shape

    # `extra` must be a dict, not a list.
    with pytest.raises(TypeError):
        _ = BottomUpSimpleHead(
            in_channels=512, num_joints=17, with_ae_loss=[True], extra=[])

    # final_conv_kernel only accepts 1 or 3.
    with pytest.raises(AssertionError):
        _ = BottomUpSimpleHead(
            in_channels=512,
            num_joints=17,
            with_ae_loss=[True],
            extra={'final_conv_kernel': 0})

    # Kernel 3 -> padding (1, 1); kernel 1 or default -> padding (0, 0).
    head = BottomUpSimpleHead(
        in_channels=512,
        num_joints=17,
        with_ae_loss=[True],
        extra={'final_conv_kernel': 3})
    head.init_weights()
    assert head.final_layer.padding == (1, 1)

    head = BottomUpSimpleHead(
        in_channels=512,
        num_joints=17,
        with_ae_loss=[True],
        extra={'final_conv_kernel': 1})
    head.init_weights()
    assert head.final_layer.padding == (0, 0)

    head = BottomUpSimpleHead(
        in_channels=512, num_joints=17, with_ae_loss=[True])
    head.init_weights()
    assert head.final_layer.padding == (0, 0)

    # with_ae_loss adds num_joints tag channels on top of the heatmaps.
    assert _forward_shape(
        num_deconv_layers=0, with_ae_loss=[True],
        extra={'final_conv_kernel': 3}) == torch.Size([1, 34, 32, 32])
    assert _forward_shape(
        num_deconv_layers=0, with_ae_loss=[False],
        extra={'final_conv_kernel': 3}) == torch.Size([1, 17, 32, 32])

    # tag_per_joint=False collapses all tag channels into a single one.
    assert _forward_shape(
        num_deconv_layers=0, tag_per_joint=False, with_ae_loss=[False],
        extra={'final_conv_kernel': 3}) == torch.Size([1, 17, 32, 32])
    assert _forward_shape(
        num_deconv_layers=0, tag_per_joint=False, with_ae_loss=[True],
        extra={'final_conv_kernel': 3}) == torch.Size([1, 18, 32, 32])

    # The input may also be passed wrapped in a list.
    assert _forward_shape(
        wrap=True, num_deconv_layers=0, tag_per_joint=False,
        with_ae_loss=[True],
        extra={'final_conv_kernel': 3}) == torch.Size([1, 18, 32, 32])
def test_bottom_up_higherresolution_head():
    """Test BottomUpHigherResolutionHead: constructor validation, layer
    padding, deconv kernels and output shapes at both resolutions."""

    def _forward_shapes(wrap=False, **head_kwargs):
        # Build a head, init its weights and return the shapes of both
        # output resolutions for a (1, 512, 32, 32) dummy input. `wrap`
        # feeds the input inside a list to exercise the list-input path.
        head = BottomUpHigherResolutionHead(
            in_channels=512, num_joints=17, **head_kwargs)
        head.init_weights()
        inputs = _demo_inputs((1, 512, 32, 32))
        out = head([inputs] if wrap else inputs)
        return out[0].shape, out[1].shape

    # final_conv_kernel only accepts 1 or 3.
    with pytest.raises(AssertionError):
        _ = BottomUpHigherResolutionHead(
            in_channels=512,
            num_joints=17,
            with_ae_loss=[True, False],
            extra={'final_conv_kernel': 0})

    # Kernel 3 -> padding (1, 1); kernel 1 or default -> padding (0, 0).
    head = BottomUpHigherResolutionHead(
        in_channels=512,
        num_joints=17,
        with_ae_loss=[True, False],
        extra={'final_conv_kernel': 3},
        cat_output=[True])
    head.init_weights()
    assert head.final_layers[0].padding == (1, 1)

    head = BottomUpHigherResolutionHead(
        in_channels=512,
        num_joints=17,
        with_ae_loss=[True, False],
        extra={'final_conv_kernel': 1},
        cat_output=[True])
    head.init_weights()
    assert head.final_layers[0].padding == (0, 0)

    head = BottomUpHigherResolutionHead(
        in_channels=512,
        num_joints=17,
        with_ae_loss=[True, False],
        cat_output=[True])
    head.init_weights()
    assert head.final_layers[0].padding == (0, 0)

    # Deconv kernel 1 is rejected outright.
    with pytest.raises(ValueError):
        _ = BottomUpHigherResolutionHead(
            in_channels=512,
            num_joints=17,
            with_ae_loss=[True, False],
            num_deconv_kernels=[1],
            cat_output=[True])

    # Deconv kernel 4 -> output_padding (0, 0); 3 -> (1, 1); 2 -> (0, 0).
    for kernel, expected_padding in ((4, (0, 0)), (3, (1, 1)), (2, (0, 0))):
        head = BottomUpHigherResolutionHead(
            in_channels=512,
            num_joints=17,
            with_ae_loss=[True, False],
            num_deconv_kernels=[kernel],
            cat_output=[True])
        head.init_weights()
        assert head.deconv_layers[0][0][0].output_padding == expected_padding

    # tag_per_joint and with_ae_loss control the channel counts per scale.
    assert _forward_shapes(
        tag_per_joint=False, with_ae_loss=[False, False],
        extra={'final_conv_kernel': 3}, cat_output=[True]) == (
            torch.Size([1, 17, 32, 32]), torch.Size([1, 17, 64, 64]))
    assert _forward_shapes(
        tag_per_joint=False, with_ae_loss=[True, False],
        extra={'final_conv_kernel': 3}, cat_output=[True]) == (
            torch.Size([1, 18, 32, 32]), torch.Size([1, 17, 64, 64]))
    assert _forward_shapes(
        tag_per_joint=True, with_ae_loss=[True, True],
        extra={'final_conv_kernel': 3}, cat_output=[True]) == (
            torch.Size([1, 34, 32, 32]), torch.Size([1, 34, 64, 64]))

    # cat_output=[False] and list-wrapped inputs yield the same shapes.
    assert _forward_shapes(
        tag_per_joint=True, with_ae_loss=[True, True],
        extra={'final_conv_kernel': 3}, cat_output=[False]) == (
            torch.Size([1, 34, 32, 32]), torch.Size([1, 34, 64, 64]))
    assert _forward_shapes(
        wrap=True, tag_per_joint=True, with_ae_loss=[True, True],
        extra={'final_conv_kernel': 3}, cat_output=[False]) == (
            torch.Size([1, 34, 32, 32]), torch.Size([1, 34, 64, 64]))
def _demo_inputs(input_shape=(1, 3, 64, 64)):
    """Create a random float tensor suitable for feeding a backbone/head.

    Args:
        input_shape (tuple): Batch dimensions of the generated tensor.
            Default: (1, 3, 64, 64).

    Returns:
        torch.Tensor: Float tensor of shape ``input_shape`` with values
        drawn uniformly from [0, 1).
    """
    random_values = np.random.random(input_shape)
    return torch.FloatTensor(random_values)
| [
"mmpose.models.BottomUpHigherResolutionHead",
"mmpose.models.BottomUpSimpleHead",
"torch.FloatTensor",
"pytest.raises",
"numpy.random.random",
"torch.Size"
] | [((600, 707), 'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True]', 'extra': "{'final_conv_kernel': 3}"}), "(in_channels=512, num_joints=17, with_ae_loss=[True],\n extra={'final_conv_kernel': 3})\n", (618, 707), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((818, 925), 'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True]', 'extra': "{'final_conv_kernel': 1}"}), "(in_channels=512, num_joints=17, with_ae_loss=[True],\n extra={'final_conv_kernel': 1})\n", (836, 925), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((1036, 1107), 'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True]'}), '(in_channels=512, num_joints=17, with_ae_loss=[True])\n', (1054, 1107), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((1222, 1350), 'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'num_deconv_layers': '(0)', 'with_ae_loss': '[True]', 'extra': "{'final_conv_kernel': 3}"}), "(in_channels=512, num_joints=17, num_deconv_layers=0,\n with_ae_loss=[True], extra={'final_conv_kernel': 3})\n", (1240, 1350), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((1575, 1704), 'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'num_deconv_layers': '(0)', 'with_ae_loss': '[False]', 'extra': "{'final_conv_kernel': 3}"}), "(in_channels=512, num_joints=17, num_deconv_layers=0,\n with_ae_loss=[False], extra={'final_conv_kernel': 3})\n", (1593, 1704), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((1954, 2104), 
'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'num_deconv_layers': '(0)', 'tag_per_joint': '(False)', 'with_ae_loss': '[False]', 'extra': "{'final_conv_kernel': 3}"}), "(in_channels=512, num_joints=17, num_deconv_layers=0,\n tag_per_joint=False, with_ae_loss=[False], extra={'final_conv_kernel': 3})\n", (1972, 2104), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((2337, 2486), 'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'num_deconv_layers': '(0)', 'tag_per_joint': '(False)', 'with_ae_loss': '[True]', 'extra': "{'final_conv_kernel': 3}"}), "(in_channels=512, num_joints=17, num_deconv_layers=0,\n tag_per_joint=False, with_ae_loss=[True], extra={'final_conv_kernel': 3})\n", (2355, 2486), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((2719, 2868), 'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'num_deconv_layers': '(0)', 'tag_per_joint': '(False)', 'with_ae_loss': '[True]', 'extra': "{'final_conv_kernel': 3}"}), "(in_channels=512, num_joints=17, num_deconv_layers=0,\n tag_per_joint=False, with_ae_loss=[True], extra={'final_conv_kernel': 3})\n", (2737, 2868), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((3449, 3593), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True, False]', 'extra': "{'final_conv_kernel': 3}", 'cat_output': '[True]'}), "(in_channels=512, num_joints=17, with_ae_loss=[\n True, False], extra={'final_conv_kernel': 3}, cat_output=[True])\n", (3477, 3593), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((3715, 3859), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], 
{'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True, False]', 'extra': "{'final_conv_kernel': 1}", 'cat_output': '[True]'}), "(in_channels=512, num_joints=17, with_ae_loss=[\n True, False], extra={'final_conv_kernel': 1}, cat_output=[True])\n", (3743, 3859), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((3981, 4093), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True, False]', 'cat_output': '[True]'}), '(in_channels=512, num_joints=17, with_ae_loss=[\n True, False], cat_output=[True])\n', (4009, 4093), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((4473, 4609), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True, False]', 'num_deconv_kernels': '[4]', 'cat_output': '[True]'}), '(in_channels=512, num_joints=17, with_ae_loss=[\n True, False], num_deconv_kernels=[4], cat_output=[True])\n', (4501, 4609), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((4745, 4881), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True, False]', 'num_deconv_kernels': '[3]', 'cat_output': '[True]'}), '(in_channels=512, num_joints=17, with_ae_loss=[\n True, False], num_deconv_kernels=[3], cat_output=[True])\n', (4773, 4881), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((5017, 5153), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True, False]', 'num_deconv_kernels': '[2]', 'cat_output': '[True]'}), '(in_channels=512, num_joints=17, with_ae_loss=[\n True, False], num_deconv_kernels=[2], cat_output=[True])\n', 
(5045, 5153), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((5324, 5494), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'tag_per_joint': '(False)', 'with_ae_loss': '[False, False]', 'extra': "{'final_conv_kernel': 3}", 'cat_output': '[True]'}), "(in_channels=512, num_joints=17, tag_per_joint=\n False, with_ae_loss=[False, False], extra={'final_conv_kernel': 3},\n cat_output=[True])\n", (5352, 5494), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((5777, 5946), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'tag_per_joint': '(False)', 'with_ae_loss': '[True, False]', 'extra': "{'final_conv_kernel': 3}", 'cat_output': '[True]'}), "(in_channels=512, num_joints=17, tag_per_joint=\n False, with_ae_loss=[True, False], extra={'final_conv_kernel': 3},\n cat_output=[True])\n", (5805, 5946), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((6229, 6396), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'tag_per_joint': '(True)', 'with_ae_loss': '[True, True]', 'extra': "{'final_conv_kernel': 3}", 'cat_output': '[True]'}), "(in_channels=512, num_joints=17, tag_per_joint=\n True, with_ae_loss=[True, True], extra={'final_conv_kernel': 3},\n cat_output=[True])\n", (6257, 6396), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((6696, 6864), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'tag_per_joint': '(True)', 'with_ae_loss': '[True, True]', 'extra': "{'final_conv_kernel': 3}", 'cat_output': '[False]'}), "(in_channels=512, num_joints=17, tag_per_joint=\n True, with_ae_loss=[True, True], 
extra={'final_conv_kernel': 3},\n cat_output=[False])\n", (6724, 6864), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((7147, 7315), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'tag_per_joint': '(True)', 'with_ae_loss': '[True, True]', 'extra': "{'final_conv_kernel': 3}", 'cat_output': '[False]'}), "(in_channels=512, num_joints=17, tag_per_joint=\n True, with_ae_loss=[True, True], extra={'final_conv_kernel': 3},\n cat_output=[False])\n", (7175, 7315), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((7887, 7916), 'numpy.random.random', 'np.random.random', (['input_shape'], {}), '(input_shape)\n', (7903, 7916), True, 'import numpy as np\n'), ((7928, 7951), 'torch.FloatTensor', 'torch.FloatTensor', (['inps'], {}), '(inps)\n', (7945, 7951), False, 'import torch\n'), ((206, 230), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (219, 230), False, 'import pytest\n'), ((260, 345), 'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True]', 'extra': '[]'}), '(in_channels=512, num_joints=17, with_ae_loss=[True],\n extra=[])\n', (278, 345), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((393, 422), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (406, 422), False, 'import pytest\n'), ((436, 543), 'mmpose.models.BottomUpSimpleHead', 'BottomUpSimpleHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True]', 'extra': "{'final_conv_kernel': 0}"}), "(in_channels=512, num_joints=17, with_ae_loss=[True],\n extra={'final_conv_kernel': 0})\n", (454, 543), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((1536, 1563), 'torch.Size', 'torch.Size', (['[1, 34, 32, 32]'], {}), '([1, 34, 
32, 32])\n', (1546, 1563), False, 'import torch\n'), ((1890, 1917), 'torch.Size', 'torch.Size', (['[1, 17, 32, 32]'], {}), '([1, 17, 32, 32])\n', (1900, 1917), False, 'import torch\n'), ((2298, 2325), 'torch.Size', 'torch.Size', (['[1, 17, 32, 32]'], {}), '([1, 17, 32, 32])\n', (2308, 2325), False, 'import torch\n'), ((2680, 2707), 'torch.Size', 'torch.Size', (['[1, 18, 32, 32]'], {}), '([1, 18, 32, 32])\n', (2690, 2707), False, 'import torch\n'), ((3064, 3091), 'torch.Size', 'torch.Size', (['[1, 18, 32, 32]'], {}), '([1, 18, 32, 32])\n', (3074, 3091), False, 'import torch\n'), ((3225, 3254), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3238, 3254), False, 'import pytest\n'), ((3268, 3393), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True, False]', 'extra': "{'final_conv_kernel': 0}"}), "(in_channels=512, num_joints=17, with_ae_loss=[\n True, False], extra={'final_conv_kernel': 0})\n", (3296, 3393), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((4230, 4255), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4243, 4255), False, 'import pytest\n'), ((4269, 4405), 'mmpose.models.BottomUpHigherResolutionHead', 'BottomUpHigherResolutionHead', ([], {'in_channels': '(512)', 'num_joints': '(17)', 'with_ae_loss': '[True, False]', 'num_deconv_kernels': '[1]', 'cat_output': '[True]'}), '(in_channels=512, num_joints=17, with_ae_loss=[\n True, False], num_deconv_kernels=[1], cat_output=[True])\n', (4297, 4405), False, 'from mmpose.models import BottomUpHigherResolutionHead, BottomUpSimpleHead\n'), ((5683, 5710), 'torch.Size', 'torch.Size', (['[1, 17, 32, 32]'], {}), '([1, 17, 32, 32])\n', (5693, 5710), False, 'import torch\n'), ((5738, 5765), 'torch.Size', 'torch.Size', (['[1, 17, 64, 64]'], {}), '([1, 17, 64, 64])\n', (5748, 5765), False, 'import torch\n'), ((6135, 
6162), 'torch.Size', 'torch.Size', (['[1, 18, 32, 32]'], {}), '([1, 18, 32, 32])\n', (6145, 6162), False, 'import torch\n'), ((6190, 6217), 'torch.Size', 'torch.Size', (['[1, 17, 64, 64]'], {}), '([1, 17, 64, 64])\n', (6200, 6217), False, 'import torch\n'), ((6585, 6612), 'torch.Size', 'torch.Size', (['[1, 34, 32, 32]'], {}), '([1, 34, 32, 32])\n', (6595, 6612), False, 'import torch\n'), ((6640, 6667), 'torch.Size', 'torch.Size', (['[1, 34, 64, 64]'], {}), '([1, 34, 64, 64])\n', (6650, 6667), False, 'import torch\n'), ((7053, 7080), 'torch.Size', 'torch.Size', (['[1, 34, 32, 32]'], {}), '([1, 34, 32, 32])\n', (7063, 7080), False, 'import torch\n'), ((7108, 7135), 'torch.Size', 'torch.Size', (['[1, 34, 64, 64]'], {}), '([1, 34, 64, 64])\n', (7118, 7135), False, 'import torch\n'), ((7506, 7533), 'torch.Size', 'torch.Size', (['[1, 34, 32, 32]'], {}), '([1, 34, 32, 32])\n', (7516, 7533), False, 'import torch\n'), ((7561, 7588), 'torch.Size', 'torch.Size', (['[1, 34, 64, 64]'], {}), '([1, 34, 64, 64])\n', (7571, 7588), False, 'import torch\n')] |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from numbers import Number
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.utils.validation import check_X_y
from sklearn.utils.validation import check_array
from scipy.optimize import minimize
from pyafm.util import log_one_plus_exp_vect
from pyafm.util import invlogit_vect
class CustomLogistic(BaseEstimator, ClassifierMixin):
    """Binary logistic regression with box constraints and per-coefficient L2.

    Unlike sklearn's LogisticRegression this allows bounds on each estimate
    and an individual L2 penalty per coefficient, optimized via
    scipy.optimize.minimize.
    """
    def __init__(self, fit_intercept=True, method="TNC", bounds=None,
                 l2=1.0, max_iter=1000):
        """
        My own logistic regression that allows me to pass in box constraints
        (i.e., bounds on estimates) and use l2 regularization.
        If box constraints are being used, then the only supported methods are:
        'l-bfgs-b', 'TNC', or 'SLSQP'

        Parameters
        ----------
        fit_intercept : bool
            If True, an all-ones column is prepended to X before fitting.
        method : str
            Optimization method passed to scipy.optimize.minimize.
        bounds : None, (lo, hi) tuple, or sequence of (lo, hi) pairs
            Box constraints on the coefficients (see fit for the accepted
            shapes).
        l2 : Number or sequence of Numbers
            L2 penalty; either one value shared by all coefficients or one
            value per coefficient.
        max_iter : int
            Maximum number of optimizer iterations.
        """
        self.fit_intercept = fit_intercept
        self.method = method
        self.bounds = bounds
        self.l2 = l2
        self.max_iter = max_iter
    def fit(self, X, y):
        """
        Train the Logistic model, X and y are numpy arrays.

        Expands `bounds` and `l2` to one entry per coefficient (including
        the intercept, which is never regularized) and minimizes the
        penalized negative log-likelihood `_ll` with gradient `_ll_grad`.
        Sets `classes_`, `bounds_`, `l2_`, `intercept_` and `coef_`.
        """
        X, y = check_X_y(X, y)
        #, accept_sparse=['csr', 'csc'])  # not sure how to handle sparse
        self.classes_, y = np.unique(y, return_inverse=True)
        if self.fit_intercept:
            # Prepend a constant column so the intercept is just w[0].
            X = np.insert(X, 0, 1, axis=1)
        w0 = np.zeros(X.shape[1])
        # Normalize `bounds` to one (lo, hi) pair per weight:
        # None -> unbounded; a single pair -> shared by all weights;
        # len(w0)-1 pairs with an intercept -> leave the intercept unbounded.
        if self.bounds is None:
            self.bounds_ = [(None, None) for v in w0]
        elif isinstance(self.bounds, tuple) and len(self.bounds) == 2:
            self.bounds_ = [self.bounds for v in w0]
        elif self.fit_intercept and len(self.bounds) == len(w0) - 1:
            self.bounds_ = np.concatenate(([(None, None)], self.bounds))
        else:
            self.bounds_ = self.bounds
        if len(self.bounds_) != len(w0):
            raise ValueError("Bounds must be the same length as the coef")
        # Normalize `l2` the same way: scalar -> shared; len(w0)-1 values
        # with an intercept -> prepend 0 for the intercept.
        if isinstance(self.l2, Number):
            self.l2_ = [self.l2 for v in w0]
        elif self.fit_intercept and len(self.l2) == len(w0) - 1:
            self.l2_ = np.insert(self.l2, 0, 0)
        else:
            self.l2_ = self.l2
        if len(self.l2_) != len(w0):
            raise ValueError("L2 penalty must be the same length as the coef, be sure the intercept is accounted for.")
        # the intercept should never be regularized.
        if self.fit_intercept:
            self.l2_[0] = 0.0
        w = minimize(_ll, w0, args=(X, y, self.l2_),
                     jac=_ll_grad,
                     method=self.method, bounds=self.bounds_,
                     options={'maxiter': self.max_iter,
                              #'disp': True
                              })['x']
        if self.fit_intercept:
            self.intercept_ = w[0:1]
            self.coef_ = w[1:]
        else:
            self.intercept_ = np.array([])
            self.coef_ = w
        return self
    def predict(self, X):
        """
        Returns the predicted class for each x in X; predicts class 1 when
        the probability is greater than or equal to 0.5.
        """
        y = np.array(self.predict_proba(X))
        # NOTE(review): thresholds and label assignment happen in place on the
        # probability array; this is correct only while classes_ are {0, 1}-like.
        y[y >= 0.5] = self.classes_[1]
        y[y < 0.5] = self.classes_[0]
        return y
    def predict_proba(self, X):
        """
        Returns the probability of class 1 for each x in X.

        Raises
        ------
        RuntimeError
            If called before fit() (i.e. intercept_/coef_ do not exist yet).
        """
        try:
            getattr(self, "intercept_")
            getattr(self, "coef_")
        except AttributeError:
            raise RuntimeError("You must train classifer before predicting data!")
        X = check_array(X)
        if self.fit_intercept:
            X = np.insert(X, 0, 1, axis=1)
        # Re-assemble the full weight vector (intercept first) used in fit.
        w = np.insert(self.coef_, 0, self.intercept_)
        return invlogit_vect(np.dot(w, np.transpose(X)))
    def mean_squared_error(self, X, y):
        # Mean squared difference between true labels and predicted
        # class-1 probabilities.
        pred = self.predict_proba(X)
        sq_err = [(v-pred[i]) * (v-pred[i]) for i,v in enumerate(y)]
        return np.average(sq_err)
def _ll(w, X, y, l2):
    """Penalized negative log-likelihood of logistic regression.

    Parameters
    ----------
    w : weight vector (intercept included if the design matrix has a
        constant column).
    X : design matrix, shape (n_samples, n_features).
    y : 0/1 label vector.
    l2 : per-coefficient L2 penalties (list or array).
    """
    linear = X.dot(w)
    # sum_i [ log(1 + exp(z_i)) - y_i * z_i ]
    data_term = np.sum(log_one_plus_exp_vect(linear) - np.multiply(y, linear))
    # (l2 / 2) . w^2  -- element-wise penalty
    penalty = np.dot(np.divide(l2, 2), np.square(w))
    return data_term + penalty
def _ll_grad(w, X, y, l2):
    """Gradient of `_ll` with respect to the weights w.

    Parameters
    ----------
    w : weight vector.
    X : design matrix, shape (n_samples, n_features).
    y : 0/1 label vector.
    l2 : per-coefficient L2 penalties (list or array).
    """
    probs = invlogit_vect(X.dot(w))
    # Gradient of the (positive) log-likelihood, minus the L2 term ...
    grad = np.dot(np.transpose(X), np.subtract(y, probs)) - np.multiply(l2, w)
    # ... negated because _ll is the negative log-likelihood.
    return -grad
| [
"numpy.divide",
"scipy.optimize.minimize",
"numpy.multiply",
"numpy.average",
"numpy.subtract",
"numpy.concatenate",
"sklearn.utils.validation.check_X_y",
"numpy.zeros",
"numpy.transpose",
"numpy.insert",
"numpy.array",
"pyafm.util.log_one_plus_exp_vect",
"numpy.unique",
"sklearn.utils.val... | [((4649, 4667), 'numpy.multiply', 'np.multiply', (['l2', 'w'], {}), '(l2, w)\n', (4660, 4667), True, 'import numpy as np\n'), ((1226, 1241), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (1235, 1241), False, 'from sklearn.utils.validation import check_X_y\n'), ((1343, 1376), 'numpy.unique', 'np.unique', (['y'], {'return_inverse': '(True)'}), '(y, return_inverse=True)\n', (1352, 1376), True, 'import numpy as np\n'), ((1466, 1486), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (1474, 1486), True, 'import numpy as np\n'), ((3703, 3717), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {}), '(X)\n', (3714, 3717), False, 'from sklearn.utils.validation import check_array\n'), ((3805, 3846), 'numpy.insert', 'np.insert', (['self.coef_', '(0)', 'self.intercept_'], {}), '(self.coef_, 0, self.intercept_)\n', (3814, 3846), True, 'import numpy as np\n'), ((4066, 4084), 'numpy.average', 'np.average', (['sq_err'], {}), '(sq_err)\n', (4076, 4084), True, 'import numpy as np\n'), ((4236, 4251), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (4248, 4251), True, 'import numpy as np\n'), ((4341, 4357), 'numpy.divide', 'np.divide', (['l2', '(2)'], {}), '(l2, 2)\n', (4350, 4357), True, 'import numpy as np\n'), ((4359, 4376), 'numpy.multiply', 'np.multiply', (['w', 'w'], {}), '(w, w)\n', (4370, 4376), True, 'import numpy as np\n'), ((4604, 4619), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (4616, 4619), True, 'import numpy as np\n'), ((4621, 4638), 'numpy.subtract', 'np.subtract', (['y', 'p'], {}), '(y, p)\n', (4632, 4638), True, 'import numpy as np\n'), ((1425, 1451), 'numpy.insert', 'np.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (1434, 1451), True, 'import numpy as np\n'), ((2538, 2673), 'scipy.optimize.minimize', 'minimize', (['_ll', 'w0'], {'args': '(X, y, self.l2_)', 'jac': '_ll_grad', 'method': 'self.method', 'bounds': 'self.bounds_', 
'options': "{'maxiter': self.max_iter}"}), "(_ll, w0, args=(X, y, self.l2_), jac=_ll_grad, method=self.method,\n bounds=self.bounds_, options={'maxiter': self.max_iter})\n", (2546, 2673), False, 'from scipy.optimize import minimize\n'), ((3001, 3013), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3009, 3013), True, 'import numpy as np\n'), ((3765, 3791), 'numpy.insert', 'np.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (3774, 3791), True, 'import numpy as np\n'), ((4278, 4302), 'pyafm.util.log_one_plus_exp_vect', 'log_one_plus_exp_vect', (['z'], {}), '(z)\n', (4299, 4302), False, 'from pyafm.util import log_one_plus_exp_vect\n'), ((4304, 4321), 'numpy.multiply', 'np.multiply', (['y', 'z'], {}), '(y, z)\n', (4315, 4321), True, 'import numpy as np\n'), ((4571, 4586), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (4583, 4586), True, 'import numpy as np\n'), ((2183, 2207), 'numpy.insert', 'np.insert', (['self.l2', '(0)', '(0)'], {}), '(self.l2, 0, 0)\n', (2192, 2207), True, 'import numpy as np\n'), ((3886, 3901), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (3898, 3901), True, 'import numpy as np\n'), ((1794, 1839), 'numpy.concatenate', 'np.concatenate', (['([(None, None)], self.bounds)'], {}), '(([(None, None)], self.bounds))\n', (1808, 1839), True, 'import numpy as np\n')] |
import numpy as np
from skopt.sampler import Sobol, Lhs
from litebo.utils.config_space import ConfigurationSpace, Configuration
from litebo.utils.util_funcs import get_types, check_random_state
class Sampler(object):
    """
    Generate samples within the specified domain (which defaults to the whole config space).
    Users should call generate() which auto-scales the samples to the domain.
    To implement new design methodologies, subclasses should implement _generate().
    """
    def __init__(self, config_space: ConfigurationSpace,
                 size, lower_bounds=None, upper_bounds=None,
                 random_state=None):
        """
        Parameters
        ----------
        config_space : ConfigurationSpace
            ConfigurationSpace to do sampling.
        size : int N
            Number of samples.
        lower_bounds : lower bounds in [0, 1] for continuous dimensions (optional)
        upper_bounds : upper bounds in [0, 1] for continuous dimensions (optional)
        random_state : seed or RandomState for reproducible sampling (optional)
        """
        self.config_space = config_space
        types, bounds = get_types(config_space)
        # Build the per-dimension search space understood by skopt:
        # a (0, 1) interval for numeric dimensions, a list of category
        # indices for categorical ones.
        self.search_dims = []
        for i in range(len(types)):
            if types[i] == 0 and bounds[i][1] == 1.0:  # Integer and float
                self.search_dims.append((0.0, 1.0))
            elif types[i] > 0:  # Categorical
                self.search_dims.append(list(range(types[i])))
            else:
                raise NotImplementedError()
        self.size = size
        default_lb, default_ub = zip(*bounds)
        # User-supplied bounds are clipped to the space's own bounds so the
        # scaled samples in generate() can never leave the valid domain.
        self.lower_bounds = np.array(default_lb) if lower_bounds is None else np.clip(lower_bounds, default_lb, default_ub)
        self.upper_bounds = np.array(default_ub) if upper_bounds is None else np.clip(upper_bounds, default_lb, default_ub)
        self.rng = check_random_state(random_state)
    def set_params(self, **params):
        """
        Set the parameters of this sampler.
        Parameters
        ----------
        **params : dict
            Generator parameters.
        Returns
        -------
        self : object
            Generator instance.
        """
        if not params:
            return self
        for key, value in params.items():
            setattr(self, key, value)
        return self
    def generate(self, return_config=True):
        """
        Create samples in the domain specified during construction.
        Returns
        -------
        configs : list
            List of N sampled configurations within domain. (return_config is True)
        X : array, shape (N, D)
            Design matrix X in the specified domain. (return_config is False)
        """
        X = self._generate()
        # Linearly rescale the unit-cube samples into [lower, upper].
        X = self.lower_bounds + (self.upper_bounds - self.lower_bounds) * X
        if return_config:
            configs = [Configuration(self.config_space, vector=x) for x in X]
            return configs
        else:
            return X
    def _generate(self):
        """
        Create unscaled samples.
        Returns
        -------
        X : array, shape (N, D)
            Design matrix X in the config space's domain.
        """
        raise NotImplementedError()
class SobolSampler(Sampler):
    """Sampler drawing quasi-random points from a Sobol sequence."""
    def __init__(self, config_space: ConfigurationSpace,
                 size, lower_bounds=None, upper_bounds=None,
                 random_state=None):
        """
        Parameters
        ----------
        config_space : ConfigurationSpace
            ConfigurationSpace to do sampling.
        size : int N
            Number of samples.
        lower_bounds : lower bounds in [0, 1] for continuous dimensions (optional)
        upper_bounds : upper bounds in [0, 1] for continuous dimensions (optional)
        random_state : seed used to pick the offset into the Sobol sequence.
        """
        super().__init__(config_space, size, lower_bounds, upper_bounds, random_state)
    def _generate(self):
        # A random offset/seed so successive calls do not reproduce the
        # same prefix of the sequence.
        seed = self.rng.randint(int(1e6))
        try:
            # Prefer torch's scrambled Sobol engine when torch is installed.
            from torch.quasirandom import SobolEngine
            engine = SobolEngine(dimension=len(self.search_dims),
                                scramble=True, seed=seed)
            samples = engine.draw(n=self.size).numpy()
        except ImportError:
            # Fall back to skopt's Sobol generator.
            generator = Sobol(min_skip=seed, max_skip=seed)
            samples = generator.generate(self.search_dims, self.size)
        return samples
class LatinHypercubeSampler(Sampler):
    """Sampler producing (optionally optimized) latin hypercube designs."""
    def __init__(self, config_space: ConfigurationSpace,
                 size, lower_bounds=None, upper_bounds=None,
                 criterion='maximin', iterations=10000,
                 random_state=None):
        """
        Parameters
        ----------
        config_space : ConfigurationSpace
            ConfigurationSpace to do sampling.
        size : int N
            Number of samples.
        lower_bounds : lower bounds in [0, 1] for continuous dimensions (optional)
        upper_bounds : upper bounds in [0, 1] for continuous dimensions (optional)
        criterion : str or None, default='maximin'
            When set to None, the latin hypercube is not optimized
            - 'correlation' : optimized latin hypercube by minimizing the correlation
            - 'maximin' : optimized latin hypercube by maximizing the minimal pdist
            - 'ratio' : optimized latin hypercube by minimizing the ratio
                `max(pdist) / min(pdist)`
        iterations : int
            Define the number of iterations for optimizing latin hypercube.
        """
        super().__init__(config_space, size, lower_bounds, upper_bounds, random_state)
        self.criterion = criterion
        self.iterations = iterations
    def _generate(self):
        # Delegate the actual design construction to skopt's Lhs generator.
        generator = Lhs(criterion=self.criterion, iterations=self.iterations)
        return generator.generate(self.search_dims, self.size,
                                  random_state=self.rng)
| [
"litebo.utils.util_funcs.check_random_state",
"litebo.utils.config_space.Configuration",
"numpy.clip",
"skopt.sampler.Lhs",
"skopt.sampler.Sobol",
"numpy.array",
"litebo.utils.util_funcs.get_types"
] | [((1087, 1110), 'litebo.utils.util_funcs.get_types', 'get_types', (['config_space'], {}), '(config_space)\n', (1096, 1110), False, 'from litebo.utils.util_funcs import get_types, check_random_state\n'), ((1815, 1847), 'litebo.utils.util_funcs.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (1833, 1847), False, 'from litebo.utils.util_funcs import get_types, check_random_state\n'), ((5760, 5817), 'skopt.sampler.Lhs', 'Lhs', ([], {'criterion': 'self.criterion', 'iterations': 'self.iterations'}), '(criterion=self.criterion, iterations=self.iterations)\n', (5763, 5817), False, 'from skopt.sampler import Sobol, Lhs\n'), ((1575, 1595), 'numpy.array', 'np.array', (['default_lb'], {}), '(default_lb)\n', (1583, 1595), True, 'import numpy as np\n'), ((1625, 1670), 'numpy.clip', 'np.clip', (['lower_bounds', 'default_lb', 'default_ub'], {}), '(lower_bounds, default_lb, default_ub)\n', (1632, 1670), True, 'import numpy as np\n'), ((1699, 1719), 'numpy.array', 'np.array', (['default_ub'], {}), '(default_ub)\n', (1707, 1719), True, 'import numpy as np\n'), ((1749, 1794), 'numpy.clip', 'np.clip', (['upper_bounds', 'default_lb', 'default_ub'], {}), '(upper_bounds, default_lb, default_ub)\n', (1756, 1794), True, 'import numpy as np\n'), ((2827, 2869), 'litebo.utils.config_space.Configuration', 'Configuration', (['self.config_space'], {'vector': 'x'}), '(self.config_space, vector=x)\n', (2840, 2869), False, 'from litebo.utils.config_space import ConfigurationSpace, Configuration\n'), ((4273, 4308), 'skopt.sampler.Sobol', 'Sobol', ([], {'min_skip': 'skip', 'max_skip': 'skip'}), '(min_skip=skip, max_skip=skip)\n', (4278, 4308), False, 'from skopt.sampler import Sobol, Lhs\n')] |
import sys
sys.path.append("../src")
from convolutional_VAE import ConVae
import h5py
import numpy as np
import keras as ker
import os
import tensorflow as tf
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import f1_score
from keras.utils import to_categorical
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from keras.datasets import mnist
print("PID: ", os.getpid())
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
def longform_latent(latent):
    """Flatten per-event latent codes into a single 2-D design matrix.

    Parameters
    ----------
    latent : array-like, shape (n_events, n_samples, latent_dim)
        One latent block per event; each block holds the codes of all
        samples for that event.

    Returns
    -------
    np.ndarray, shape (n_samples, n_events * latent_dim)
        Row j is the concatenation of event 0..n_events-1 latent codes of
        sample j, i.e. out[j, i*latent_dim + k] == latent[i, j, k].
    """
    lat = np.asarray(latent, dtype=float)  # original always produced float64
    n_events, n_samples, latent_dim = lat.shape
    # Move the sample axis first, then lay the per-event codes side by side;
    # this replaces the original per-event slice-assignment loop.
    return np.transpose(lat, (1, 0, 2)).reshape(n_samples, n_events * latent_dim)
def compute_accuracy(X, y, Xtest, ytest):
    """Fit a multinomial logistic classifier and report per-class F1 scores.

    Parameters
    ----------
    X, y : training features and labels.
    Xtest, ytest : held-out features and labels.

    Returns
    -------
    (train_f1, test_f1) : pair of per-class F1 score arrays
        (``average=None`` keeps one score per class).
    """
    classifier = LogisticRegression(
        C=10,
        solver="saga",
        multi_class="multinomial",
        class_weight="balanced",
        max_iter=1000,
    )
    classifier.fit(X, y)
    train_f1 = f1_score(y, classifier.predict(X), average=None)
    test_f1 = f1_score(ytest, classifier.predict(Xtest), average=None)
    return train_f1, test_f1
# Training dat
# X = np.load("../data/processed/all_0130.npy")
# --- Default convolutional architecture (overridden per-dataset below) ---
n_layers = 3
filter_architecture = [32, 64, 128]
# kernel_architecture = [19, 19, 17]
strides_architecture = [2, 2, 2]
pool_architecture = [0, 0, 0]
n_clust = 3
# Loss/feature toggles for the ConVae model: MMD regularization only.
mode_config = {
    "simulated_mode": False,
    "restore_mode": False,
    "include_KL": False,
    "include_MMD": True,
    "include_KM": False,
    "batchnorm": False,
}
# Clustering-head configuration (used when include_KM is enabled).
clustering_config = {
    "n_clusters": n_clust,
    "alpha": 1,
    "delta": 0.01,
    "pretrain_simulated": False,
    "pretrain_epochs": 200,
    "update_interval": 140,
}
# Selects which dataset branch below is executed:
# one of "mnist", "simulated", "clean", "real".
data = "clean"
if data == "mnist":
    # MNIST images scaled to [0, 1] with an explicit channel axis.
    # NOTE(review): this branch never defines y_test/x_test targets used by
    # the ConVae labelled_data argument below -- only usable for quick tests.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = np.expand_dims(x_train, -1) / 255
    x_test = np.expand_dims(x_test, -1) / 255
    x_tot = np.concatenate([x_train, x_test])
    kernel_architecture = [5, 5, 3]
    n_clust = 10
if data == "simulated":
    # Pre-processed simulated events with binary targets.
    train_data = np.load("../data/simulated/pr_train_simulated.npy")
    test_data = np.load("../data/simulated/pr_test_simulated.npy")
    test_targets = np.load("../data/simulated/test_targets.npy")
    x_train = train_data
    x_test = test_data
    y_test = test_targets
    kernel_architecture = [5, 5, 3, 3]
    filter_architecture = [16, 32, 64, 64]
    clustering_config["pretrain_epochs"] = 20
    clustering_config["n_clusters"] = 2
    n_layers = 4
    # kernel_architecture = [19, 19, 17]
elif data == "clean":
    # Four experimental runs; the [:, 1:, 1:, :] slice trims the first
    # row/column so image sizes match the network's stride chain.
    run_130 = np.load("../data/clean/images/run_0130_label_False_size_80.npy")[
        :, 1:, 1:, :
    ]
    run_150 = np.load("../data/clean/images/run_0150_label_False_size_80.npy")[
        :, 1:, 1:, :
    ]
    # run_170 = np.load("../data/clean/images/run_0170_label_False_size_50.npy")[:, 1:-1, 1:-1, :]
    run_190 = np.load("../data/clean/images/run_0190_label_False_size_80.npy")[
        :, 1:, 1:, :
    ]
    run_210 = np.load("../data/clean/images/run_0210_label_False_size_80.npy")[
        :, 1:, 1:, :
    ]
    x_train = np.concatenate([run_130, run_150, run_190, run_210])
    # Labelled hold-out set used for the semi-supervised evaluation.
    x_test = np.load("../data/clean/images/train_size_80.npy")[:, 1:, 1:, :]
    y_test = np.load("../data/clean/targets/train_targets_size_80.npy")
    y_test = np.squeeze(y_test)
    kernel_architecture = [5, 5, 3, 3]
    filter_architecture = [32, 64, 128, 128]
    strides_architecture += [2]
    pool_architecture += [0]
    clustering_config["pretrain_epochs"] = 200
    clustering_config["n_clusters"] = 3
    n_layers = 4
if data == "real":
    # Labelled data for testing
    with h5py.File("../data/images.h5", "r") as fo:
        train_targets = np.array(fo["train_targets"])
        test_targets = np.array(fo["test_targets"])
    all_0130 = np.load("../data/processed/all_0130.npy")
    x_train = all_0130
    # train_data = np.load("../data/processed/train.npy")
    test_data = np.load("../data/processed/test.npy")
    train_data = np.load("../data/processed/train.npy")
    x_test = train_data
    y_test = train_targets
    kernel_architecture = [7, 7, 5, 5]
    filter_architecture = [32, 64, 64, 128]
    clustering_config["pretrain_epochs"] = 90
    clustering_config["n_clusters"] = 3
    n_layers = 4
# --- Build and train the convolutional VAE ---
epochs = 2000
latent_dim = 9
batch_size = 256
cvae = ConVae(
    n_layers,
    filter_architecture,
    kernel_architecture,
    strides_architecture,
    pool_architecture,
    latent_dim,
    x_train,
    # all_0130,
    beta=11,
    # sampling_dim=100,
    clustering_config=clustering_config,
    mode_config=mode_config,
    # labelled_data=[test_data, test_targets],
    labelled_data=[x_test, y_test],
)
graph_kwds = {"activation": "relu", "output_activation": None}
loss_kwds = {"reconst_loss": None}
cvae.compile_model(graph_kwds, loss_kwds)
opt = tf.train.AdamOptimizer
opt_args = [1e-3]
opt_kwds = {"beta1": 0.7}
# NOTE(review): opt_args/opt_kwds are defined but not passed to
# compute_gradients here -- presumably intentional; verify against ConVae's API.
cvae.compute_gradients(opt)
sess = tf.InteractiveSession()
# Returns the per-epoch reconstruction (lx) and latent (lz) loss traces.
lx, lz = cvae.train(
    sess, epochs, "../drawing", "../models", batch_size, earlystopping=True
)
# Script terminates here; everything below is currently disabled.
sys.exit()
cvae.X = train_data
cvae.generate_latent(sess, "../drawing", (train_data, test_data))
latent_values, _, _ = cvae.generate_latent(
sess, "../drawing", (train_data, test_data), save=False
)
latent_train = longform_latent(latent_values[0])
latent_test = longform_latent(latent_values[1])
train_score, test_score = compute_accuracy(
latent_train, train_targets, latent_test, test_targets
)
print()
print("--------------------")
print("train: ", train_score)
print("test : ", test_score)
print("---------------------")
# cvae.generate_samples("../drawing", )
sess.close()
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 20), sharex=True)
plt.suptitle("Loss function components")
axs[0].plot(range(epochs), lx, label=r"$\mathcal{L}_x$")
axs[1].plot(range(epochs), lz, label=r"$\mathcal{L}_z$")
[a.legend() for a in axs]
[a.set_ylim((1000, 200)) for a in axs]
fig.savefig("../plots/simulated_loss_functions.png")
| [
"sys.path.append",
"numpy.load",
"h5py.File",
"os.getpid",
"numpy.concatenate",
"matplotlib.pyplot.suptitle",
"convolutional_VAE.ConVae",
"keras.datasets.mnist.load_data",
"numpy.zeros",
"numpy.expand_dims",
"sklearn.linear_model.LogisticRegression",
"matplotlib.use",
"numpy.array",
"tenso... | [((12, 37), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (27, 37), False, 'import sys\n'), ((413, 434), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (427, 434), False, 'import matplotlib\n'), ((4453, 4688), 'convolutional_VAE.ConVae', 'ConVae', (['n_layers', 'filter_architecture', 'kernel_architecture', 'strides_architecture', 'pool_architecture', 'latent_dim', 'x_train'], {'beta': '(11)', 'clustering_config': 'clustering_config', 'mode_config': 'mode_config', 'labelled_data': '[x_test, y_test]'}), '(n_layers, filter_architecture, kernel_architecture,\n strides_architecture, pool_architecture, latent_dim, x_train, beta=11,\n clustering_config=clustering_config, mode_config=mode_config,\n labelled_data=[x_test, y_test])\n', (4459, 4688), False, 'from convolutional_VAE import ConVae\n'), ((5062, 5085), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (5083, 5085), True, 'import tensorflow as tf\n'), ((5188, 5198), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5196, 5198), False, 'import sys\n'), ((5793, 5854), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(15, 20)', 'sharex': '(True)'}), '(nrows=1, ncols=2, figsize=(15, 20), sharex=True)\n', (5805, 5854), True, 'import matplotlib.pyplot as plt\n'), ((5855, 5895), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Loss function components"""'], {}), "('Loss function components')\n", (5867, 5895), True, 'import matplotlib.pyplot as plt\n'), ((516, 527), 'os.getpid', 'os.getpid', ([], {}), '()\n', (525, 527), False, 'import os\n'), ((625, 687), 'numpy.zeros', 'np.zeros', (['(latent.shape[1], latent.shape[0] * latent.shape[2])'], {}), '((latent.shape[1], latent.shape[0] * latent.shape[2]))\n', (633, 687), True, 'import numpy as np\n'), ((921, 1031), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(10)', 'solver': '"""saga"""', 'multi_class': 
'"""multinomial"""', 'class_weight': '"""balanced"""', 'max_iter': '(1000)'}), "(C=10, solver='saga', multi_class='multinomial',\n class_weight='balanced', max_iter=1000)\n", (939, 1031), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1921, 1938), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1936, 1938), False, 'from keras.datasets import mnist\n'), ((2045, 2078), 'numpy.concatenate', 'np.concatenate', (['[x_train, x_test]'], {}), '([x_train, x_test])\n', (2059, 2078), True, 'import numpy as np\n'), ((2173, 2224), 'numpy.load', 'np.load', (['"""../data/simulated/pr_train_simulated.npy"""'], {}), "('../data/simulated/pr_train_simulated.npy')\n", (2180, 2224), True, 'import numpy as np\n'), ((2241, 2291), 'numpy.load', 'np.load', (['"""../data/simulated/pr_test_simulated.npy"""'], {}), "('../data/simulated/pr_test_simulated.npy')\n", (2248, 2291), True, 'import numpy as np\n'), ((2311, 2356), 'numpy.load', 'np.load', (['"""../data/simulated/test_targets.npy"""'], {}), "('../data/simulated/test_targets.npy')\n", (2318, 2356), True, 'import numpy as np\n'), ((3927, 3968), 'numpy.load', 'np.load', (['"""../data/processed/all_0130.npy"""'], {}), "('../data/processed/all_0130.npy')\n", (3934, 3968), True, 'import numpy as np\n'), ((4066, 4103), 'numpy.load', 'np.load', (['"""../data/processed/test.npy"""'], {}), "('../data/processed/test.npy')\n", (4073, 4103), True, 'import numpy as np\n'), ((4121, 4159), 'numpy.load', 'np.load', (['"""../data/processed/train.npy"""'], {}), "('../data/processed/train.npy')\n", (4128, 4159), True, 'import numpy as np\n'), ((1953, 1980), 'numpy.expand_dims', 'np.expand_dims', (['x_train', '(-1)'], {}), '(x_train, -1)\n', (1967, 1980), True, 'import numpy as np\n'), ((2000, 2026), 'numpy.expand_dims', 'np.expand_dims', (['x_test', '(-1)'], {}), '(x_test, -1)\n', (2014, 2026), True, 'import numpy as np\n'), ((3220, 3272), 'numpy.concatenate', 'np.concatenate', (['[run_130, run_150, run_190, 
run_210]'], {}), '([run_130, run_150, run_190, run_210])\n', (3234, 3272), True, 'import numpy as np\n'), ((3363, 3421), 'numpy.load', 'np.load', (['"""../data/clean/targets/train_targets_size_80.npy"""'], {}), "('../data/clean/targets/train_targets_size_80.npy')\n", (3370, 3421), True, 'import numpy as np\n'), ((3435, 3453), 'numpy.squeeze', 'np.squeeze', (['y_test'], {}), '(y_test)\n', (3445, 3453), True, 'import numpy as np\n'), ((3763, 3798), 'h5py.File', 'h5py.File', (['"""../data/images.h5"""', '"""r"""'], {}), "('../data/images.h5', 'r')\n", (3772, 3798), False, 'import h5py\n'), ((3830, 3859), 'numpy.array', 'np.array', (["fo['train_targets']"], {}), "(fo['train_targets'])\n", (3838, 3859), True, 'import numpy as np\n'), ((3883, 3911), 'numpy.array', 'np.array', (["fo['test_targets']"], {}), "(fo['test_targets'])\n", (3891, 3911), True, 'import numpy as np\n'), ((2693, 2757), 'numpy.load', 'np.load', (['"""../data/clean/images/run_0130_label_False_size_80.npy"""'], {}), "('../data/clean/images/run_0130_label_False_size_80.npy')\n", (2700, 2757), True, 'import numpy as np\n'), ((2800, 2864), 'numpy.load', 'np.load', (['"""../data/clean/images/run_0150_label_False_size_80.npy"""'], {}), "('../data/clean/images/run_0150_label_False_size_80.npy')\n", (2807, 2864), True, 'import numpy as np\n'), ((3006, 3070), 'numpy.load', 'np.load', (['"""../data/clean/images/run_0190_label_False_size_80.npy"""'], {}), "('../data/clean/images/run_0190_label_False_size_80.npy')\n", (3013, 3070), True, 'import numpy as np\n'), ((3113, 3177), 'numpy.load', 'np.load', (['"""../data/clean/images/run_0210_label_False_size_80.npy"""'], {}), "('../data/clean/images/run_0210_label_False_size_80.npy')\n", (3120, 3177), True, 'import numpy as np\n'), ((3286, 3335), 'numpy.load', 'np.load', (['"""../data/clean/images/train_size_80.npy"""'], {}), "('../data/clean/images/train_size_80.npy')\n", (3293, 3335), True, 'import numpy as np\n')] |
import logging
import os
from os import PathLike
import re
from typing import Dict, List, Set, Type, Optional, Union
import numpy
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.models import Model
from allennlp.common.registrable import Registrable
from allennlp.data import Instance, Vocabulary
from allennlp.data.batch import Batch
from allennlp.nn import util
from allennlp.nn.regularizers import RegularizerApplicator
from overrides import overrides
import itertools
from dl4nlp_pos_tagging.models.meta_wrapper import MetaWrapper
import dl4nlp_pos_tagging.common.utils as utils
import numpy as np
logger = logging.getLogger(__name__)
@Model.register("meta_tagger_wrapper")
class MetaTaggerWrapper(MetaWrapper):
    """Runs every component tagger, feeds their raw outputs into the meta
    model, and merges all (namespaced) results into a single dictionary."""

    @overrides
    def forward(
        self,
        tokens,
        metadata,
        tags: torch.LongTensor = None,
        **kwargs
    ) -> Dict[str, torch.Tensor]:
        merged: Dict[str, torch.Tensor] = {}
        meta_inputs = {}

        # Run each component model; its "output" entry is routed into the
        # meta model, everything else is kept under the component's namespace.
        for component_name in self.component_models:
            component_result = self.component_models[component_name](
                tokens=tokens,
                metadata=metadata,
                tags=tags
            )
            meta_inputs[component_name] = component_result.pop("output")
            utils.extend_dictionary_by_namespace(merged, component_name, component_result)

        meta_result = self.meta_model(
            tokens=tokens,
            metadata=metadata,
            tags=tags,
            **meta_inputs,
            **kwargs
        )
        utils.extend_dictionary_by_namespace(merged, "meta", meta_result)

        if tags is not None:
            # Total loss is the sum of every per-model loss entry.
            merged["loss"] = sum(merged.pop(f"{k}_loss") for k in self.all_model_keys)
            merged["actual"] = tags
        return merged

    @overrides
    def make_output_human_readable(
        self, output_dict: Dict[str, torch.Tensor]
    ) -> Dict[str, torch.Tensor]:
        """
        Does a simple position-wise argmax over each token, converts indices to string labels, and
        adds a `"tags"` key to the dictionary with the result.
        """
        for name in self.all_model_keys:
            probabilities = output_dict[f"{name}_class_probabilities"].cpu().data.numpy()
            # A 3-D array is (batch, tokens, classes); otherwise treat the
            # whole array as a single instance.
            if probabilities.ndim == 3:
                per_instance = list(probabilities)
            else:
                per_instance = [probabilities]
            decoded = []
            for instance_probs in per_instance:
                best_indices = numpy.argmax(instance_probs, axis=-1)
                decoded.append([
                    self.vocab.get_token_from_index(ix, namespace=self.label_namespace)
                    for ix in best_indices
                ])
            output_dict[f"{name}_tags"] = decoded
        return output_dict
| [
"dl4nlp_pos_tagging.common.utils.extend_dictionary_by_namespace",
"numpy.argmax",
"logging.getLogger",
"allennlp.models.Model.register"
] | [((688, 715), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (705, 715), False, 'import logging\n'), ((718, 755), 'allennlp.models.Model.register', 'Model.register', (['"""meta_tagger_wrapper"""'], {}), "('meta_tagger_wrapper')\n", (732, 755), False, 'from allennlp.models import Model\n'), ((1575, 1645), 'dl4nlp_pos_tagging.common.utils.extend_dictionary_by_namespace', 'utils.extend_dictionary_by_namespace', (['output_dict', '"""meta"""', 'meta_output'], {}), "(output_dict, 'meta', meta_output)\n", (1611, 1645), True, 'import dl4nlp_pos_tagging.common.utils as utils\n'), ((1311, 1384), 'dl4nlp_pos_tagging.common.utils.extend_dictionary_by_namespace', 'utils.extend_dictionary_by_namespace', (['output_dict', 'name', 'component_output'], {}), '(output_dict, name, component_output)\n', (1347, 1384), True, 'import dl4nlp_pos_tagging.common.utils as utils\n'), ((2686, 2720), 'numpy.argmax', 'numpy.argmax', (['predictions'], {'axis': '(-1)'}), '(predictions, axis=-1)\n', (2698, 2720), False, 'import numpy\n')] |
import io
from dataclasses import InitVar, dataclass, field
from typing import Any
import numpy as np
from PIL import Image
@dataclass
class Film:
    """Accumulates gamma-corrected pixel colors into an RGB buffer.

    Attributes:
        file_name: destination path used by :meth:`save`.
        samples: samples per pixel (stored for callers; unused here).
        width: image width in pixels.
        height: image height in pixels.
        image: float64 buffer of shape (height, width, 3), filled by set_pixel.
    """
    file_name: str
    samples: int
    width: int
    height: int
    image: Any = field(init=False)

    def __post_init__(self):
        # ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is the
        # exact equivalent (produces a float64 array).
        self.image = np.zeros([self.height, self.width, 3], float)

    def set_pixel(self, u, v, c):
        """Store color ``c`` (with .x/.y/.z channels in [0, 1]) at column
        ``u``, row ``v``, applying gamma 2.2 and scaling to 0-255."""
        self.image[v, u] = [c.x ** (1 / 2.2) * 255, c.y ** (1 / 2.2) * 255, c.z ** (1 / 2.2) * 255]

    def save(self):
        """Write the buffer to ``self.file_name`` as an 8-bit RGB image."""
        im = Image.fromarray(self.image.astype(np.uint8), mode='RGB')
        im.save(self.file_name)

    def as_bmp(self):
        """Return the buffer encoded as BMP bytes (in memory, no file I/O)."""
        im = Image.fromarray(self.image.astype(np.uint8), mode='RGB')
        buffer = io.BytesIO()
        im.save(buffer, format="BMP")
        return buffer.getvalue()
| [
"dataclasses.field",
"io.BytesIO",
"numpy.zeros"
] | [((234, 251), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (239, 251), False, 'from dataclasses import InitVar, dataclass, field\n'), ((303, 351), 'numpy.zeros', 'np.zeros', (['[self.height, self.width, 3]', 'np.float'], {}), '([self.height, self.width, 3], np.float)\n', (311, 351), True, 'import numpy as np\n'), ((720, 732), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (730, 732), False, 'import io\n')] |
import numpy as np
def get_array_indices(*shape):
    """Return an integer array of the given shape whose entries are its own
    flat (row-major) indices."""
    total = np.prod(shape)
    return np.reshape(np.arange(total), shape)
"numpy.prod"
] | [((73, 87), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (80, 87), True, 'import numpy as np\n')] |
import numpy as np
from . import posquat as pq
from . import utilities as ut
class Bone(object):
    """One joint of a skeleton hierarchy."""

    def __init__(self, name, parent=-1):
        self.name = name        # bone identifier
        self.parent = parent    # parent bone index; -1 marks the root
        self.children = []      # child bone indices, filled in by the loader
class Skeleton(object):
    """Bone hierarchy with bind/rest poses and a simple two-bone leg IK.

    Poses are handled as ``(positions, quaternions)`` pairs ("pq") whose
    second-to-last axis indexes bones; the math lives in the ``pq`` helper
    module. Parents are assumed to precede children in bone order.
    """

    def __init__(self):
        self.bones = []          # Bone objects, index == bone id
        self.parentlist = []     # parent bone index per bone
        self.bindpose = np.zeros([512, 4, 4])     # 4x4 bind matrices (up to 512 bones)
        self.initialpose = np.zeros([512, 4, 4])  # 4x4 rest-pose matrices
        self.localinitialpq = None  # optional local-space (pos, quat) rest pose
        self.upleglength = 0     # thigh length, used by foot_ik
        self.leglength = 0       # shin length, used by foot_ik
        self.hipsid = 0
        self.leftlegids = [0, 0, 0]   # [upleg, leg, foot] bone ids -- TODO confirm ordering
        self.rightlegids = [0, 0, 0]
        self.leftfootid = 0
        self.rightfootid = 0
        self.copid = 0

    def local_to_global(self, localpose, extra_root=None):
        """Accumulate a parent-local (pos, quat) pose down the hierarchy into
        global space. ``extra_root`` is an optional extra (pos, quat)
        transform applied to the root bone only."""
        count = len(self.bones)
        pos, quat = localpose
        gpos = np.zeros_like(pos)
        gquat = np.zeros_like(quat)
        if extra_root is None:
            # root is not converted
            gpos[..., 0, :] = pos[..., 0, :]
            gquat[..., 0, :] = quat[..., 0, :]
        else:
            gpos[..., 0, :], gquat[..., 0, :] = pq.mult(
                (pos[..., 0, :], quat[..., 0, :]),
                extra_root
            )
        # Children compose onto their (already-global) parent transform.
        for i in range(1, count):
            gpos[..., i, :], gquat[..., i, :] = pq.mult(
                (pos[..., i, :], quat[..., i, :]),
                (gpos[..., self.parentlist[i], :], gquat[..., self.parentlist[i], :])
            )
        return gpos, pq.vec_normalize(gquat)

    def global_to_local(self, globalpose, extra_root=None):
        """Convert a global-space pose into parent-local space (inverse of
        :meth:`local_to_global`)."""
        gpos, gquat = globalpose
        ipose, iquat = pq.inv(None, gpos, gquat)
        pos = np.zeros_like(gpos)
        quat = np.zeros_like(gquat)
        if extra_root is None:
            # root is not converted
            pos[..., 0, :] = gpos[..., 0, :]
            quat[..., 0, :] = gquat[..., 0, :]
        else:
            pos[..., 0, :], quat[..., 0, :] = pq.mult(
                (gpos[..., 0, :], gquat[..., 0, :]),
                pq.inv(extra_root)
            )
        # multiply by the inverse parent
        pos[..., 1:, :], quat[..., 1:, :] = pq.mult(
            (gpos[..., 1:, :], gquat[..., 1:, :]),
            (ipose[..., self.parentlist[1:], :], iquat[..., self.parentlist[1:], :])
        )
        # remap lengths: lock non-root translations to the rest pose so bone
        # lengths cannot drift (``is not None`` -- the old ``!= None`` was
        # unidiomatic and fragile for array-like values)
        if self.localinitialpq is not None:
            pos[..., 2:, :] = self.localinitialpq[0][2:, :]
        return pos, pq.vec_normalize(quat)

    def foot_ik(self, hips, leftfoot, rightfoot, globalpose=None):
        """Two-bone leg IK: plant both feet at the given global (pos, quat)
        targets while moving the hips toward the requested transform.

        ``hips``, ``leftfoot`` and ``rightfoot`` are (positions, quaternions)
        targets; ``globalpose`` defaults to the stored initial pose. Returns
        the adjusted global (positions, quaternions) pose.
        """
        # Broadcast helpers matching the leading (batch/frame) dims of hips.
        pos_augmentation = np.ones_like(hips[0][..., 0, 0, np.newaxis].repeat(3, axis=-1))
        quat_augmentation = np.ones_like(hips[1][..., 0, 0, np.newaxis].repeat(4, axis=-1))
        y_vectors = np.array([0, 1, 0]) * pos_augmentation
        if globalpose is None:
            gpos, gquat = pq.pose_to_pq(self.initialpose)
            gpos = gpos * pos_augmentation
            gquat = gquat * quat_augmentation
            globalpose = (gpos, gquat)
        localpose = self.global_to_local(globalpose)
        gpos, gquat = globalpose
        lpos, lquat = localpose
        def _compute_error_length(start_pos, end_pos):
            # How far (plus a 0.5 margin) the target lies beyond full leg extension.
            middle = end_pos - start_pos
            return np.maximum(np.sqrt(np.sum(middle * middle, axis=-1, keepdims=True)) - self.leglength - self.upleglength + 0.5, 0)
        def _solve_reaching(ghips, gleftfoot, grightfoot, localleftupleg, localrightupleg):
            # compute length between frames
            hips_distance = ut.compute_distance(ghips[0])[..., np.newaxis] * np.ones(3)
            def _solve_one_leg(ghips, localupleg, globalfoot):
                # Pull the hips toward the foot when the leg would overstretch.
                gupleg = pq.mult(localupleg, ghips)
                hips_to_foot = globalfoot[0] - gupleg[0]
                hips_to_foot /= np.linalg.norm(hips_to_foot, axis=-1)[..., np.newaxis] * np.ones(3)
                overshoot = _compute_error_length(gupleg[0], globalfoot[0])
                ghips[0][..., :] = ghips[0] + hips_to_foot * overshoot
                #ghips[0][..., 1] = ghips[0][..., 1] - overshoot[...,0]
            def _solve_hips(ghips, hips_distance):
                # Re-space consecutive hip positions to preserve travel distance.
                new_dist = ut.compute_distance(ghips[0])[..., np.newaxis] * np.ones(3)
                new_dist += 1e-8
                hips_vector = ut.compute_vector(ghips[0]) / new_dist * ((hips_distance + new_dist) * 0.5)
                ghips[0][..., 1:, :] = ghips[0][..., :-1, :] - hips_vector[..., 1:, :]
            # first solve for the left foot
            for i in range(5):
                _solve_one_leg(ghips, localleftupleg, gleftfoot)
                _solve_hips(ghips, hips_distance)
                _solve_one_leg(ghips, localrightupleg, grightfoot)
                _solve_hips(ghips, hips_distance)
        def _compute_leg(start_pos, end_pos, pole_dir):
            # Analytic two-bone solve: place the knee on the circle defined by
            # the thigh length, oriented by the pole direction.
            middle = ((end_pos - start_pos)/(self.upleglength + self.leglength)) * self.upleglength
            middle_len = np.sum(middle * middle, axis=-1, keepdims=True)
            aim_vec = middle / np.sqrt(middle_len)
            up_len = np.zeros_like(middle_len)
            sqrt_up_length = self.upleglength * self.upleglength
            up_len = np.where(middle_len < sqrt_up_length, np.sqrt(sqrt_up_length - middle_len), up_len)
            side_dir = pq.vec_normalize(pq.vec_cross3(aim_vec, pole_dir))
            up_dir = pq.vec_normalize(pq.vec_cross3(side_dir, aim_vec))
            up_pos = start_pos + middle + up_dir * up_len
            start_quat = pq.quat_from_lookat(up_pos - start_pos, pole_dir)
            middle_quat = pq.quat_from_lookat(end_pos - up_pos, pole_dir)
            return start_quat, middle_quat, up_pos
        # make sure we don't over stretch
        _solve_reaching(
            hips,
            leftfoot,
            rightfoot,
            (lpos[..., self.leftlegids[0], :], lquat[..., self.leftlegids[0], :]),
            (lpos[..., self.rightlegids[0], :], lquat[..., self.rightlegids[0], :])
        )
        # re set the hips in local were we want it (hips is always child of root)
        lpos[..., self.hipsid, :], lquat[..., self.hipsid, :] = \
            pq.mult(hips, pq.inv(positions=gpos[..., 0, :], quaternions=gquat[..., 0, :]))
        # recompute globals
        gpos, gquat = self.local_to_global((lpos, lquat))
        # compute legs
        start_quat, middle_quat, middle_pos = _compute_leg(
            gpos[..., self.leftlegids[0], :],
            leftfoot[0],
            pq.quat_mul_vec(gquat[..., self.leftlegids[0], :], y_vectors)
        )
        gquat[..., self.leftlegids[0], :] = start_quat
        gquat[..., self.leftlegids[1], :] = middle_quat
        gpos[..., self.leftlegids[1], :] = middle_pos
        gpos[..., self.leftlegids[2], :] = leftfoot[0]
        gquat[..., self.leftlegids[2], :] = leftfoot[1]
        # Re-derive the toe (foot + 1) from its unchanged local transform.
        gpos[..., self.leftlegids[2]+1, :], gquat[..., self.leftlegids[2]+1, :] = pq.mult(
            (lpos[..., self.leftlegids[2] + 1, :], lquat[..., self.leftlegids[2] + 1, :]),
            (gpos[..., self.leftlegids[2], :], gquat[..., self.leftlegids[2], :])
        )
        start_quat, middle_quat, middle_pos = _compute_leg(
            gpos[..., self.rightlegids[0], :],
            rightfoot[0],
            pq.quat_mul_vec(gquat[..., self.rightlegids[0], :], y_vectors)
        )
        gquat[..., self.rightlegids[0], :] = start_quat
        gquat[..., self.rightlegids[1], :] = middle_quat
        gpos[..., self.rightlegids[1], :] = middle_pos
        gpos[..., self.rightlegids[2], :] = rightfoot[0]
        gquat[..., self.rightlegids[2], :] = rightfoot[1]
        gpos[..., self.rightlegids[2] + 1, :], gquat[..., self.rightlegids[2] + 1, :] = pq.mult(
            (lpos[..., self.rightlegids[2] + 1, :], lquat[..., self.rightlegids[2] + 1, :]),
            (gpos[..., self.rightlegids[2], :], gquat[..., self.rightlegids[2], :])
        )
        return gpos, gquat

    def boneid(self, name):
        """Return the index of the bone named ``name``, or -1 if absent."""
        for i in range(len(self.bones)):
            if self.bones[i].name == name:
                return i
        return -1
| [
"numpy.zeros_like",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.linalg.norm",
"numpy.sqrt"
] | [((348, 369), 'numpy.zeros', 'np.zeros', (['[512, 4, 4]'], {}), '([512, 4, 4])\n', (356, 369), True, 'import numpy as np\n'), ((397, 418), 'numpy.zeros', 'np.zeros', (['[512, 4, 4]'], {}), '([512, 4, 4])\n', (405, 418), True, 'import numpy as np\n'), ((826, 844), 'numpy.zeros_like', 'np.zeros_like', (['pos'], {}), '(pos)\n', (839, 844), True, 'import numpy as np\n'), ((861, 880), 'numpy.zeros_like', 'np.zeros_like', (['quat'], {}), '(quat)\n', (874, 880), True, 'import numpy as np\n'), ((1726, 1745), 'numpy.zeros_like', 'np.zeros_like', (['gpos'], {}), '(gpos)\n', (1739, 1745), True, 'import numpy as np\n'), ((1761, 1781), 'numpy.zeros_like', 'np.zeros_like', (['gquat'], {}), '(gquat)\n', (1774, 1781), True, 'import numpy as np\n'), ((2795, 2814), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (2803, 2814), True, 'import numpy as np\n'), ((4974, 5021), 'numpy.sum', 'np.sum', (['(middle * middle)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(middle * middle, axis=-1, keepdims=True)\n', (4980, 5021), True, 'import numpy as np\n'), ((5094, 5119), 'numpy.zeros_like', 'np.zeros_like', (['middle_len'], {}), '(middle_len)\n', (5107, 5119), True, 'import numpy as np\n'), ((3616, 3626), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3623, 3626), True, 'import numpy as np\n'), ((5053, 5072), 'numpy.sqrt', 'np.sqrt', (['middle_len'], {}), '(middle_len)\n', (5060, 5072), True, 'import numpy as np\n'), ((5244, 5280), 'numpy.sqrt', 'np.sqrt', (['(sqrt_up_length - middle_len)'], {}), '(sqrt_up_length - middle_len)\n', (5251, 5280), True, 'import numpy as np\n'), ((3889, 3899), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3896, 3899), True, 'import numpy as np\n'), ((4247, 4257), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (4254, 4257), True, 'import numpy as np\n'), ((3832, 3869), 'numpy.linalg.norm', 'np.linalg.norm', (['hips_to_foot'], {'axis': '(-1)'}), '(hips_to_foot, axis=-1)\n', (3846, 3869), True, 'import numpy as np\n'), ((3307, 3354), 
'numpy.sum', 'np.sum', (['(middle * middle)'], {'axis': '(-1)', 'keepdims': '(True)'}), '(middle * middle, axis=-1, keepdims=True)\n', (3313, 3354), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
def eps(n):
    """Draw ``n`` independent standard-normal noise samples."""
    return np.random.normal(size=n)


def causal_network(B, C, n):
    """Simulate a linear-gaussian structural causal model.

    ``B`` and ``C`` are the exogenous roots; every other node is a linear
    function of its parents plus unit-variance gaussian noise.

    Args:
        B, C: length-``n`` arrays with the root-variable values.
        n: sample size (number of rows in the returned frame).

    Returns:
        DataFrame with columns A, B, C, D, W, X, Y, Z.
    """
    # Noise is drawn in the same order as before so seeded runs reproduce.
    A = 1.414 * B + eps(n)
    D = -2.71 * C + eps(n)
    Z = 0.5 * B - 3.14 * C + eps(n)
    X = 2 * A - 0.3 * Z + eps(n)
    W = 0.3 * X + eps(n)
    Y = 3 * W - 1.2 * Z + 10 * D + eps(n)
    return pd.DataFrame(
        {"A": A, "B": B, "C": C, "D": D, "W": W, "X": X, "Y": Y, "Z": Z}
    )
| [
"pandas.DataFrame",
"numpy.random.normal"
] | [((64, 88), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n'}), '(size=n)\n', (80, 88), True, 'import numpy as np\n'), ((300, 314), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (312, 314), True, 'import pandas as pd\n')] |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""grad ops"""
import numpy as np
def area_box_grad(box):
    """Gradient of each box area with respect to its corner coordinates.

    Args:
        box: array of shape (N, 4) holding (x1, y1, x2, y2) corners.

    Returns:
        (N, 4) array with d(area)/d(coordinate) per box.
    """
    x1, y1, x2, y2 = box[:, 0], box[:, 1], box[:, 2], box[:, 3]
    return np.stack([y1 - y2, x1 - x2, y2 - y1, x2 - x1], axis=1)
def grad_xywh_to_cxcy(arr):
    """Gradient of the xywh -> corner-pair (cxcy) coordinate change.

    Args:
        arr: array of shape (N, 4).

    Returns:
        (N, 4) array of per-coordinate gradients.
    """
    a0, a1, a2, a3 = arr[:, 0], arr[:, 1], arr[:, 2], arr[:, 3]
    return np.stack([a0 + a2, a1 + a3, (a2 - a0) / 2, (a3 - a1) / 2], axis=1)
def dres_dwh(dres_df, wh, is_maxmin,
             src_boxes_cxcy, target_boxes_cxcy):
    """Backpropagate ``dres_df`` through the width/height overlap term.

    Args:
        dres_df: upstream gradient, shape (n, m).
        wh: pairwise width/height extents, shape (n, m, 2).
        is_maxmin: selects which corner comparison drives max/min gradients.
        src_boxes_cxcy, target_boxes_cxcy: corner-pair boxes, shape (n, 4).

    Returns:
        (n, 4) gradient with respect to the source box corners.
    """
    # Chain rule through area = w * h: d/dw = h, d/dh = w.
    d_dwh = np.stack([dres_df * wh[:, :, 1], dres_df * wh[:, :, 0]], axis=2)
    # Gradient only flows where the overlap extent is positive.
    masked = d_dwh * (wh > 0.).astype(np.int32)
    if is_maxmin:
        max_grad_src = (src_boxes_cxcy[:, :2] > target_boxes_cxcy[:, :2]).astype(np.int32)
        min_grad_src = (src_boxes_cxcy[:, 2:] < target_boxes_cxcy[:, 2:]).astype(np.int32)
        src_grad = np.concatenate([
            (-masked * max_grad_src).sum(axis=1),
            (masked * min_grad_src).sum(axis=1)
        ], axis=1)
    else:
        max_grad_src = (src_boxes_cxcy[:, 2:] > target_boxes_cxcy[:, 2:]).astype(np.int32)
        min_grad_src = (src_boxes_cxcy[:, :2] < target_boxes_cxcy[:, :2]).astype(np.int32)
        src_grad = np.concatenate([
            (-masked * min_grad_src).sum(axis=1),
            (masked * max_grad_src).sum(axis=1)
        ], axis=1)
    return src_grad
def grad_l1(src_boxes, tgt_boxes):
    """Gradient of the mean L1 box loss with respect to the source boxes.

    Coordinates equal to their target receive zero gradient; the rest get
    +/- 1/n depending on the sign of (src - tgt).
    """
    n_boxes = src_boxes.shape[0]
    changed = (src_boxes != tgt_boxes).astype(np.float32)
    direction = -np.ones_like(src_boxes) + 2 * (src_boxes >= tgt_boxes)
    return direction * changed / n_boxes
| [
"numpy.stack",
"numpy.ones_like"
] | [((762, 877), 'numpy.stack', 'np.stack', (['[box[:, 1] - box[:, 3], box[:, 0] - box[:, 2], box[:, 3] - box[:, 1], box[:,\n 2] - box[:, 0]]'], {'axis': '(1)'}), '([box[:, 1] - box[:, 3], box[:, 0] - box[:, 2], box[:, 3] - box[:, \n 1], box[:, 2] - box[:, 0]], axis=1)\n', (770, 877), True, 'import numpy as np\n'), ((980, 1108), 'numpy.stack', 'np.stack', (['[arr[:, 0] + arr[:, 2], arr[:, 1] + arr[:, 3], (-arr[:, 0] + arr[:, 2]) / 2,\n (-arr[:, 1] + arr[:, 3]) / 2]'], {'axis': '(1)'}), '([arr[:, 0] + arr[:, 2], arr[:, 1] + arr[:, 3], (-arr[:, 0] + arr[:,\n 2]) / 2, (-arr[:, 1] + arr[:, 3]) / 2], axis=1)\n', (988, 1108), True, 'import numpy as np\n'), ((1262, 1326), 'numpy.stack', 'np.stack', (['[dres_df * wh[:, :, 1], dres_df * wh[:, :, 0]]'], {'axis': '(2)'}), '([dres_df * wh[:, :, 1], dres_df * wh[:, :, 0]], axis=2)\n', (1270, 1326), True, 'import numpy as np\n'), ((2438, 2461), 'numpy.ones_like', 'np.ones_like', (['src_boxes'], {}), '(src_boxes)\n', (2450, 2461), True, 'import numpy as np\n')] |
###########################################################################
# MÓDULO: VIZUALIZAÇÃO DA ESTRUTURA PARA CONFERÊNCIA DA ENTRADA DOS DADOS #
###########################################################################
import tkinter
import tkinter.messagebox
import numpy
# Window initialisation.
# NOTE(review): X, Y, nElem, nNos, j, k, UX, UY, UZ, FX, FY, MZ, qx, qy, L,
# EA, EI, sn, cs are module-level globals defined elsewhere in this program
# -- confirm against the data-input module.
wnview = tkinter.Tk()
wnview.title('Visualização da Estrutura')
wnview.resizable(width=False, height=False)
frame = tkinter.Frame(wnview)
frame.pack()
width = 800
height = 600
# Scroll region sized from the model extents (coordinates are scaled x100
# with a 100-pixel margin throughout this script).
lim_x = int(max(X) * 100) + 200
lim_y = int(max(Y) * 100) + 200
estrut = tkinter.Canvas(frame, height=height, width=width, scrollregion=(0, 0, lim_x, lim_y))
# ----------------------------------------------------------------------------------------------------------------------
# DRAWING THE STRUCTURE
# Drawing the bars (one line per element, from start node j[i] to end node k[i])
for i in range(0, nElem):
    xs = int(X[j[i]]) * 100 + 100
    ys = int(Y[j[i]]) * 100 + 100
    xe = int(X[k[i]]) * 100 + 100
    ye = int(Y[k[i]]) * 100 + 100
    estrut.create_line(xs, ys, xe, ye, width=2)
# Drawing the supports (icon chosen from the UX/UY/UZ restraint flags)
apoio1g_x = tkinter.PhotoImage(file='img/Apoio1g_x.png')
apoio1g_y = tkinter.PhotoImage(file='img/Apoio1g_y.png')
apoio2g = tkinter.PhotoImage(file='img/Apoio2g.png')
apoio3g = tkinter.PhotoImage(file='img/Apoio3g.png')
for i in range(0, nNos):
    xc = int(X[i]) * 100 + 100
    yc = int(Y[i]) * 100 + 100
    x0 = xc - 5
    y0 = yc - 5
    x1 = xc + 5
    y1 = yc + 5
    # Node number label next to the node
    estrut.create_text(xc - 20, yc + 20, font=(12), text=i + 1)
    if UX[i] == 1 and UY[i] == 1 and UZ[i] == 1:
        # Fully fixed support
        estrut.create_image(xc, yc, image=apoio3g)
    if UX[i] == 1 and UY[i] == 1 and UZ[i] == 0:
        # Pinned support
        estrut.create_image(xc, yc, image=apoio2g)
        estrut.create_oval(x0, y0, x1, y1, fill='white', width=2)
    if UX[i] == 1 and UY[i] == 0 and UZ[i] == 0:
        # Roller restraining x
        estrut.create_image(xc, yc, image=apoio1g_x)
        estrut.create_oval(x0, y0, x1, y1, fill='white', width=2)
    if UX[i] == 0 and UY[i] == 1 and UZ[i] == 0:
        # Roller restraining y
        estrut.create_image(xc, yc, image=apoio1g_y)
        estrut.create_oval(x0, y0, x1, y1, fill='white', width=2)
# ----------------------------------------------------------------------------------------------------------------------
# DRAWING THE NODAL LOADS
# Helper to draw the load arrows
def CreateSeta(direct, id_No, F):
    """Draw a nodal load at node ``id_No``: a straight arrow for forces in
    'x'/'y', or an arc with an arrowhead for a moment about 'z'. Nothing is
    drawn for F == 0; the sign of F sets the arrow orientation."""
    if F != 0:
        xn = X[id_No] * 100 + 100
        yn = Y[id_No] * 100 + 100
        if direct == 'x':
            x2 = xn + (F / abs(F)) * 80
            y2 = yn
            x3 = x2 - (F / abs(F)) * 10
            y3 = y2 + (F / abs(F)) * 10
            x4 = x2 - (F / abs(F)) * 10
            y4 = y2 - (F / abs(F)) * 10
            estrut.create_line(xn, yn, x2, y2, x3, y3, x2, y2, x4, y4, fill='red', width=2)
            estrut.create_text(x2 + 20, y2 + 20, text=abs(F), fill='red')
        if direct == 'y':
            x2 = xn
            y2 = yn + (F / abs(F)) * 80
            x3 = x2 - (F / abs(F)) * 10
            y3 = y2 - (F / abs(F)) * 10
            x4 = x2 + (F / abs(F)) * 10
            y4 = y2 - (F / abs(F)) * 10
            estrut.create_line(xn, yn, x2, y2, x3, y3, x2, y2, x4, y4, fill='red', width=2)
            estrut.create_text(x2 + 20, y2 + 20, text=abs(F), fill='red')
        if direct == 'z':
            # Moment: half-circle arc plus an arrowhead at its end
            x_ie = xn - 50
            y_ie = yn - 50
            x_sd = xn + 50
            y_sd = yn + 50
            xf = xn
            yf = yn + (F / abs(F)) * 50
            x3 = xf + 10
            y3 = yf - 10
            x4 = xf + 10
            y4 = yf + 10
            estrut.create_arc(x_ie, y_ie, x_sd, y_sd, start=-90, extent=180, outline='red', width=2, style=tkinter.ARC)
            estrut.create_line(xf, yf, x3, y3, xf, yf, x4, y4, fill='red', width=2)
            estrut.create_text(x_sd + 5, y_sd + 5, text=abs(F), fill='red')
# Drawing every nodal force (calling CreateSeta for each component)
for n in range(0, nNos):
    CreateSeta('x', n, FX[n])
    CreateSeta('y', n, FY[n])
    CreateSeta('z', n, MZ[n])
# ----------------------------------------------------------------------------------------------------------------------
# BAR PROPERTIES
def PropBar(elem, qxi, qyi, Li, EAi, EIi):
    """Write the element number, distributed loads (qx/qy) and the section
    properties (L, EA, EI) along bar ``elem``, rotated to the bar's angle."""
    ang = numpy.arcsin(sn[elem]) * 180 / numpy.pi
    # Bar midpoint in canvas coordinates
    xm = ((X[j[elem]] * 100 + 100) + (X[k[elem]] * 100 + 100)) / 2
    ym = ((Y[j[elem]] * 100 + 100) + (Y[k[elem]] * 100 + 100)) / 2
    if qxi != 0 and qyi != 0:
        estrut.create_text(xm - abs(sn[elem]) * 25, ym + abs(float(cs[elem])) * 25, angle=ang,
                           text='Barra ' + str(elem + 1) + ' || qx = ' + str(qxi) + ' || qy = ' + str(qyi),
                           fill='green', font=('Arial', 10, 'bold'))
    if qxi != 0 and qyi == 0:
        estrut.create_text(xm - abs(sn[elem]) * 25, ym + abs(float(cs[elem])) * 25, angle=ang,
                           text='Barra ' + str(elem + 1) + ' || qx = ' + str(qxi), fill='green', font=('Arial', 10, 'bold'))
    if qxi == 0 and qyi != 0:
        estrut.create_text(xm - abs(sn[elem]) * 25, ym + abs(float(cs[elem])) * 25, angle=ang,
                           text='Barra ' + str(elem + 1) + ' || qy = ' + str(qyi), fill='green', font=('Arial', 10, 'bold'))
    if qxi == 0 and qyi == 0:
        # NOTE(review): this branch uses abs(cs[elem]) without float() unlike
        # the three above -- confirm cs holds plain numbers.
        estrut.create_text(xm - abs(sn[elem]) * 25, ym + abs(cs[elem]) * 25, angle=ang,
                           text='Barra ' + str(elem + 1), fill='green', font=('Arial', 10, 'bold'))
    estrut.create_text(xm + abs(sn[elem]) * 25, ym - abs(float(cs[elem])) * 25,
                       angle=ang, text='L = ' + str(Li) + ' || EA = ' + str(EAi) + ' || EI = ' + str(EIi), fill='green', font=('Arial', 10, 'bold'))
# Labelling the bar properties (calling PropBar per element)
for i in range(0, nElem):
    PropBar(i, float(qx[i]), float(qy[i]), numpy.around(L[i], decimals=2), float(EA[i]), float(EI[i]))
# ----------------------------------------------------------------------------------------------------------------------
# FITTING THE IMAGE TO THE SCREEN
# Flip the y axis (canvas y grows downward) and scale everything to fit.
estrut.scale('all', 0, 0, 1, -1)
fWidth = int(max(X, key=float)) * 100 + 250
fHeight = int(max(Y, key=float)) * 100 + 250
estrut.move('all', 0, fHeight)
sc = min(width / fWidth, height / fHeight)
estrut.scale('all', 0, 0, sc, sc)
estrut.pack(fill='both')
# ----------------------------------------------------------------------------------------------------------------------
# BUTTONS AND THEIR COMMAND CALLBACKS
def accept():
    """Close the preview window so the analysis continues."""
    wnview.destroy()
def decline():
    """Abort the program so the user can redefine the input file."""
    tkinter.messagebox.showinfo(title='Fechando...', message='Programa interrompido pelo usuário!\n'
                                                             'Redefinir o arquivo de entrada de dados!')
    quit()
# Button definitions
acc = tkinter.Button(wnview, text="Continuar análise!", command=accept, width=30)
acc.pack(side=tkinter.LEFT)
dec = tkinter.Button(wnview, text="Redefinir estrutura!", command=decline, width=30)
dec.pack(side=tkinter.RIGHT)
wnview.eval('tk::PlaceWindow . center')
wnview.mainloop()
| [
"tkinter.PhotoImage",
"tkinter.Canvas",
"tkinter.Button",
"tkinter.messagebox.showinfo",
"numpy.arcsin",
"numpy.around",
"tkinter.Frame",
"tkinter.Tk"
] | [((318, 330), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (328, 330), False, 'import tkinter\n'), ((425, 446), 'tkinter.Frame', 'tkinter.Frame', (['wnview'], {}), '(wnview)\n', (438, 446), False, 'import tkinter\n'), ((558, 646), 'tkinter.Canvas', 'tkinter.Canvas', (['frame'], {'height': 'height', 'width': 'width', 'scrollregion': '(0, 0, lim_x, lim_y)'}), '(frame, height=height, width=width, scrollregion=(0, 0, lim_x,\n lim_y))\n', (572, 646), False, 'import tkinter\n'), ((1053, 1097), 'tkinter.PhotoImage', 'tkinter.PhotoImage', ([], {'file': '"""img/Apoio1g_x.png"""'}), "(file='img/Apoio1g_x.png')\n", (1071, 1097), False, 'import tkinter\n'), ((1110, 1154), 'tkinter.PhotoImage', 'tkinter.PhotoImage', ([], {'file': '"""img/Apoio1g_y.png"""'}), "(file='img/Apoio1g_y.png')\n", (1128, 1154), False, 'import tkinter\n'), ((1165, 1207), 'tkinter.PhotoImage', 'tkinter.PhotoImage', ([], {'file': '"""img/Apoio2g.png"""'}), "(file='img/Apoio2g.png')\n", (1183, 1207), False, 'import tkinter\n'), ((1218, 1260), 'tkinter.PhotoImage', 'tkinter.PhotoImage', ([], {'file': '"""img/Apoio3g.png"""'}), "(file='img/Apoio3g.png')\n", (1236, 1260), False, 'import tkinter\n'), ((6685, 6760), 'tkinter.Button', 'tkinter.Button', (['wnview'], {'text': '"""Continuar análise!"""', 'command': 'accept', 'width': '(30)'}), "(wnview, text='Continuar análise!', command=accept, width=30)\n", (6699, 6760), False, 'import tkinter\n'), ((6795, 6873), 'tkinter.Button', 'tkinter.Button', (['wnview'], {'text': '"""Redefinir estrutura!"""', 'command': 'decline', 'width': '(30)'}), "(wnview, text='Redefinir estrutura!', command=decline, width=30)\n", (6809, 6873), False, 'import tkinter\n'), ((6442, 6592), 'tkinter.messagebox.showinfo', 'tkinter.messagebox.showinfo', ([], {'title': '"""Fechando..."""', 'message': '"""Programa interrompido pelo usuário!\nRedefinir o arquivo de entrada de dados!"""'}), '(title=\'Fechando...\', message=\n """Programa interrompido pelo usuário!\nRedefinir o arquivo de 
entrada de dados!"""\n )\n', (6469, 6592), False, 'import tkinter\n'), ((5745, 5775), 'numpy.around', 'numpy.around', (['L[i]'], {'decimals': '(2)'}), '(L[i], decimals=2)\n', (5757, 5775), False, 'import numpy\n'), ((4178, 4200), 'numpy.arcsin', 'numpy.arcsin', (['sn[elem]'], {}), '(sn[elem])\n', (4190, 4200), False, 'import numpy\n')] |
import numpy as np
import scipy.cluster.hierarchy as hy
import matplotlib.pyplot as plt
# Generate random features and distance matrix.
def clusters(number=20, cnumber=10, csize=10):
    """Generate a random 3-D point cloud made of ``cnumber`` gaussian clusters.

    Parameters
    ----------
    number : int
        Baseline point count; the first cluster has exactly ``number`` points
        and each further cluster draws its size from ``[5, number/2]``.
    cnumber : int
        Number of clusters (the first one is centred on the origin).
    csize : float
        Spread of each cluster and scale of the random cluster-centre offsets.

    Returns
    -------
    numpy.ndarray
        Stacked ``(n_points, 3)`` array of all cluster samples.
    """
    # Column 0: per-cluster size factors; column 1: radius factors.
    rnum = np.random.rand(cnumber, 2)
    rn = (rnum[:, 0] * number).astype(int)
    # Clamp the cluster sizes into [5, number/2].
    rn[rn < 5] = 5
    rn[rn > number / 2.] = round(number / 2., 0)
    # 'ra' is computed but never used below -- kept for parity with the source example.
    ra = rnum[:, 1] * 2.9
    ra[ra < 1.5] = 1.5
    # First cluster: gaussian blob centred on the origin.
    cls = np.random.randn(number, 3) * csize
    # Random multipliers for the central point of each remaining cluster.
    rxyz = np.random.randn(cnumber - 1, 3)
    # BUGFIX: the original used Python-2 ``xrange``, which raises NameError on
    # Python 3; ``range`` is the equivalent form.
    for i in range(cnumber - 1):
        tmp = np.random.randn(rn[i + 1], 3)
        # Shift the gaussian blob to its randomly chosen centre.
        x = tmp[:, 0] + (rxyz[i, 0] * csize)
        y = tmp[:, 1] + (rxyz[i, 1] * csize)
        z = tmp[:, 2] + (rxyz[i, 2] * csize)
        cls = np.vstack([cls, np.column_stack([x, y, z])])
    return cls
# Here we define a function to collect the coordinates of
# each point of the different clusters.
def group(data, index):
    """Split *data* into per-cluster sub-arrays keyed by the labels in *index*.

    Returns one array per distinct label, in sorted label order.
    """
    labels = np.unique(index)
    return [data[index == label] for label in labels]
# Build a synthetic "cluster of clusters" data set.
data = clusters()
# Hierarchical linkage on the first two coordinates only.
Y = hy.linkage(data[:, 0:2], method='complete')
# Extract flat clusters from the hierarchy with fcluster, using the same
# cutoff value as for the dendrogram in the previous example
# (complete-linkage method).
cutoff = 0.3 * np.max(Y[:, 2])
index = hy.fcluster(Y, cutoff, 'distance')
# Collect the points belonging to each flat cluster.
groups = group(data, index)
# Scatter-plot every cluster in its own colour (colours are cycled when
# there are more clusters than colours) with both axes hidden.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
colors = ['r', 'c', 'b', 'g', 'orange', 'k', 'y', 'gray']
for pos, pts in enumerate(groups):
    shade = colors[np.mod(pos, len(colors))]
    ax.scatter(pts[:, 0], pts[:, 1], c=shade, edgecolor='none', s=50)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
fig.savefig('scipy_352_ex2.pdf', bbox='tight')
| [
"scipy.cluster.hierarchy.fcluster",
"numpy.random.randn",
"scipy.cluster.hierarchy.linkage",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.where",
"numpy.column_stack",
"numpy.random.rand",
"numpy.vstack",
"numpy.unique"
] | [((1282, 1324), 'scipy.cluster.hierarchy.linkage', 'hy.linkage', (['cls[:, 0:2]'], {'method': '"""complete"""'}), "(cls[:, 0:2], method='complete')\n", (1292, 1324), True, 'import scipy.cluster.hierarchy as hy\n'), ((1609, 1643), 'scipy.cluster.hierarchy.fcluster', 'hy.fcluster', (['Y', 'cutoff', '"""distance"""'], {}), "(Y, cutoff, 'distance')\n", (1620, 1643), True, 'import scipy.cluster.hierarchy as hy\n'), ((1777, 1803), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1787, 1803), True, 'import matplotlib.pyplot as plt\n'), ((266, 292), 'numpy.random.rand', 'np.random.rand', (['cnumber', '(2)'], {}), '(cnumber, 2)\n', (280, 292), True, 'import numpy as np\n'), ((606, 637), 'numpy.random.randn', 'np.random.randn', (['(cnumber - 1)', '(3)'], {}), '(cnumber - 1, 3)\n', (621, 637), True, 'import numpy as np\n'), ((1079, 1095), 'numpy.unique', 'np.unique', (['index'], {}), '(index)\n', (1088, 1095), True, 'import numpy as np\n'), ((1585, 1600), 'numpy.max', 'np.max', (['Y[:, 2]'], {}), '(Y[:, 2])\n', (1591, 1600), True, 'import numpy as np\n'), ((353, 369), 'numpy.where', 'np.where', (['(rn < 5)'], {}), '(rn < 5)\n', (361, 369), True, 'import numpy as np\n'), ((382, 409), 'numpy.where', 'np.where', (['(rn > number / 2.0)'], {}), '(rn > number / 2.0)\n', (390, 409), True, 'import numpy as np\n'), ((468, 486), 'numpy.where', 'np.where', (['(ra < 1.5)'], {}), '(ra < 1.5)\n', (476, 486), True, 'import numpy as np\n'), ((505, 531), 'numpy.random.randn', 'np.random.randn', (['number', '(3)'], {}), '(number, 3)\n', (520, 531), True, 'import numpy as np\n'), ((686, 715), 'numpy.random.randn', 'np.random.randn', (['rn[i + 1]', '(3)'], {}), '(rn[i + 1], 3)\n', (701, 715), True, 'import numpy as np\n'), ((865, 891), 'numpy.column_stack', 'np.column_stack', (['[x, y, z]'], {}), '([x, y, z])\n', (880, 891), True, 'import numpy as np\n'), ((906, 927), 'numpy.vstack', 'np.vstack', (['[cls, tmp]'], {}), '([cls, tmp])\n', (915, 927), 
True, 'import numpy as np\n')] |
import os
import json
import yaml
import argparse
import numpy as np
from math import log
import dgl
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
from tqdm import tqdm
from torch import nn
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from bisect import bisect
from util.vocabulary import Vocabulary
from util.checkpointing import CheckpointManager, load_checkpoint
from model.model import CMGCNnet
from model.v7w_traindataset import V7WTrainDataset
from model.v7w_testdataset import V7WTestDataset
from model.fvqa_traindataset import FvqaTrainDataset
from model.fvqa_testdataset import FvqaTestDataset
from model.okvqa_traindataset import OkvqaTrainDataset
from model.okvqa_testdataset import OkvqaTestDataset
# Module-level per-question-type counters shared between train() and cal_acc():
# que_types_dict     -- how many validation questions of each type were seen.
# que_types_res_dict -- how many of them were answered correctly (top-1).
que_types_dict = {"eight": 0, "nine": 0, "four": 0, "six": 0, "two": 0,
                  "other": 0, "one": 0, "five": 0, "ten": 0, "seven": 0, "three": 0}
que_types_res_dict = {"eight": 0, "nine": 0, "four": 0, "six": 0, "two": 0,
                      "other": 0, "one": 0, "five": 0, "ten": 0, "seven": 0, "three": 0}
def train():
    """Entry point: set up and evaluate the CMGCN model on a VQA dataset.

    Command-line flags select the dataset (v7w / fvqa / okvqa), GPU ids and an
    optional checkpoint to resume from; dataset and solver settings come from
    a per-dataset YAML config.  With --validate, a saved model is evaluated
    and top-1 accuracy is reported overall and per question type.
    """
    # ============================================================================================
    # (1) Input Arguments
    # ============================================================================================
    parser = argparse.ArgumentParser()
    parser.add_argument("--cpu-workers", type=int, default=4, help="Number of CPU workers for dataloader.")
    # Directory where checkpoint snapshots are stored.
    parser.add_argument("--save-dirpath", default="exp_v7w/testcheckpoints", help="Path of directory to create checkpoint directory and save checkpoints.")
    # Path of a previously saved model to resume training from.
    parser.add_argument("--load-pthpath", default="", help="To continue training, path to .pth file of saved checkpoint.")
    parser.add_argument("--overfit", action="store_true", help="Whether to validate on val split after every epoch.")
    parser.add_argument("--validate", action="store_true", help="Whether to validate on val split after every epoch.")
    parser.add_argument("--gpu-ids", nargs="+", type=int, default=0, help="List of ids of GPUs to use.")
    parser.add_argument("--dataset", default="v7w", help="dataset that model training on")
    # Fix manual seeds so runs are reproducible.
    torch.manual_seed(10)
    torch.cuda.manual_seed(10)
    cudnn.benchmark = True
    cudnn.deterministic = True
    args = parser.parse_args()
    # ============================================================================================
    # (2) Input config file
    # ============================================================================================
    # NOTE(review): config paths are hard-coded absolute paths and will only
    # resolve on the machine they were written for.
    if (args.dataset == 'v7w'):
        config_path = '/home/data1/yjgroup/data_zzh/pr_v7w_memory/model/config_v7w.yml'
    elif(args.dataset == 'fvqa'):
        config_path = '/home/data1/yjgroup/data_zzh/pr_v7w_memory/model/config_fvqa.yml'
    elif(args.dataset == 'okvqa'):
        config_path = '/home/data1/yjgroup/data_zzh/pr_okvqa_memory/model/config_okvqa.yml'
    config = yaml.load(open(config_path))
    if isinstance(args.gpu_ids, int):
        args.gpu_ids = [args.gpu_ids]
    # Run on the first requested GPU, or fall back to CPU for a negative id.
    device = torch.device("cuda", args.gpu_ids[0]) if args.gpu_ids[0] >= 0 else torch.device("cpu")
    # device = torch.device("cuda:0") if args.gpus != "cpu" else torch.device("cpu")
    # Print config and args.
    print(yaml.dump(config, default_flow_style=False))
    for arg in vars(args):
        print("{:<20}: {}".format(arg, getattr(args, arg)))
    # ============================================================================================
    # Setup Dataset, Dataloader
    # ============================================================================================
    if (args.dataset == 'v7w'):
        print('Loading V7WTrainDataset...')
        train_dataset = V7WTrainDataset(config, overfit=args.overfit, in_memory=True)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=config['solver']['batch_size'],
                                      num_workers=args.cpu_workers,
                                      shuffle=True,
                                      collate_fn=collate_fn)
        if args.validate:
            print('Loading V7WTestDataset...')
            val_dataset = V7WTestDataset(config, overfit=args.overfit, in_memory=True)
            val_dataloader = DataLoader(val_dataset,
                                        batch_size=config['solver']['batch_size'],
                                        num_workers=args.cpu_workers,
                                        shuffle=True,
                                        collate_fn=collate_fn)
    elif (args.dataset == 'fvqa'):
        print('Loading FVQATrainDataset...')
        train_dataset = FvqaTrainDataset(config, overfit=args.overfit)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=config['solver']['batch_size'],
                                      num_workers=args.cpu_workers,
                                      shuffle=True,
                                      collate_fn=collate_fn)
        if args.validate:
            print('Loading FVQATestDataset...')
            val_dataset = FvqaTestDataset(config, overfit=args.overfit)
            val_dataloader = DataLoader(val_dataset,
                                        batch_size=config['solver']['batch_size'],
                                        num_workers=args.cpu_workers,
                                        shuffle=True,
                                        collate_fn=collate_fn)
    elif (args.dataset == 'okvqa'):
        # NOTE(review): no training dataset is built for okvqa here -- this
        # branch only supports --validate.
        if args.validate:
            print('Loading OKVQATestDataset...')
            val_dataset = OkvqaTestDataset(config, overfit=args.overfit, in_memory=True)
            val_dataloader = DataLoader(val_dataset,
                                        batch_size=config['solver']['batch_size'],
                                        num_workers=args.cpu_workers,
                                        shuffle=True,
                                        collate_fn=collate_fn)
    # Load the vocabulary and the pre-trained GloVe vectors used by the model.
    print('Loading glove...')
    glovevocabulary = Vocabulary(config["dataset"]["word_counts_json"], min_count=config["dataset"]["vocab_min_count"])
    glove = np.load(config['dataset']['glove_vec_path'])
    glove = torch.Tensor(glove)
    # ================================================================================================
    # Setup Model & multi GPUs
    # ================================================================================================
    print('Building Model...')
    model = CMGCNnet(config,
                     que_vocabulary=glovevocabulary,
                     glove=glove,
                     device=device)
    model = model.to(device)
    # Wrap in DataParallel when several valid GPU ids were requested.
    if -1 not in args.gpu_ids and len(args.gpu_ids) > 1:
        model = nn.DataParallel(model, args.gpu_ids)
    # ================================================================================================
    # Setup Before Training Loop
    # ================================================================================================
    # If loading from checkpoint, adjust start epoch and load parameters.
    if args.load_pthpath == "":
        start_epoch = 0
    else:
        # The epoch number is parsed from the checkpoint filename
        # (".../checkpoint_<epoch>.pth").
        start_epoch = int(args.load_pthpath.split("_")[-1][:-4])
        model_state_dict, optimizer_state_dict = load_checkpoint(args.load_pthpath)
        if isinstance(model, nn.DataParallel):
            model.module.load_state_dict(model_state_dict)
        else:
            model.load_state_dict(model_state_dict)
        print("Loading resume model from {}...".format(args.load_pthpath))
        # NOTE(review): optimizer_state_dict and start_epoch are not used in the
        # validation-only flow below.
    if args.validate:
        model.eval()
        answers = []    # ground-truth answer node ids, one per question
        preds = []      # predicted per-node scores, one tensor per question
        que_types = []  # question type string, one per question
        for i, batch in enumerate(tqdm(val_dataloader)):
            # Count how many questions of each type are evaluated.
            for que_type in batch['question_type_list']:
                que_types_dict[que_type] = que_types_dict[que_type] + 1
            with torch.no_grad():
                fact_batch_graph = model(batch)
            # Split the batched graph back into one fact graph per question.
            fact_graphs = dgl.unbatch(fact_batch_graph)
            for i, fact_graph in enumerate(fact_graphs):
                pred = fact_graph.ndata['h'].squeeze()
                preds.append(pred)
                answers.append(batch['facts_answer_id_list'][i])
            que_types = que_types+batch['question_type_list']
        # calculate top@1,top@3
        acc_1 = cal_acc(answers, preds, que_types=que_types)
        print("acc@1={:.2%} ".format(acc_1))
        torch.cuda.empty_cache()
        # Report per-question-type accuracy from the module-level counters.
        cal_type_acc(que_types_dict, que_types_res_dict)
    print('finished !!!')
def cal_type_acc(que_types_dict, que_types_res_dict):
    """Print the per-question-type accuracy, in percent.

    Parameters
    ----------
    que_types_dict : dict
        Question type -> number of questions of that type that were seen.
    que_types_res_dict : dict
        Question type -> number of those questions answered correctly.
    """
    for qt in que_types_dict:
        total = que_types_dict[qt]
        # BUGFIX: guard against question types that never occurred in the
        # evaluated split -- the original raised ZeroDivisionError for them.
        if total == 0:
            print(qt, 'n/a (no questions of this type)')
            continue
        acc = que_types_res_dict[qt] / total
        print(qt, acc * 100)
def cal_batch_loss(fact_batch_graph, batch, device, pos_weight, neg_weight):
    """Mean per-graph weighted BCE loss over one batch of fact graphs.

    Each graph's node scores (ndata['h']) are compared against the binary
    answer labels in batch['facts_answer_list']; positive and negative nodes
    are weighted by pos_weight / neg_weight respectively.
    """
    targets = batch['facts_answer_list']
    graphs = dgl.unbatch(fact_batch_graph)
    total = torch.tensor(0).to(device)
    for idx, graph in enumerate(graphs):
        # Per-class weights: index 0 -> negative label, index 1 -> positive label.
        class_weight = torch.FloatTensor([neg_weight, pos_weight])
        scores = graph.ndata['h'].view(1, -1).squeeze()  # (n,) node scores
        target = targets[idx].view(1, -1).to(device).squeeze()
        node_weight = class_weight[target.long()].to(device)
        criterion = torch.nn.BCELoss(weight=node_weight)
        total = total + criterion(scores, target)
    return total / len(targets)
def cal_acc(answers, preds, que_types):
    """Return the top-1 accuracy over all predictions.

    For every correct answer the module-level counter que_types_res_dict is
    also incremented for that question's type.  Predictions whose top-k fails
    with a RuntimeError are skipped but still count in the denominator.
    """
    total = len(preds)
    hits = 0
    for pos, gold in enumerate(answers):
        scores = preds[pos]  # per-node scores, shape (num_nodes,)
        try:
            # top@1
            _, best = torch.topk(scores, k=1)
        except RuntimeError:
            continue
        if best.item() == gold:
            hits += 1
            que_types_res_dict[que_types[pos]] += 1
    return hits / total
def collate_fn(batch):
    """Collate a list of per-sample dicts into a dict of per-field lists.

    Every field is kept as a plain Python list (no padding or stacking);
    graph construction and batching happen later in the model.
    """
    # Output key -> input key of each sample dict.  Note the historical
    # quirk: image features are exposed under 'features_list'.
    field_map = [
        ('id_list', 'id'),
        ('question_list', 'question'),
        ('question_length_list', 'question_length'),
        ('features_list', 'img_features'),
        ('img_relations_list', 'img_relations'),
        ('facts_num_nodes_list', 'facts_num_nodes'),
        ('facts_node_features_list', 'facts_node_features'),
        ('facts_e1ids_list', 'facts_e1ids'),
        ('facts_e2ids_list', 'facts_e2ids'),
        ('facts_answer_list', 'facts_answer'),
        ('facts_answer_id_list', 'facts_answer_id'),
        ('semantic_node_features_list', 'semantic_node_features'),
        ('semantic_e1ids_list', 'semantic_e1ids'),
        ('semantic_e2ids_list', 'semantic_e2ids'),
        ('semantic_edge_features_list', 'semantic_edge_features'),
        ('semantic_num_nodes_list', 'semantic_num_nodes'),
        ('question_type_list', 'question_type'),
    ]
    return {out_key: [item[in_key] for item in batch]
            for out_key, in_key in field_map}
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run training / validation.
    train()
| [
"numpy.load",
"model.fvqa_testdataset.FvqaTestDataset",
"argparse.ArgumentParser",
"yaml.dump",
"util.checkpointing.load_checkpoint",
"dgl.unbatch",
"torch.device",
"torch.no_grad",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"model.model.CMGCNnet",
"torch.FloatTensor",
"util.vocabula... | [((1503, 1528), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1526, 1528), False, 'import argparse\n'), ((2408, 2429), 'torch.manual_seed', 'torch.manual_seed', (['(10)'], {}), '(10)\n', (2425, 2429), False, 'import torch\n'), ((2434, 2460), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(10)'], {}), '(10)\n', (2456, 2460), False, 'import torch\n'), ((6399, 6501), 'util.vocabulary.Vocabulary', 'Vocabulary', (["config['dataset']['word_counts_json']"], {'min_count': "config['dataset']['vocab_min_count']"}), "(config['dataset']['word_counts_json'], min_count=config[\n 'dataset']['vocab_min_count'])\n", (6409, 6501), False, 'from util.vocabulary import Vocabulary\n'), ((6509, 6553), 'numpy.load', 'np.load', (["config['dataset']['glove_vec_path']"], {}), "(config['dataset']['glove_vec_path'])\n", (6516, 6553), True, 'import numpy as np\n'), ((6566, 6585), 'torch.Tensor', 'torch.Tensor', (['glove'], {}), '(glove)\n', (6578, 6585), False, 'import torch\n'), ((6901, 6977), 'model.model.CMGCNnet', 'CMGCNnet', (['config'], {'que_vocabulary': 'glovevocabulary', 'glove': 'glove', 'device': 'device'}), '(config, que_vocabulary=glovevocabulary, glove=glove, device=device)\n', (6909, 6977), False, 'from model.model import CMGCNnet\n'), ((9292, 9321), 'dgl.unbatch', 'dgl.unbatch', (['fact_batch_graph'], {}), '(fact_batch_graph)\n', (9303, 9321), False, 'import dgl\n'), ((3312, 3349), 'torch.device', 'torch.device', (['"""cuda"""', 'args.gpu_ids[0]'], {}), "('cuda', args.gpu_ids[0])\n", (3324, 3349), False, 'import torch\n'), ((3379, 3398), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3391, 3398), False, 'import torch\n'), ((3525, 3568), 'yaml.dump', 'yaml.dump', (['config'], {'default_flow_style': '(False)'}), '(config, default_flow_style=False)\n', (3534, 3568), False, 'import yaml\n'), ((4022, 4083), 'model.v7w_traindataset.V7WTrainDataset', 'V7WTrainDataset', (['config'], {'overfit': 'args.overfit', 
'in_memory': '(True)'}), '(config, overfit=args.overfit, in_memory=True)\n', (4037, 4083), False, 'from model.v7w_traindataset import V7WTrainDataset\n'), ((4111, 4250), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': "config['solver']['batch_size']", 'num_workers': 'args.cpu_workers', 'shuffle': '(True)', 'collate_fn': 'collate_fn'}), "(train_dataset, batch_size=config['solver']['batch_size'],\n num_workers=args.cpu_workers, shuffle=True, collate_fn=collate_fn)\n", (4121, 4250), False, 'from torch.utils.data import DataLoader\n'), ((7145, 7181), 'torch.nn.DataParallel', 'nn.DataParallel', (['model', 'args.gpu_ids'], {}), '(model, args.gpu_ids)\n', (7160, 7181), False, 'from torch import nn\n'), ((7715, 7749), 'util.checkpointing.load_checkpoint', 'load_checkpoint', (['args.load_pthpath'], {}), '(args.load_pthpath)\n', (7730, 7749), False, 'from util.checkpointing import CheckpointManager, load_checkpoint\n'), ((8860, 8884), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (8882, 8884), False, 'import torch\n'), ((9439, 9482), 'torch.FloatTensor', 'torch.FloatTensor', (['[neg_weight, pos_weight]'], {}), '([neg_weight, pos_weight])\n', (9456, 9482), False, 'import torch\n'), ((9730, 9761), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {'weight': 'weight'}), '(weight=weight)\n', (9746, 9761), False, 'import torch\n'), ((4499, 4559), 'model.v7w_testdataset.V7WTestDataset', 'V7WTestDataset', (['config'], {'overfit': 'args.overfit', 'in_memory': '(True)'}), '(config, overfit=args.overfit, in_memory=True)\n', (4513, 4559), False, 'from model.v7w_testdataset import V7WTestDataset\n'), ((4589, 4726), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': "config['solver']['batch_size']", 'num_workers': 'args.cpu_workers', 'shuffle': '(True)', 'collate_fn': 'collate_fn'}), "(val_dataset, batch_size=config['solver']['batch_size'],\n num_workers=args.cpu_workers, shuffle=True, 
collate_fn=collate_fn)\n", (4599, 4726), False, 'from torch.utils.data import DataLoader\n'), ((4988, 5034), 'model.fvqa_traindataset.FvqaTrainDataset', 'FvqaTrainDataset', (['config'], {'overfit': 'args.overfit'}), '(config, overfit=args.overfit)\n', (5004, 5034), False, 'from model.fvqa_traindataset import FvqaTrainDataset\n'), ((5062, 5201), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': "config['solver']['batch_size']", 'num_workers': 'args.cpu_workers', 'shuffle': '(True)', 'collate_fn': 'collate_fn'}), "(train_dataset, batch_size=config['solver']['batch_size'],\n num_workers=args.cpu_workers, shuffle=True, collate_fn=collate_fn)\n", (5072, 5201), False, 'from torch.utils.data import DataLoader\n'), ((8144, 8164), 'tqdm.tqdm', 'tqdm', (['val_dataloader'], {}), '(val_dataloader)\n', (8148, 8164), False, 'from tqdm import tqdm\n'), ((8406, 8435), 'dgl.unbatch', 'dgl.unbatch', (['fact_batch_graph'], {}), '(fact_batch_graph)\n', (8417, 8435), False, 'import dgl\n'), ((9339, 9354), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (9351, 9354), False, 'import torch\n'), ((10107, 10128), 'torch.topk', 'torch.topk', (['pred'], {'k': '(1)'}), '(pred, k=1)\n', (10117, 10128), False, 'import torch\n'), ((5451, 5496), 'model.fvqa_testdataset.FvqaTestDataset', 'FvqaTestDataset', (['config'], {'overfit': 'args.overfit'}), '(config, overfit=args.overfit)\n', (5466, 5496), False, 'from model.fvqa_testdataset import FvqaTestDataset\n'), ((5526, 5663), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': "config['solver']['batch_size']", 'num_workers': 'args.cpu_workers', 'shuffle': '(True)', 'collate_fn': 'collate_fn'}), "(val_dataset, batch_size=config['solver']['batch_size'],\n num_workers=args.cpu_workers, shuffle=True, collate_fn=collate_fn)\n", (5536, 5663), False, 'from torch.utils.data import DataLoader\n'), ((8314, 8329), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8327, 8329), False, 'import 
torch\n'), ((5959, 6021), 'model.okvqa_testdataset.OkvqaTestDataset', 'OkvqaTestDataset', (['config'], {'overfit': 'args.overfit', 'in_memory': '(True)'}), '(config, overfit=args.overfit, in_memory=True)\n', (5975, 6021), False, 'from model.okvqa_testdataset import OkvqaTestDataset\n'), ((6051, 6188), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': "config['solver']['batch_size']", 'num_workers': 'args.cpu_workers', 'shuffle': '(True)', 'collate_fn': 'collate_fn'}), "(val_dataset, batch_size=config['solver']['batch_size'],\n num_workers=args.cpu_workers, shuffle=True, collate_fn=collate_fn)\n", (6061, 6188), False, 'from torch.utils.data import DataLoader\n')] |
import numpy as np
from scipy.spatial.distance import pdist, squareform
"""
Compute the KSD divergence using samples, adapted from the theano code
"""
def KSD(z, Sqx):
    """Kernelized Stein discrepancy estimate (RBF kernel, U-statistic).

    z   : (K, d) array of samples.
    Sqx : (K, d) array of score-function values at z.
    """
    n_samples, dim = z.shape
    # Squared pairwise distances; bandwidth h^2 via the median heuristic.
    d2 = squareform(pdist(z)) ** 2
    h2 = 0.5 * np.median(d2) / np.log(n_samples + 1.0)
    kernel = np.exp(-d2 / h2 / 2.0)
    # Gradient terms of the Stein kernel.
    grad_y = np.dot(Sqx, z.T) - np.tile(np.sum(Sqx * z, 1, keepdims=True), (1, n_samples))
    grad_y = -grad_y / h2
    grad_x = grad_y.T
    trace_term = -d2 / (h2 ** 2) + dim / h2
    # stein is a (K, K) matrix of pairwise Stein-kernel evaluations.
    stein = (np.dot(Sqx, Sqx.T) + grad_y + grad_x + trace_term) * kernel
    # U-statistic: drop the diagonal before averaging.
    off_diag = stein - np.diag(np.diag(stein))
    return np.sum(off_diag) / (n_samples * (n_samples - 1))
| [
"numpy.sum",
"numpy.log",
"numpy.median",
"scipy.spatial.distance.squareform",
"scipy.spatial.distance.pdist",
"numpy.exp",
"numpy.dot",
"numpy.diag"
] | [((247, 255), 'scipy.spatial.distance.pdist', 'pdist', (['z'], {}), '(z)\n', (252, 255), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((331, 354), 'numpy.median', 'np.median', (['pdist_square'], {}), '(pdist_square)\n', (340, 354), True, 'import numpy as np\n'), ((411, 449), 'numpy.exp', 'np.exp', (['(-pdist_square / h_square / 2.0)'], {}), '(-pdist_square / h_square / 2.0)\n', (417, 449), True, 'import numpy as np\n'), ((276, 295), 'scipy.spatial.distance.squareform', 'squareform', (['sq_dist'], {}), '(sq_dist)\n', (286, 295), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((386, 401), 'numpy.log', 'np.log', (['(K + 1.0)'], {}), '(K + 1.0)\n', (392, 401), True, 'import numpy as np\n'), ((489, 505), 'numpy.dot', 'np.dot', (['Sqx', 'z.T'], {}), '(Sqx, z.T)\n', (495, 505), True, 'import numpy as np\n'), ((847, 857), 'numpy.sum', 'np.sum', (['M2'], {}), '(M2)\n', (853, 857), True, 'import numpy as np\n'), ((516, 549), 'numpy.sum', 'np.sum', (['(Sqx * z)', '(1)'], {'keepdims': '(True)'}), '(Sqx * z, 1, keepdims=True)\n', (522, 549), True, 'import numpy as np\n'), ((823, 833), 'numpy.diag', 'np.diag', (['M'], {}), '(M)\n', (830, 833), True, 'import numpy as np\n'), ((713, 731), 'numpy.dot', 'np.dot', (['Sqx', 'Sqx.T'], {}), '(Sqx, Sqx.T)\n', (719, 731), True, 'import numpy as np\n')] |
'''
A simple example showing how to train the convincingness model and use it for prediction.

This script trains a model on the UKPConvArgStrict dataset. So, before running this script, you need to run
"python/analysis/habernal_comparison/run_preprocessing.py" to extract the linguistic features from this dataset.
'''
import sys
# include the paths for the other directories
sys.path.append("./python")
sys.path.append("./python/analysis")
sys.path.append("./python/models")
sys.path.append("./python/analysis/habernal_comparison")
from data_loader import load_single_file_separate_args, load_ling_features
from embeddings import load_embeddings, get_mean_embeddings
from gp_classifier_vb import compute_median_lengthscales
from gp_pref_learning import GPPrefLearning
from run_preprocessing import preprocessing_pipeline
from tests import get_docidxs_from_ids, get_doc_token_seqs
import numpy as np
import logging
import os
from os import listdir
import vocabulary_embeddings_extractor
import pickle
import pandas as pd
# Paths and dataset configuration for the GPPL convincingness experiment.
pkl_file = './model.pkl'  # location to save the trained model to
test_data_path = './data/new_test_data'  # location of your test data file. MUST HAVE A .CSV SUFFIX
embeddings_dir = './data/'  # directory holding the pre-trained word embeddings
training_data_path = os.path.abspath("./data/")  # root of the preprocessed training data
training_dataset = 'UKPConvArgStrict'  # name of the dataset to train on
def load_train_dataset(dataset, embeddings):
    """Load the pairwise argument-preference training data for *dataset*.

    Reads every per-topic CSV under the dataset's argument_data directory and
    returns:
      item_ids    -- (n_docs, 1) array of document indices (used as features),
      a1_train    -- document index of the first argument in each pair,
      a2_train    -- document index of the second argument in each pair,
      prefs_train -- preference labels for the pairs.

    NOTE(review): the ``embeddings`` argument is accepted but never used in
    this function body -- confirm whether embedding features were intended.
    NOTE(review): ``word_to_indices_map`` is read as a module-level global set
    up in the __main__ block.
    """
    _, docids = load_ling_features(dataset, training_data_path)
    print('Number of documents: %i' % len(docids))
    data_root_dir = os.path.expanduser(training_data_path)
    csvdirname = os.path.join(data_root_dir, 'argument_data/%s-new-CSV/' % dataset)
    print(('Loading train/test data from %s...' % csvdirname))
    person_train = []
    a1_train = []
    a2_train = []
    ids_train = []
    prefs_train = []
    for file_name in listdir(csvdirname):
        if file_name.split('.')[-1] != 'csv':
            print("Skipping files without .csv suffix: %s" % csvdirname + '/' + file_name)
            continue
        # Each CSV holds the labelled argument pairs for one topic/stance.
        _, _, labels, ids, turker_ids, a1, a2 = load_single_file_separate_args(csvdirname, file_name,
                                                                               word_to_indices_map, None)
        a1_train.extend(a1)
        a2_train.extend(a2)
        person_train.extend(turker_ids)
        prefs_train.extend(labels)
        ids_train.extend(ids)
    # Pair ids are '<docid1>_<docid2>' strings; split them back into columns.
    train_ids = np.array([ids_pair.split('_') for ids_pair in ids_train])
    print('No. documents in training set: %i' % len(np.unique([train_ids[:, 0], train_ids[:, 1]])) )
    # Map the document-id strings onto indices into docids.
    a1_train = get_docidxs_from_ids(docids, train_ids[:, 0])
    a2_train = get_docidxs_from_ids(docids, train_ids[:, 1])
    uids, uidxs = np.unique((a1_train, a2_train), return_index=True)  # uidxs is unused
    item_ids = uids[:, None]  # the features are just the IDs
    return item_ids, a1_train, a2_train, prefs_train
def train_test_model(embeddings):
    """Train a GPPL model on the training dataset, pickle it, and print the
    predicted convincingness score for every document.

    Args:
        embeddings: word embedding matrix (passed through to the data loader).
    """
    # Train a model...
    # item_ids -- an Nx1 vector containing the IDs of all documents;
    # a1_train -- the ids of the first items in the pairs
    # a2_train -- the ids of the second items in the pairs
    # prefs_train -- the labels for the pairs, 1 indicates the first item is preferred, -1 indicates the second is preferred
    item_ids, a1_train, a2_train, prefs_train = load_train_dataset(training_dataset, embeddings)  # reload only if we use a new dataset
    model = GPPrefLearning(ninput_features=1, verbose=False, shape_s0=2.0, rate_s0=200.0, use_svi=True,
                           ninducing=500, max_update_size=200, kernel_func='diagonal', kernel_combination='*', delay=1)
    model.max_iter_VB = 1000
    # NOTE(review): labels are shifted by -1 to zero-centre them -- presumably
    # the raw labels lie in {0, 1, 2}; confirm against the data loader.
    model.fit(a1_train, a2_train, item_ids, np.array(prefs_train, dtype=float) - 1, optimize=False,
              input_type='zero-centered', use_median_ls=True)
    logging.info("**** Completed training GPPL ****")
    # Save the model in case we need to reload it
    with open(pkl_file, 'wb') as fh:
        pickle.dump(model, fh)
    print('Predicting ...')
    predicted_f, _ = model.predict_f()
    print('Results: id, score ')
    for i in range(len(predicted_f)):
        print('%s, %s' % (i, predicted_f[i]))
if __name__ == '__main__':
    print('This script trains a model on the UKPConvArgStrict dataset.')
    # Load the shared vocabulary and the pre-trained word embedding matrix.
    # word_to_indices_map is later read as a module-level global by
    # load_train_dataset().
    word_to_indices_map, word_index_to_embeddings_map, index_to_word_map = vocabulary_embeddings_extractor.load_all(
        embeddings_dir + 'vocabulary.embeddings.all.pkl.bz2')
    embeddings = load_embeddings(word_index_to_embeddings_map)
    train_test_model(embeddings)
"sys.path.append",
"embeddings.load_embeddings",
"os.path.abspath",
"vocabulary_embeddings_extractor.load_all",
"gp_pref_learning.GPPrefLearning",
"os.path.join",
"pickle.dump",
"tests.get_docidxs_from_ids",
"logging.info",
"numpy.array",
"data_loader.load_single_file_separate_args",
"os.path.... | [((379, 406), 'sys.path.append', 'sys.path.append', (['"""./python"""'], {}), "('./python')\n", (394, 406), False, 'import sys\n'), ((407, 443), 'sys.path.append', 'sys.path.append', (['"""./python/analysis"""'], {}), "('./python/analysis')\n", (422, 443), False, 'import sys\n'), ((444, 478), 'sys.path.append', 'sys.path.append', (['"""./python/models"""'], {}), "('./python/models')\n", (459, 478), False, 'import sys\n'), ((479, 535), 'sys.path.append', 'sys.path.append', (['"""./python/analysis/habernal_comparison"""'], {}), "('./python/analysis/habernal_comparison')\n", (494, 535), False, 'import sys\n'), ((1286, 1312), 'os.path.abspath', 'os.path.abspath', (['"""./data/"""'], {}), "('./data/')\n", (1301, 1312), False, 'import os\n'), ((1413, 1460), 'data_loader.load_ling_features', 'load_ling_features', (['dataset', 'training_data_path'], {}), '(dataset, training_data_path)\n', (1431, 1460), False, 'from data_loader import load_single_file_separate_args, load_ling_features\n'), ((1533, 1571), 'os.path.expanduser', 'os.path.expanduser', (['training_data_path'], {}), '(training_data_path)\n', (1551, 1571), False, 'import os\n'), ((1589, 1655), 'os.path.join', 'os.path.join', (['data_root_dir', "('argument_data/%s-new-CSV/' % dataset)"], {}), "(data_root_dir, 'argument_data/%s-new-CSV/' % dataset)\n", (1601, 1655), False, 'import os\n'), ((1841, 1860), 'os.listdir', 'listdir', (['csvdirname'], {}), '(csvdirname)\n', (1848, 1860), False, 'from os import listdir\n'), ((2588, 2633), 'tests.get_docidxs_from_ids', 'get_docidxs_from_ids', (['docids', 'train_ids[:, 0]'], {}), '(docids, train_ids[:, 0])\n', (2608, 2633), False, 'from tests import get_docidxs_from_ids, get_doc_token_seqs\n'), ((2649, 2694), 'tests.get_docidxs_from_ids', 'get_docidxs_from_ids', (['docids', 'train_ids[:, 1]'], {}), '(docids, train_ids[:, 1])\n', (2669, 2694), False, 'from tests import get_docidxs_from_ids, get_doc_token_seqs\n'), ((2714, 2764), 'numpy.unique', 'np.unique', 
(['(a1_train, a2_train)'], {'return_index': '(True)'}), '((a1_train, a2_train), return_index=True)\n', (2723, 2764), True, 'import numpy as np\n'), ((3399, 3593), 'gp_pref_learning.GPPrefLearning', 'GPPrefLearning', ([], {'ninput_features': '(1)', 'verbose': '(False)', 'shape_s0': '(2.0)', 'rate_s0': '(200.0)', 'use_svi': '(True)', 'ninducing': '(500)', 'max_update_size': '(200)', 'kernel_func': '"""diagonal"""', 'kernel_combination': '"""*"""', 'delay': '(1)'}), "(ninput_features=1, verbose=False, shape_s0=2.0, rate_s0=\n 200.0, use_svi=True, ninducing=500, max_update_size=200, kernel_func=\n 'diagonal', kernel_combination='*', delay=1)\n", (3413, 3593), False, 'from gp_pref_learning import GPPrefLearning\n'), ((3808, 3857), 'logging.info', 'logging.info', (['"""**** Completed training GPPL ****"""'], {}), "('**** Completed training GPPL ****')\n", (3820, 3857), False, 'import logging\n'), ((4341, 4439), 'vocabulary_embeddings_extractor.load_all', 'vocabulary_embeddings_extractor.load_all', (["(embeddings_dir + 'vocabulary.embeddings.all.pkl.bz2')"], {}), "(embeddings_dir +\n 'vocabulary.embeddings.all.pkl.bz2')\n", (4381, 4439), False, 'import vocabulary_embeddings_extractor\n'), ((4462, 4507), 'embeddings.load_embeddings', 'load_embeddings', (['word_index_to_embeddings_map'], {}), '(word_index_to_embeddings_map)\n', (4477, 4507), False, 'from embeddings import load_embeddings, get_mean_embeddings\n'), ((2069, 2154), 'data_loader.load_single_file_separate_args', 'load_single_file_separate_args', (['csvdirname', 'file_name', 'word_to_indices_map', 'None'], {}), '(csvdirname, file_name, word_to_indices_map, None\n )\n', (2099, 2154), False, 'from data_loader import load_single_file_separate_args, load_ling_features\n'), ((3954, 3976), 'pickle.dump', 'pickle.dump', (['model', 'fh'], {}), '(model, fh)\n', (3965, 3976), False, 'import pickle\n'), ((3685, 3719), 'numpy.array', 'np.array', (['prefs_train'], {'dtype': 'float'}), '(prefs_train, dtype=float)\n', (3693, 
3719), True, 'import numpy as np\n'), ((2523, 2568), 'numpy.unique', 'np.unique', (['[train_ids[:, 0], train_ids[:, 1]]'], {}), '([train_ids[:, 0], train_ids[:, 1]])\n', (2532, 2568), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from pytest import approx
from desdeov2.solver.ASF import SimpleASF
from desdeov2.solver.NumericalMethods import ScipyDE
from desdeov2.solver.ScalarSolver import (
ASFScalarSolver,
EpsilonConstraintScalarSolver,
ScalarSolverError,
WeightingMethodScalarSolver,
)
@pytest.fixture
def Scipyde_method():
    """Differential-evolution numerical method with tight tolerances."""
    options = {"tol": 0.000001, "popsize": 10, "maxiter": 50000, "polish": True}
    return ScipyDE(options)
@pytest.fixture
def WeightedCylinderSolver(CylinderProblem, Scipyde_method):
    """Weighting-method solver wired to the cylinder test problem."""
    solver = WeightingMethodScalarSolver(CylinderProblem, Scipyde_method)
    return solver
@pytest.fixture
def SimpleASFCylinderSolver(CylinderProblem, Scipyde_method):
    """ASF-based scalarizing solver wired to the cylinder test problem."""
    solver = ASFScalarSolver(CylinderProblem, Scipyde_method)
    return solver
@pytest.fixture
def EpsilonConstraintCylinderSolver(CylinderProblem, Scipyde_method):
    """Epsilon-constraint solver wired to the cylinder test problem."""
    solver = EpsilonConstraintScalarSolver(CylinderProblem, Scipyde_method)
    return solver
@pytest.fixture
def cylinder_good_decision_vectors():
    """Decision vectors inside bounds that violate no constraints."""
    vectors = [[7.5, 18], [5.1, 13.4], [11.5, 24.12]]
    return np.array(vectors)
@pytest.fixture
def cylinder_bad_decision_vectors():
    """Decision vectors that violate every constraint."""
    vectors = [[18, 7.5], [13.4, 5.1], [24.12, 11.5]]
    return np.array(vectors)
def test_weighting_evaluator_zero_weights(
    WeightedCylinderSolver, cylinder_good_decision_vectors
):
    """With all-zero weights every weighted sum must vanish."""
    solver = WeightedCylinderSolver
    solver.weights = np.zeros(3)
    sums = solver._evaluator(cylinder_good_decision_vectors)
    assert np.all(sums == approx(0.0))
def test_weighting_evaluator_ones_weights(
    WeightedCylinderSolver, cylinder_good_decision_vectors
):
    """With unit weights the evaluator returns the plain objective sums."""
    solver = WeightedCylinderSolver
    solver.weights = np.ones(3)
    values = solver._evaluator(cylinder_good_decision_vectors)
    known_sums = [1982.2033717615695, 503.733320193871, 7456.610960526498]
    assert np.all(np.isclose(values, known_sums))
def test_weighting_evaluator_even_weights(
    WeightedCylinderSolver, cylinder_good_decision_vectors
):
    """Even weights scale every objective sum by the same factor."""
    solver = WeightedCylinderSolver
    solver.weights = np.array([0.33, 0.33, 0.33])
    values = solver._evaluator(cylinder_good_decision_vectors)
    base_sums = [1982.2033717615695, 503.733320193871, 7456.610960526498]
    scaled = [0.33 * s for s in base_sums]
    assert np.all(np.isclose(values, scaled))
def test_weighting_evaluator_single_decision_vector(
    WeightedCylinderSolver, cylinder_good_decision_vectors
):
    """A single decision vector evaluates to a scalar weighted sum."""
    solver = WeightedCylinderSolver
    solver.weights = np.ones(3)
    value = solver._evaluator(cylinder_good_decision_vectors[0])
    assert value == approx(1982.2033717615695)
def test_weighting_evaluator_broken_constraints(
    WeightedCylinderSolver, cylinder_bad_decision_vectors
):
    """Constraint-violating vectors are penalized with infinity."""
    solver = WeightedCylinderSolver
    solver.weights = np.ones(3)
    values = solver._evaluator(cylinder_bad_decision_vectors)
    assert np.all(np.isclose(values, [np.inf] * 3))
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_weighting_solve_ones_weights(WeightedCylinderSolver):
    """Solving with unit weights lands near (r, h) = (5, 10)."""
    # RuntimeWarnings (mostly produced by infinities) are suppressed above.
    solver = WeightedCylinderSolver
    (variables, (objectives, constraints)) = solver.solve(np.ones(3))
    # Quite high absolute tolerance, no relative tolerance: the stochastic
    # solver's results vary considerably between runs.
    assert np.all(
        np.isclose(variables, np.array([5.0, 10.0]), rtol=0.0, atol=1.0e-1)
    )
    assert np.all(np.greater_equal(constraints, 0.0))
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_weighting_solve_single_weight(WeightedCylinderSolver):
    """Weighting only the third objective fixes the height at 15."""
    # Prefer the third objective exclusively.
    solver = WeightedCylinderSolver
    (variables, (objectives, constraints)) = solver.solve(np.array([0.0, 0.0, 1.0]))
    # Height must be 15; the radius is free because the 3rd objective
    # does not depend on it.
    assert variables[1] == approx(15.0)
    # Constraints should still hold!
    assert np.all(np.greater_equal(constraints, 0.0))
def test_epsilon_bad_epsilons(EpsilonConstraintCylinderSolver):
    """Epsilon vectors of the wrong length must be rejected."""
    solver = EpsilonConstraintCylinderSolver
    for bad_epsilons in (np.array([0.5, 1.0]), np.array([0.5, 1.0, 5.0, 2.0])):
        with pytest.raises(ScalarSolverError):
            solver.epsilons = bad_epsilons
def test_epsilon_evaluator(
    EpsilonConstraintCylinderSolver,
    cylinder_good_decision_vectors,
    cylinder_bad_decision_vectors,
):
    """Epsilon violations and broken constraints both map to infinity."""
    solver = EpsilonConstraintCylinderSolver
    solver.epsilons = np.array([np.inf, np.inf, np.inf])
    solver.to_be_minimized = 0
    assert np.all(solver._evaluator(cylinder_good_decision_vectors) > -np.inf)
    assert np.all(solver._evaluator(cylinder_bad_decision_vectors) == np.inf)
    # Tighten the third epsilon: the third good vector now violates it.
    solver.epsilons = np.array([np.inf, np.inf, 5])
    assert solver._evaluator(cylinder_good_decision_vectors)[2] == np.inf
    # Minimizing the third objective itself lifts its epsilon bound.
    solver.to_be_minimized = 2
    expected = np.array([3.0, 1.6, 9.12])
    values = solver._evaluator(cylinder_good_decision_vectors)
    assert np.all(np.isclose(values, expected))
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_epsilon_inf_eps_solve(EpsilonConstraintCylinderSolver):
    """With infinite epsilons each objective reaches its unconstrained minimum."""
    solver = EpsilonConstraintCylinderSolver
    solver.epsilons = np.array([np.inf, np.inf, np.inf])
    # Objective 0: minimize volume.
    expected_volume = np.pi * 5 ** 2 * 10
    (variables, (objectives, constraints)) = solver.solve(0)
    assert objectives[0][0] == approx(expected_volume, abs=1e-2)
    assert np.all(np.isclose(variables, np.array([5, 10])))
    assert np.all(constraints >= 0)
    # Objective 1: minimize surface area (actually maximize, hence negated).
    expected_area = -(2 * np.pi * 12.5 ** 2 + 2 * np.pi * 12.5 * 25)
    (variables, (objectives, constraints)) = solver.solve(1)
    assert objectives[0][1] == approx(expected_area, abs=1e-2)
    assert np.all(np.isclose(variables, np.array([12.5, 25])))
    assert np.all(constraints >= 0)
    # Objective 2: minimize the height difference, whose optimum is zero.
    expected_deltah = 0
    (variables, (objectives, constraints)) = solver.solve(2)
    assert objectives[0][2] == approx(expected_deltah, abs=1e-2)
    assert variables[1] == approx(15.0)
    assert np.all(constraints >= 0)
def test_asf_evaluator(
    SimpleASFCylinderSolver,
    cylinder_good_decision_vectors,
    cylinder_bad_decision_vectors,
):
    """Feasible vectors evaluate finite; infeasible ones evaluate to inf."""
    solver = SimpleASFCylinderSolver
    solver.asf = SimpleASF(np.array([1.0, 1.0, 1.0]))
    solver.reference_point = np.array([500, 500, 500])
    good_values = solver._evaluator(cylinder_good_decision_vectors)
    bad_values = solver._evaluator(cylinder_bad_decision_vectors)
    assert np.all(good_values != np.inf)
    assert np.all(bad_values == np.inf)
def test_asf_evaluator_zero_weights(
    SimpleASFCylinderSolver,
    cylinder_good_decision_vectors,
    cylinder_bad_decision_vectors,
):
    """Zero weights null the ASF for feasible vectors, not the inf penalty."""
    solver = SimpleASFCylinderSolver
    solver.asf = SimpleASF(np.zeros(3))
    solver.reference_point = np.array([500, 500, 500])
    good_values = solver._evaluator(cylinder_good_decision_vectors)
    bad_values = solver._evaluator(cylinder_bad_decision_vectors)
    assert np.all(good_values == approx(0))
    assert np.all(bad_values == np.inf)
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_asf_solve_ones_weights(SimpleASFCylinderSolver):
    """Unit ASF weights drive the solution towards (r, h) = (5, 10)."""
    solver = SimpleASFCylinderSolver
    solver.asf = SimpleASF(np.ones(3))
    (variables, (objectives, constraints)) = solver.solve(np.array([500, 500, 500]))
    assert np.all(
        np.isclose(variables, np.array([5.0, 10.0]), rtol=0.0, atol=1.0e-1)
    )
    assert np.all(np.greater_equal(constraints, 0))
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_asf_solve_reference_height(SimpleASFCylinderSolver):
    """Referencing only the height difference drives objective 3 to zero.

    All objectives other than the height difference are ignored, so the
    result must be the 3rd objective's optimal height of 15 regardless of
    the concrete reference value supplied for it.
    """
    solver = SimpleASFCylinderSolver
    solver.asf = SimpleASF(np.array([1, 1, 1]))
    (variables, (objectives, constraints)) = solver.solve(np.array([np.nan, np.nan, 6]))
    assert objectives[0][2] == approx(0, abs=1e-3)
    # The returned solution must always be feasible.
    assert np.all(np.greater_equal(constraints, 0))
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_asf_solve_reference_extreme_area_and_volume(SimpleASFCylinderSolver):
    """A near-Pareto reference in objectives 1-2 pins r=12.5 and h=25.

    The reference point sits very close to the Pareto front in the 2nd
    objective space; the 1st objective uniquely determines the variables,
    and the third is ignored.
    """
    solver = SimpleASFCylinderSolver
    solver.asf = SimpleASF(np.array([1, 1, 1]))
    reference = np.array([12271.8, -2945.25, np.nan])
    (variables, (objectives, constraints)) = solver.solve(reference)
    assert objectives[0][0] == approx(reference[0], abs=5e-2)
    assert objectives[0][1] == approx(reference[1], abs=5e-2)
    assert variables[0] == approx(12.5, abs=1e-3)
    assert variables[1] == approx(25.0, abs=1e-3)
    assert np.all(np.greater_equal(constraints, 0))
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_asf_solve_reference_pareto(SimpleASFCylinderSolver):
    """A reference point close to the Pareto front is nearly attained."""
    solver = SimpleASFCylinderSolver
    solver.asf = SimpleASF(np.array([1, 1, 1]))
    reference = np.array([785.398, -471.239, 5.0])
    (variables, (objectives, constraints)) = solver.solve(reference)
    assert objectives[0][0] == approx(reference[0], abs=5e-2)
    assert objectives[0][1] == approx(reference[1], abs=5e-2)
    assert variables[0] == approx(5.0, abs=1e-3)
    assert variables[1] == approx(10.0, abs=1e-3)
    assert np.all(np.greater_equal(constraints, 0))
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_change_asf_parameters(SimpleASFCylinderSolver):
    """Mutating the ASF object is visible through the solver's reference."""
    solver = SimpleASFCylinderSolver
    asf = SimpleASF(np.array([1, 1, 1]))
    solver.asf = asf
    original_weights = solver.asf.weights
    asf.weights = np.array([2, 2, 2])
    updated_weights = solver.asf.weights
    assert np.all(np.isclose(updated_weights, np.array([2, 2, 2])))
    assert np.all(np.not_equal(original_weights, updated_weights))
| [
"desdeov2.solver.NumericalMethods.ScipyDE",
"numpy.zeros",
"numpy.ones",
"numpy.not_equal",
"desdeov2.solver.ScalarSolver.WeightingMethodScalarSolver",
"numpy.isclose",
"desdeov2.solver.ScalarSolver.ASFScalarSolver",
"numpy.array",
"desdeov2.solver.ASF.SimpleASF",
"pytest.raises",
"pytest.mark.f... | [((3077, 3129), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::RuntimeWarning"""'], {}), "('ignore::RuntimeWarning')\n", (3103, 3129), False, 'import pytest\n'), ((3734, 3786), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::RuntimeWarning"""'], {}), "('ignore::RuntimeWarning')\n", (3760, 3786), False, 'import pytest\n'), ((5385, 5437), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::RuntimeWarning"""'], {}), "('ignore::RuntimeWarning')\n", (5411, 5437), False, 'import pytest\n'), ((7590, 7642), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::RuntimeWarning"""'], {}), "('ignore::RuntimeWarning')\n", (7616, 7642), False, 'import pytest\n'), ((8125, 8177), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::RuntimeWarning"""'], {}), "('ignore::RuntimeWarning')\n", (8151, 8177), False, 'import pytest\n'), ((8846, 8898), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::RuntimeWarning"""'], {}), "('ignore::RuntimeWarning')\n", (8872, 8898), False, 'import pytest\n'), ((9745, 9797), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::RuntimeWarning"""'], {}), "('ignore::RuntimeWarning')\n", (9771, 9797), False, 'import pytest\n'), ((10529, 10581), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::RuntimeWarning"""'], {}), "('ignore::RuntimeWarning')\n", (10555, 10581), False, 'import pytest\n'), ((365, 437), 'desdeov2.solver.NumericalMethods.ScipyDE', 'ScipyDE', (["{'tol': 1e-06, 'popsize': 10, 'maxiter': 50000, 'polish': True}"], {}), "({'tol': 1e-06, 'popsize': 10, 'maxiter': 50000, 'polish': True})\n", (372, 437), False, 'from desdeov2.solver.NumericalMethods import ScipyDE\n'), ((563, 623), 'desdeov2.solver.ScalarSolver.WeightingMethodScalarSolver', 'WeightingMethodScalarSolver', (['CylinderProblem', 'Scipyde_method'], {}), '(CylinderProblem, 
Scipyde_method)\n', (590, 623), False, 'from desdeov2.solver.ScalarSolver import ASFScalarSolver, EpsilonConstraintScalarSolver, ScalarSolverError, WeightingMethodScalarSolver\n'), ((715, 763), 'desdeov2.solver.ScalarSolver.ASFScalarSolver', 'ASFScalarSolver', (['CylinderProblem', 'Scipyde_method'], {}), '(CylinderProblem, Scipyde_method)\n', (730, 763), False, 'from desdeov2.solver.ScalarSolver import ASFScalarSolver, EpsilonConstraintScalarSolver, ScalarSolverError, WeightingMethodScalarSolver\n'), ((863, 925), 'desdeov2.solver.ScalarSolver.EpsilonConstraintScalarSolver', 'EpsilonConstraintScalarSolver', (['CylinderProblem', 'Scipyde_method'], {}), '(CylinderProblem, Scipyde_method)\n', (892, 925), False, 'from desdeov2.solver.ScalarSolver import ASFScalarSolver, EpsilonConstraintScalarSolver, ScalarSolverError, WeightingMethodScalarSolver\n'), ((1051, 1100), 'numpy.array', 'np.array', (['[[7.5, 18], [5.1, 13.4], [11.5, 24.12]]'], {}), '([[7.5, 18], [5.1, 13.4], [11.5, 24.12]])\n', (1059, 1100), True, 'import numpy as np\n'), ((1200, 1249), 'numpy.array', 'np.array', (['[[18, 7.5], [13.4, 5.1], [24.12, 11.5]]'], {}), '([[18, 7.5], [13.4, 5.1], [24.12, 11.5]])\n', (1208, 1249), True, 'import numpy as np\n'), ((1377, 1388), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1385, 1388), True, 'import numpy as np\n'), ((1682, 1692), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1689, 1692), True, 'import numpy as np\n'), ((2064, 2092), 'numpy.array', 'np.array', (['[0.33, 0.33, 0.33]'], {}), '([0.33, 0.33, 0.33])\n', (2072, 2092), True, 'import numpy as np\n'), ((2550, 2560), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2557, 2560), True, 'import numpy as np\n'), ((2857, 2867), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2864, 2867), True, 'import numpy as np\n'), ((3276, 3286), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3283, 3286), True, 'import numpy as np\n'), ((3416, 3437), 'numpy.array', 'np.array', (['[5.0, 10.0]'], {}), '([5.0, 
10.0])\n', (3424, 3437), True, 'import numpy as np\n'), ((3967, 3992), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (3975, 3992), True, 'import numpy as np\n'), ((4841, 4875), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (4849, 4875), True, 'import numpy as np\n'), ((5088, 5117), 'numpy.array', 'np.array', (['[np.inf, np.inf, 5]'], {}), '([np.inf, np.inf, 5])\n', (5096, 5117), True, 'import numpy as np\n'), ((5688, 5722), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (5696, 5722), True, 'import numpy as np\n'), ((5975, 5999), 'numpy.all', 'np.all', (['(constraints >= 0)'], {}), '(constraints >= 0)\n', (5981, 5999), True, 'import numpy as np\n'), ((6306, 6330), 'numpy.all', 'np.all', (['(constraints >= 0)'], {}), '(constraints >= 0)\n', (6312, 6330), True, 'import numpy as np\n'), ((6555, 6579), 'numpy.all', 'np.all', (['(constraints >= 0)'], {}), '(constraints >= 0)\n', (6561, 6579), True, 'import numpy as np\n'), ((6760, 6785), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (6768, 6785), True, 'import numpy as np\n'), ((6803, 6821), 'desdeov2.solver.ASF.SimpleASF', 'SimpleASF', (['weights'], {}), '(weights)\n', (6812, 6821), False, 'from desdeov2.solver.ASF import SimpleASF\n'), ((6851, 6876), 'numpy.array', 'np.array', (['[500, 500, 500]'], {}), '([500, 500, 500])\n', (6859, 6876), True, 'import numpy as np\n'), ((7019, 7045), 'numpy.all', 'np.all', (['(res_good != np.inf)'], {}), '(res_good != np.inf)\n', (7025, 7045), True, 'import numpy as np\n'), ((7057, 7082), 'numpy.all', 'np.all', (['(res_bad == np.inf)'], {}), '(res_bad == np.inf)\n', (7063, 7082), True, 'import numpy as np\n'), ((7276, 7287), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7284, 7287), True, 'import numpy as np\n'), ((7305, 7323), 'desdeov2.solver.ASF.SimpleASF', 'SimpleASF', (['weights'], {}), '(weights)\n', (7314, 7323), 
False, 'from desdeov2.solver.ASF import SimpleASF\n'), ((7353, 7378), 'numpy.array', 'np.array', (['[500, 500, 500]'], {}), '([500, 500, 500])\n', (7361, 7378), True, 'import numpy as np\n'), ((7561, 7586), 'numpy.all', 'np.all', (['(res_bad == np.inf)'], {}), '(res_bad == np.inf)\n', (7567, 7586), True, 'import numpy as np\n'), ((7715, 7725), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (7722, 7725), True, 'import numpy as np\n'), ((7780, 7798), 'desdeov2.solver.ASF.SimpleASF', 'SimpleASF', (['weights'], {}), '(weights)\n', (7789, 7798), False, 'from desdeov2.solver.ASF import SimpleASF\n'), ((7821, 7846), 'numpy.array', 'np.array', (['[500, 500, 500]'], {}), '([500, 500, 500])\n', (7829, 7846), True, 'import numpy as np\n'), ((7949, 7970), 'numpy.array', 'np.array', (['[5.0, 10.0]'], {}), '([5.0, 10.0])\n', (7957, 7970), True, 'import numpy as np\n'), ((8477, 8496), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (8485, 8496), True, 'import numpy as np\n'), ((8551, 8569), 'desdeov2.solver.ASF.SimpleASF', 'SimpleASF', (['weights'], {}), '(weights)\n', (8560, 8569), False, 'from desdeov2.solver.ASF import SimpleASF\n'), ((8592, 8621), 'numpy.array', 'np.array', (['[np.nan, np.nan, 6]'], {}), '([np.nan, np.nan, 6])\n', (8600, 8621), True, 'import numpy as np\n'), ((9224, 9243), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (9232, 9243), True, 'import numpy as np\n'), ((9298, 9316), 'desdeov2.solver.ASF.SimpleASF', 'SimpleASF', (['weights'], {}), '(weights)\n', (9307, 9316), False, 'from desdeov2.solver.ASF import SimpleASF\n'), ((9339, 9376), 'numpy.array', 'np.array', (['[12271.8, -2945.25, np.nan]'], {}), '([12271.8, -2945.25, np.nan])\n', (9347, 9376), True, 'import numpy as np\n'), ((10012, 10031), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (10020, 10031), True, 'import numpy as np\n'), ((10086, 10104), 'desdeov2.solver.ASF.SimpleASF', 'SimpleASF', (['weights'], {}), '(weights)\n', (10095, 10104), 
False, 'from desdeov2.solver.ASF import SimpleASF\n'), ((10127, 10161), 'numpy.array', 'np.array', (['[785.398, -471.239, 5.0]'], {}), '([785.398, -471.239, 5.0])\n', (10135, 10161), True, 'import numpy as np\n'), ((10653, 10672), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (10661, 10672), True, 'import numpy as np\n'), ((10720, 10738), 'desdeov2.solver.ASF.SimpleASF', 'SimpleASF', (['weights'], {}), '(weights)\n', (10729, 10738), False, 'from desdeov2.solver.ASF import SimpleASF\n'), ((10811, 10830), 'numpy.array', 'np.array', (['[2, 2, 2]'], {}), '([2, 2, 2])\n', (10819, 10830), True, 'import numpy as np\n'), ((1911, 1936), 'numpy.isclose', 'np.isclose', (['res', 'expected'], {}), '(res, expected)\n', (1921, 1936), True, 'import numpy as np\n'), ((2392, 2417), 'numpy.isclose', 'np.isclose', (['res', 'expected'], {}), '(res, expected)\n', (2402, 2417), True, 'import numpy as np\n'), ((2704, 2730), 'pytest.approx', 'approx', (['(1982.2033717615695)'], {}), '(1982.2033717615695)\n', (2710, 2730), False, 'from pytest import approx\n'), ((3047, 3072), 'numpy.isclose', 'np.isclose', (['res', 'expected'], {}), '(res, expected)\n', (3057, 3072), True, 'import numpy as np\n'), ((3605, 3666), 'numpy.isclose', 'np.isclose', (['variables', 'expected_variables'], {'rtol': '(0.0)', 'atol': '(0.1)'}), '(variables, expected_variables, rtol=0.0, atol=0.1)\n', (3615, 3666), True, 'import numpy as np\n'), ((3695, 3729), 'numpy.greater_equal', 'np.greater_equal', (['constraints', '(0.0)'], {}), '(constraints, 0.0)\n', (3711, 3729), True, 'import numpy as np\n'), ((4225, 4237), 'pytest.approx', 'approx', (['(15.0)'], {}), '(15.0)\n', (4231, 4237), False, 'from pytest import approx\n'), ((4294, 4328), 'numpy.greater_equal', 'np.greater_equal', (['constraints', '(0.0)'], {}), '(constraints, 0.0)\n', (4310, 4328), True, 'import numpy as np\n'), ((4451, 4483), 'pytest.raises', 'pytest.raises', (['ScalarSolverError'], {}), '(ScalarSolverError)\n', (4464, 4483), False, 
'import pytest\n'), ((4511, 4531), 'numpy.array', 'np.array', (['[0.5, 1.0]'], {}), '([0.5, 1.0])\n', (4519, 4531), True, 'import numpy as np\n'), ((4542, 4574), 'pytest.raises', 'pytest.raises', (['ScalarSolverError'], {}), '(ScalarSolverError)\n', (4555, 4574), False, 'import pytest\n'), ((4602, 4632), 'numpy.array', 'np.array', (['[0.5, 1.0, 5.0, 2.0]'], {}), '([0.5, 1.0, 5.0, 2.0])\n', (4610, 4632), True, 'import numpy as np\n'), ((5875, 5903), 'pytest.approx', 'approx', (['min_volume'], {'abs': '(0.01)'}), '(min_volume, abs=0.01)\n', (5881, 5903), False, 'from pytest import approx\n'), ((6205, 6231), 'pytest.approx', 'approx', (['min_area'], {'abs': '(0.01)'}), '(min_area, abs=0.01)\n', (6211, 6231), False, 'from pytest import approx\n'), ((6475, 6503), 'pytest.approx', 'approx', (['min_deltah'], {'abs': '(0.01)'}), '(min_deltah, abs=0.01)\n', (6481, 6503), False, 'from pytest import approx\n'), ((6531, 6543), 'pytest.approx', 'approx', (['(15.0)'], {}), '(15.0)\n', (6537, 6543), False, 'from pytest import approx\n'), ((7999, 8060), 'numpy.isclose', 'np.isclose', (['variables', 'expected_variables'], {'rtol': '(0.0)', 'atol': '(0.1)'}), '(variables, expected_variables, rtol=0.0, atol=0.1)\n', (8009, 8060), True, 'import numpy as np\n'), ((8088, 8120), 'numpy.greater_equal', 'np.greater_equal', (['constraints', '(0)'], {}), '(constraints, 0)\n', (8104, 8120), True, 'import numpy as np\n'), ((8730, 8750), 'pytest.approx', 'approx', (['(0)'], {'abs': '(0.001)'}), '(0, abs=0.001)\n', (8736, 8750), False, 'from pytest import approx\n'), ((8809, 8841), 'numpy.greater_equal', 'np.greater_equal', (['constraints', '(0)'], {}), '(constraints, 0)\n', (8825, 8841), True, 'import numpy as np\n'), ((9485, 9521), 'pytest.approx', 'approx', (['reference_point[0]'], {'abs': '(0.05)'}), '(reference_point[0], abs=0.05)\n', (9491, 9521), False, 'from pytest import approx\n'), ((9553, 9589), 'pytest.approx', 'approx', (['reference_point[1]'], {'abs': '(0.05)'}), 
'(reference_point[1], abs=0.05)\n', (9559, 9589), False, 'from pytest import approx\n'), ((9617, 9640), 'pytest.approx', 'approx', (['(12.5)'], {'abs': '(0.001)'}), '(12.5, abs=0.001)\n', (9623, 9640), False, 'from pytest import approx\n'), ((9667, 9690), 'pytest.approx', 'approx', (['(25.0)'], {'abs': '(0.001)'}), '(25.0, abs=0.001)\n', (9673, 9690), False, 'from pytest import approx\n'), ((9708, 9740), 'numpy.greater_equal', 'np.greater_equal', (['constraints', '(0)'], {}), '(constraints, 0)\n', (9724, 9740), True, 'import numpy as np\n'), ((10270, 10306), 'pytest.approx', 'approx', (['reference_point[0]'], {'abs': '(0.05)'}), '(reference_point[0], abs=0.05)\n', (10276, 10306), False, 'from pytest import approx\n'), ((10338, 10374), 'pytest.approx', 'approx', (['reference_point[1]'], {'abs': '(0.05)'}), '(reference_point[1], abs=0.05)\n', (10344, 10374), False, 'from pytest import approx\n'), ((10402, 10424), 'pytest.approx', 'approx', (['(5.0)'], {'abs': '(0.001)'}), '(5.0, abs=0.001)\n', (10408, 10424), False, 'from pytest import approx\n'), ((10451, 10474), 'pytest.approx', 'approx', (['(10.0)'], {'abs': '(0.001)'}), '(10.0, abs=0.001)\n', (10457, 10474), False, 'from pytest import approx\n'), ((10492, 10524), 'numpy.greater_equal', 'np.greater_equal', (['constraints', '(0)'], {}), '(constraints, 0)\n', (10508, 10524), True, 'import numpy as np\n'), ((10939, 10966), 'numpy.not_equal', 'np.not_equal', (['before', 'after'], {}), '(before, after)\n', (10951, 10966), True, 'import numpy as np\n'), ((1543, 1554), 'pytest.approx', 'approx', (['(0.0)'], {}), '(0.0)\n', (1549, 1554), False, 'from pytest import approx\n'), ((5338, 5364), 'numpy.array', 'np.array', (['[3.0, 1.6, 9.12]'], {}), '([3.0, 1.6, 9.12])\n', (5346, 5364), True, 'import numpy as np\n'), ((5944, 5961), 'numpy.array', 'np.array', (['[5, 10]'], {}), '([5, 10])\n', (5952, 5961), True, 'import numpy as np\n'), ((6272, 6292), 'numpy.array', 'np.array', (['[12.5, 25]'], {}), '([12.5, 25])\n', (6280, 
6292), True, 'import numpy as np\n'), ((7539, 7548), 'pytest.approx', 'approx', (['(0)'], {}), '(0)\n', (7545, 7548), False, 'from pytest import approx\n'), ((10899, 10918), 'numpy.array', 'np.array', (['[2, 2, 2]'], {}), '([2, 2, 2])\n', (10907, 10918), True, 'import numpy as np\n')] |
from PyQt5.QtCore import QThread, pyqtSignal
import win32
from PIL import Image
from numba import jit, njit
import numpy as np
import time
SLEEP_TIME = 0.002
class CaptureWorker(QThread):
    """Background thread that captures game frames at a fixed rate and emits
    the parsed game state.

    Emits ``done`` with one of:
      - {"success": True, "inGame": True, "field", "score", "lines", "level",
         "next", "stats", "image", "fps"} when an in-game frame was parsed,
      - {"success": True, "inGame": False, "image", "fps"} otherwise,
      - {"success": False} when capturing or parsing raised.
    """

    done = pyqtSignal(object)

    def __init__(self, parent):
        super().__init__(parent)
        self.exiting = False  # set externally to stop run()
        self.captureRate = 1.0 / parent.config.captureFPS  # seconds between captures
        self.startTime = None
        self.lastCapturedTime = None
        self.capturedFrames = 0
        self.showFPS = 0  # measured capture FPS, refreshed once per second
        self.lastFPSQueueTime = None
        self.capturedFramesForFPS = 0
        self.capture = win32.Win32UICapture()
        self.inGameChecker = InGameChecker()
        self.fieldReader = FieldReader()
        self.scoreReader = ScoreReader()
        self.linesReader = LinesReader()
        self.levelReader = LevelReader()
        self.nextReader = NextReader()
        self.statsReader = StatsReader()

    def run(self):
        """Poll at SLEEP_TIME intervals; capture and parse when a frame is due."""
        self.startTime = time.time()
        self.lastCapturedTime = self.startTime
        self.lastFPSQueueTime = self.startTime
        while not self.exiting:
            currentTime = time.time()
            # Refresh the measured FPS once per second.
            if currentTime - self.lastFPSQueueTime > 1:
                self.showFPS = self.capturedFramesForFPS / (currentTime - self.lastFPSQueueTime)
                self.capturedFramesForFPS = 0
                self.lastFPSQueueTime = currentTime
            if currentTime - self.lastCapturedTime > self.captureRate:
                # Catch the schedule up so a long stall does not trigger a burst.
                while self.lastCapturedTime + self.captureRate < currentTime:
                    self.lastCapturedTime += self.captureRate
                handle = self.parent().currentHandle
                config = self.parent().config
                # Re-read config every frame so UI changes take effect immediately.
                self.captureRate = 1.0 / config.captureFPS
                self.nextReader.setBlackThreshold(config.blackThreshold)
                self.inGameChecker.setThreshold(config.inGameThreshold)
                try:
                    image = self.capture.capture((config.xCoord, config.yCoord, config.width, config.height), handle)
                    image = image.resize((512, 448))
                    smallImage = image.resize((32, 28))
                    inGame = self.inGameChecker.check(smallImage)
                    if inGame:
                        field = self.fieldReader.read(image)
                        score = self.scoreReader.read(image)
                        lines = self.linesReader.read(image)
                        level = self.levelReader.read(image)
                        next_ = self.nextReader.read(image)
                        stats = self.statsReader.read(image)
                        self.done.emit({ "success": True, "inGame": True, "field": field, "score": score, "lines": lines, "level": level, "next": next_, "stats": stats, "image": image, "fps": self.showFPS })
                    else:
                        self.done.emit({ "success": True, "inGame": False, "image": image, "fps": self.showFPS })
                except Exception:
                    # Was a bare `except:`, which also swallowed SystemExit and
                    # KeyboardInterrupt; narrowed to Exception.
                    self.done.emit({ "success": False })
                self.capturedFrames += 1
                self.capturedFramesForFPS += 1
            else:
                # NOTE(review): this resets the score reader's hex flag on every
                # idle tick, i.e. between almost every pair of captures at the
                # default 2 ms poll — confirm this is intended.
                self.scoreReader.reset()
            time.sleep(SLEEP_TIME)
SCORE_TILE_X = 24
SCORE_TILE_Y = 7
class ScoreReader:
    """Reads the six-digit score field; switches to hex digits once seen."""

    def __init__(self):
        self.digitReader = DigitReader()
        # Once a hex digit ('a') is seen in the leading position, trust
        # subsequent b/d/e/f reads instead of correcting them.
        self.enableHexRead = False

    def read(self, image):
        """Return the score as an integer; stops at the first blank digit."""
        result = 0
        for i in range(6):
            x = (SCORE_TILE_X + i) * 16
            y = SCORE_TILE_Y * 16
            d = self.digitReader.read(image.crop((x, y, x + 14, y + 14)), i == 0, False)
            value = d[0][0]
            if value == -1:
                break
            if i == 0:
                # Avoid misreading '8' as 'B' in the leading digit.
                if value == 10:
                    self.enableHexRead = True
                # Bug fix: the original did `d[0][0] = 8`, which mutates a
                # tuple (DigitReader.read returns a list of tuples) and would
                # raise TypeError whenever this correction fired.
                if (not self.enableHexRead) and value == 11:
                    value = 8
            result += value * (10 ** (5 - i))
        return result

    def reset(self):
        """Drop the hex-digit assumption (called while capture is idle)."""
        self.enableHexRead = False
LINES_TILE_X = 19
LINES_TILE_Y = 2
class LinesReader:
    """Reads the three-digit line counter from the captured frame."""

    def __init__(self):
        self.digitReader = DigitReader()

    def read(self, image):
        """Return the line count as an integer; stops at the first blank digit."""
        total = 0
        top = LINES_TILE_Y * 16
        for pos in range(3):
            left = (LINES_TILE_X + pos) * 16
            digit = self.digitReader.read(image.crop((left, top, left + 14, top + 14)), False, False)
            if digit[0][0] == -1:
                break
            total += digit[0][0] * 10 ** (2 - pos)
        return total
LEVEL_TILE_X = 26
LEVEL_TILE_Y = 20
class LevelReader:
    """Reads the two-digit level indicator from the captured frame."""

    def __init__(self):
        self.digitReader = DigitReader()

    def read(self, image):
        """Return the level as an integer; stops at the first blank digit."""
        level = 0
        top = LEVEL_TILE_Y * 16
        for pos in range(2):
            left = (LEVEL_TILE_X + pos) * 16
            digit = self.digitReader.read(image.crop((left, top, left + 14, top + 14)), False, False)
            if digit[0][0] == -1:
                break
            level += digit[0][0] * 10 ** (1 - pos)
        return level
STATS_TILE_X = 6
STATS_TILE_Y = 11
class StatsReader:
    """Reads the seven piece-statistics counters (three digits each)."""

    def __init__(self):
        self.digitReader = DigitReader()

    def read(self, image):
        """Return a list of seven counters; each row stops at a blank digit.

        The statistics digits are rendered in red, hence the red flag on read.
        """
        counts = [0] * 7
        for row in range(7):
            top = (STATS_TILE_Y + row * 2) * 16
            for col in range(3):
                left = (STATS_TILE_X + col) * 16
                digit = self.digitReader.read(image.crop((left, top, left + 14, top + 14)), False, True)
                if digit[0][0] == -1:
                    break
                counts[row] += digit[0][0] * 10 ** (2 - col)
        return counts
FIELD_TILE_X = 12
FIELD_TILE_Y = 5
@njit("uint8[:,:](float32[:,:,:],float32[:],float32[:],float32[:],float32[:])")
def readFieldJit(smallImage, blackSample, whiteSample, color1Sample, color2Sample):
    # Classify every cell of the 20x10 playfield as the nearest of four
    # reference colors (0=black, 1=white, 2/3=level palette colors).
    # smallImage is the field downscaled to 4x4 pixels per cell (float32 RGB);
    # the samples are 3-element float32 reference colors.
    samples = [blackSample, whiteSample, color1Sample, color2Sample]
    result = np.zeros((20,10), dtype=np.uint8)
    for iy in range(20):
        for ix in range(10):
            # Probe a single pixel near the top-left of each 4x4 cell.
            x = ix * 4 + 1
            y = iy * 4 + 1
            color = smallImage[y][x]
            closest = 0
            # Larger than any possible squared RGB distance.
            lowest_dist = (256*256)*3
            i = 0
            for i in range(4):
                sample = samples[i]
                # Squared Euclidean distance in RGB space (no sqrt needed
                # for an argmin).
                dist = ((color[0] - sample[0]) * (color[0] - sample[0]) +
                        (color[1] - sample[1]) * (color[1] - sample[1]) +
                        (color[2] - sample[2]) * (color[2] - sample[2]))
                if dist < lowest_dist:
                    lowest_dist = dist
                    closest = i
            result[iy][ix] = closest
    return result
# Not Used
def readFieldSlow(image, blackSample, whiteSample, color1Sample, color2Sample):
    """Pure-Python reference version of readFieldJit.

    Classifies each 20x10 field cell of the full-size capture as the index
    of the nearest sample color (0=black, 1=white, 2/3=palette colors).
    """
    samples = (blackSample, whiteSample, color1Sample, color2Sample)
    field = []
    for iy in range(20):
        row = []
        for ix in range(10):
            px = (FIELD_TILE_X + ix) * 16
            py = (FIELD_TILE_Y + iy) * 16
            # Average a small patch inside the cell to get its color.
            patch = image.crop((px + 5, py + 5, px + 9, py + 9))
            color = np.mean(np.asarray(patch), (0, 1), dtype=np.float32)
            closest = 0
            best_dist = (256 * 256) * 3
            for idx, sample in enumerate(samples):
                dist = ((color[0] - sample[0]) * (color[0] - sample[0]) +
                        (color[1] - sample[1]) * (color[1] - sample[1]) +
                        (color[2] - sample[2]) * (color[2] - sample[2]))
                if dist < best_dist:
                    best_dist = dist
                    closest = idx
            row.append(closest)
        field.append(row)
    return field
class FieldReader:
    """Samples the frame's reference colors and delegates to the jitted classifier."""

    def __init__(self):
        pass

    def read(self, image):
        """Return a (20, 10) grid of cell color indices for the playfield."""

        def sample_mean(box):
            # Average color of a small fixed patch of the frame.
            return np.mean(np.asarray(image.crop(box)), (0, 1), dtype=np.float32)

        blackSample = sample_mean((67, 147, 71, 151))
        whiteSample = sample_mean((67, 173, 71, 177))
        color1Sample = sample_mean((69, 205, 73, 208))
        color2Sample = sample_mean((69, 239, 73, 243))
        field_box = (
            FIELD_TILE_X * 16,
            FIELD_TILE_Y * 16,
            (FIELD_TILE_X + 10) * 16,
            (FIELD_TILE_Y + 20) * 16,
        )
        # Downscale to 4x4 pixels per cell before classification.
        smallImage = np.asarray(
            image.crop(field_box).resize((40, 80), Image.BILINEAR), dtype=np.float32
        )
        return readFieldJit(smallImage, blackSample, whiteSample, color1Sample, color2Sample)
class NextReader:
    """Decodes the next-piece preview by probing three pixel groups."""

    # Index = (orange << 2) | (red << 1) | pink; index 5 never occurs.
    bitToPiece = ["I", "J", "T", "Z", "L", "", "S", "O"]

    def __init__(self):
        self.threshold = 25

    def setBlackThreshold(self, threshold):
        self.threshold = threshold

    def isNotBlack(self, color):
        """True when any RGB channel exceeds the black threshold."""
        limit = self.threshold
        return color[0] > limit or color[1] > limit or color[2] > limit

    def read(self, image):
        """Return the next piece's letter ("I", "J", ...)."""

        def probe(box):
            return np.mean(np.asarray(image.crop(box)), (0, 1), dtype=np.float32)

        orange = probe((403, 249, 405, 251))
        red = probe((411, 249, 413, 251))
        pink = probe((427, 249, 429, 251))
        bit = 0
        if self.isNotBlack(orange):
            bit += 4
        if self.isNotBlack(red):
            bit += 2
        if self.isNotBlack(pink):
            bit += 1
        return NextReader.bitToPiece[bit]
class InGameChecker:
    """Detects whether a captured 32x28 frame is an in-game screen by masked
    template matching against two reference screenshots."""

    # Class-level template caches, shared across instances. The original
    # declared unused `normalRGB`/`tetrisRGB` names while assigning
    # `normal`/`tetris` undeclared in __init__; declarations now match usage.
    normal = None
    normalMask = None
    tetris = None
    tetrisMask = None

    def __init__(self):
        # Split each RGBA template into RGB channels and a 0..1 alpha mask.
        n = np.asarray(Image.open("assets/normal.png"), dtype=np.int16)
        InGameChecker.normal = n[:,:,0:3]
        InGameChecker.normalMask = n[:,:,3] / 255
        t = np.asarray(Image.open("assets/tetris.png"), dtype=np.int16)
        InGameChecker.tetris = t[:,:,0:3]
        InGameChecker.tetrisMask = t[:,:,3] / 255
        self.threshold = 15000

    def setThreshold(self, threshold):
        self.threshold = threshold

    def check(self, smallImage):
        """Return True when the masked absolute RGB difference to either
        template stays below the threshold."""
        array = np.asarray(smallImage, dtype=np.int16)
        normalScore = np.sum(np.multiply(InGameChecker.normalMask, np.sum(np.abs(np.subtract(InGameChecker.normal, array)), axis=2)))
        tetrisScore = np.sum(np.multiply(InGameChecker.tetrisMask, np.sum(np.abs(np.subtract(InGameChecker.tetris, array)), axis=2)))
        minScore = min(normalScore, tetrisScore)
        return minScore < self.threshold
class DigitReader:
    """Matches a digit image against pre-rendered grayscale templates."""

    digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "null"]
    digitImages = None
    digitArrays = None
    # Numeric value for each template; the trailing "null" maps to -1.
    # FIX: entry 12 previously read 2 instead of 12, so the hex digit "c"
    # was reported as the value 2.
    digitNumbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1]

    def __init__(self):
        # Lazily load and cache the grayscale templates on first construction.
        if not DigitReader.digitImages:
            DigitReader.digitImages = [Image.open(f"assets/digit_{e}.png").convert("L") for e in DigitReader.digits]
            DigitReader.digitArrays = [np.asarray(e, dtype=np.int16) for e in DigitReader.digitImages]

    def read(self, image, hex, red):
        """Return (value, error) pairs sorted by ascending match error.

        hex: also consider the a-f templates; red: scale intensities x3
        (presumably to compensate for red-tinted digits — TODO confirm).
        Entries not considered keep the worst-case score 255*14*14.
        """
        result = [(n, 255*14*14) for n in DigitReader.digitNumbers]
        mul = 3 if red else 1
        array = np.asarray(image.convert("L"), dtype=np.int16) * mul
        for i, comp in enumerate(DigitReader.digitArrays):
            if ((0 <= i and i <= 9) or i == 16) or hex:
                result[i] = (DigitReader.digitNumbers[i], np.sum(np.abs(np.subtract(comp, array))))
        return sorted(result, key=lambda x: x[1])
| [
"PyQt5.QtCore.pyqtSignal",
"numpy.subtract",
"numpy.asarray",
"numba.njit",
"numpy.zeros",
"win32.Win32UICapture",
"time.time",
"PIL.Image.open",
"time.sleep"
] | [((4822, 4900), 'numba.njit', 'njit', (['"""uint8[:,:](float32[:,:,:],float32[:],float32[:],float32[:],float32[:])"""'], {}), "('uint8[:,:](float32[:,:,:],float32[:],float32[:],float32[:],float32[:])')\n", (4826, 4900), False, 'from numba import jit, njit\n'), ((199, 217), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['object'], {}), '(object)\n', (209, 217), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((5092, 5126), 'numpy.zeros', 'np.zeros', (['(20, 10)'], {'dtype': 'np.uint8'}), '((20, 10), dtype=np.uint8)\n', (5100, 5126), True, 'import numpy as np\n'), ((553, 575), 'win32.Win32UICapture', 'win32.Win32UICapture', ([], {}), '()\n', (573, 575), False, 'import win32\n'), ((877, 888), 'time.time', 'time.time', ([], {}), '()\n', (886, 888), False, 'import time\n'), ((8685, 8723), 'numpy.asarray', 'np.asarray', (['smallImage'], {'dtype': 'np.int16'}), '(smallImage, dtype=np.int16)\n', (8695, 8723), True, 'import numpy as np\n'), ((1023, 1034), 'time.time', 'time.time', ([], {}), '()\n', (1032, 1034), False, 'import time\n'), ((8260, 8291), 'PIL.Image.open', 'Image.open', (['"""assets/normal.png"""'], {}), "('assets/normal.png')\n", (8270, 8291), False, 'from PIL import Image\n'), ((8412, 8443), 'PIL.Image.open', 'Image.open', (['"""assets/tetris.png"""'], {}), "('assets/tetris.png')\n", (8422, 8443), False, 'from PIL import Image\n'), ((2791, 2813), 'time.sleep', 'time.sleep', (['SLEEP_TIME'], {}), '(SLEEP_TIME)\n', (2801, 2813), False, 'import time\n'), ((9505, 9534), 'numpy.asarray', 'np.asarray', (['e'], {'dtype': 'np.int16'}), '(e, dtype=np.int16)\n', (9515, 9534), True, 'import numpy as np\n'), ((8801, 8841), 'numpy.subtract', 'np.subtract', (['InGameChecker.normal', 'array'], {}), '(InGameChecker.normal, array)\n', (8812, 8841), True, 'import numpy as np\n'), ((8931, 8971), 'numpy.subtract', 'np.subtract', (['InGameChecker.tetris', 'array'], {}), '(InGameChecker.tetris, array)\n', (8942, 8971), True, 'import numpy as np\n'), ((9394, 9429), 
'PIL.Image.open', 'Image.open', (['f"""assets/digit_{e}.png"""'], {}), "(f'assets/digit_{e}.png')\n", (9404, 9429), False, 'from PIL import Image\n'), ((9929, 9953), 'numpy.subtract', 'np.subtract', (['comp', 'array'], {}), '(comp, array)\n', (9940, 9953), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.utils import data
class carlaDataset(data.Dataset):
    """PyTorch dataset over logged CARLA driving runs.

    Each item aggregates ``n_samples`` log files for one scenario id into
    fixed-length arrays of agent states (speed/pose/size), intervention
    flags and targets, collisions, mileage-based results and ego waypoints.
    Shapes are assumed from the indexing below (e.g. poses are x,y,z,yaw)
    — confirm against the log writer.
    """
    def __init__(self, list_IDs, obs_w, factual, time_shift, args, TEST=False):
        '''Initialization: store scenario ids, observation-window length and config taken from args.'''
        self.list_IDs = list_IDs
        self.obs_w = obs_w
        self.data_dir = args.data_dir
        self.ID_all = args.ID_all
        self.types_all = args.types_all
        self.filenames = args.filenames
        self.burn_in = args.burn_in
        self.t_eliminated = args.t_eliminated
        self.time_shift = time_shift
        self.x_dim_permuted = args.x_dim_permuted
        self.n_agents = args.n_agents
        self.n_agents_all = args.n_agents_all
        self.t_future = args.t_future
        self.t_future_waypoint = args.t_future_waypoint
        self.n_samples = args.n_samples
        self.factual = factual
        self.TEST = args.TEST
        self.max_p = args.max_p
        self.max_v = args.max_v
        self.max_y = args.max_y

    def __len__(self):
        '''Denotes the total number of samples'''
        return len(self.list_IDs)

    def __getitem__(self, index):
        '''Generates one sample of data'''
        data_dir = self.data_dir
        # Select sample
        id = self.list_IDs[index]
        n_samples = self.n_samples
        factual = self.factual[index]
        # Per-item time offset shifts where the usable window starts.
        t_eliminated = self.t_eliminated + self.time_shift[index]
        # FIX: dropped the dead `lengths` from this multi-assignment — it was
        # immediately shadowed by the np.zeros allocation on the next line.
        speeds_, sizes_, types_, poses_ = [],[],[],[]
        lengths = np.zeros((n_samples,))
        results = np.zeros((self.obs_w,n_samples))
        interventions = np.zeros((self.obs_w+1,n_samples))
        intervention_targets = np.zeros((self.obs_w,3,n_samples))
        mileages = np.zeros((self.obs_w,n_samples))
        collisions = np.zeros((self.obs_w,n_samples))
        waypoints = np.zeros((self.obs_w,2*self.t_future_waypoint,n_samples)) # 4
        for n in range(n_samples):
            # Load data
            data = np.load(data_dir + self.filenames[n_samples*id+n])
            intervention,intervention_target,collision,mileage,mileage_progress,simulate_progress,time,IDs= [],[],[],[],[],[],[],[]
            waypoint = []
            # First pass: collect per-frame scalars from the drive log.
            for dat in data['drive_data']:
                intervention.append(dat['intervention'])
                if len(dat['intervention_target']) == 0:
                    intervention_target.append(np.zeros(3))
                else:
                    intervention_target.append(dat['intervention_target'])
                collision.append(dat['collision'])
                mileage.append(dat['mileage'])
                time.append(dat['time'])
                IDs = [*dat['actors'].keys()]
            for dat in data['waypoint']:
                waypoint.append(np.array([dat['x'],dat['y'],dat['z'],dat['yaw']]))
            IDs = np.unique(np.array(IDs))
            # IDs = np.concatenate([IDs[-1,np.newaxis,np.newaxis],IDs[:-1,np.newaxis]],0).squeeze(1)
            # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin int keeps the original behavior.
            intervention = np.array(intervention).astype(int)
            intervention_target = np.array(intervention_target)
            collision = np.array(collision).astype(int)
            collision[:t_eliminated+self.burn_in] = 0 # ignore collision
            mileage = np.array(mileage)
            waypoint = np.array(waypoint)
            time = np.array(time)-time[0]
            # for each object
            n_all = len(self.types_all)
            speeds = np.zeros((self.obs_w,n_all,1))
            poses = np.zeros((self.obs_w,n_all,4))
            sizes = np.zeros((self.obs_w,n_all,3))
            waypoint_ = np.zeros((self.obs_w,2*self.t_future_waypoint)) # 4
            # NOTE(review): bare except + pdb traps below are debugging aids
            # left in place intentionally; remove before production use.
            try: types = [['' for _ in range(n_all)] for _ in range(self.obs_w)]
            except: import pdb; pdb.set_trace()
            i = 0; t = 0
            mileage_decrease = 0
            strictly_safe = False
            # Second pass: fill per-frame agent state and the mileage-based
            # result signal, skipping the eliminated prefix of the log.
            for dat in data['drive_data']:
                if collision[t] == 1:
                    if strictly_safe:
                        # collision[t:] = 1
                        continue
                    else:
                        # Subtract distance gained during collision frames.
                        mileage_decrease += mileage[t]-mileage[t-1]
                if t >= t_eliminated and i < self.obs_w :
                    if strictly_safe:
                        if np.sum(collision[t+1:t+self.t_future+1])==0:
                            results[i,n] = mileage[t+self.t_future]
                    else:
                        results[i,n] = mileage[t+self.t_future] - mileage_decrease
                    for ID in IDs:
                        if ID != 'ego_vehicle':
                            ID = int(ID)
                        else:
                            ID_ego = ID
                        if ID in [*dat['actors'].keys()]:
                            ind = list(self.types_all).index(dat['actors'][ID]['type'])
                            speeds[i,ind] = dat['actors'][ID]['speed']
                            poses[i,ind] = np.array(dat['actors'][ID]['pose'])
                            sizes[i,ind] = np.array(dat['actors'][ID]['size'])
                            types[i][ind] = dat['actors'][ID]['type']
                            if ID == 'ego_vehicle':
                                # Slice t_future_waypoint upcoming waypoints
                                # starting at the one closest to the ego,
                                # padding with the last waypoint at track end.
                                nearest_waypoints = np.argmin(np.sum((waypoint[:,:2]-poses[np.newaxis,i,ind,:2].repeat(waypoint.shape[0],0))**2,1))
                                T1 = nearest_waypoints+self.t_future_waypoint
                                T0 = waypoint.shape[0]
                                if waypoint.shape[0]<nearest_waypoints+1:
                                    import pdb; pdb.set_trace()
                                elif waypoint.shape[0]<T1:
                                    T2 = self.t_future_waypoint-T1+T0# T1-T0
                                    try: waypoint_[i,:T2*2] = waypoint[nearest_waypoints:T1,:2].reshape((-1,))
                                    except: import pdb; pdb.set_trace()
                                    waypoint_[i,T2*2:] = np.repeat(waypoint[T0-1:T0,:2],self.t_future_waypoint-T2,axis=0).reshape((-1,))
                                else:
                                    waypoint_[i] = waypoint[nearest_waypoints:T1,:2].reshape((-1,))
                    i += 1
                t += 1
            # Put the ego agent first, then sort the rest by minimum distance
            # to the ego over time and keep the n_agents closest.
            ind_ego = list(self.types_all).index(dat['actors'][ID_ego]['type'])
            speeds = np.concatenate([speeds[:,ind_ego:ind_ego+1],speeds[:,:ind_ego],speeds[:,ind_ego+1:]],1)
            poses = np.concatenate([poses[:,ind_ego:ind_ego+1],poses[:,:ind_ego],poses[:,ind_ego+1:]],1)
            sizes = np.concatenate([sizes[:,ind_ego:ind_ego+1],sizes[:,:ind_ego],sizes[:,ind_ego+1:]],1)
            dist = np.sqrt(np.sum((poses[:,0:1,:2].repeat(self.n_agents_all-1,1)-poses[:,1:,:2])**2,2))
            ind_dist = np.argsort(np.min(dist,axis=0),axis=0)
            ind_dist = np.concatenate([np.zeros((1,)),ind_dist+1],0).astype(np.int32)
            speeds__ = speeds[:,:self.n_agents].copy()
            poses__ = poses[:,:self.n_agents].copy()
            sizes__ = sizes[:,:self.n_agents].copy()
            for t in range(self.obs_w):
                try: speeds__[t] = speeds[t,ind_dist[:self.n_agents]] # t,
                except: import pdb; pdb.set_trace()
                poses__[t] = poses[t,ind_dist[:self.n_agents]] # t,
                sizes__[t] = sizes[t,ind_dist[:self.n_agents]] # t,
            speeds = speeds__
            poses = poses__
            sizes = sizes__
            lengths[n] = i
            interventions[:,n] = intervention[t_eliminated:t_eliminated+self.obs_w+1]
            intervention_targets[:,:,n] = intervention_target[t_eliminated:t_eliminated+self.obs_w]
            collisions[:,n] = collision[t_eliminated:t_eliminated+self.obs_w]
            if n == 0:
                # NOTE(review): raises IndexError when sample 0 contains no
                # intervention — presumably every factual run has one; verify.
                interventions[np.where(interventions[:,0]==1)[0][0]-1:,0] = 1
            waypoints[:,:,n] = waypoint_
            mileages[:,n] = mileage[t_eliminated:t_eliminated+self.obs_w]
            # Express results/mileage relative to the window's first mileage.
            results[:,n] -= np.repeat(mileages[0,np.newaxis,n],self.obs_w)
            mileages[:,n] -= np.repeat(mileages[0,np.newaxis,n],self.obs_w)
            if (results[10,n]-results[0,n]>0) and np.min(results[10:,n])<0.01:
                import pdb; pdb.set_trace()
            if i < self.obs_w:
                # Pad short runs by repeating the last valid frame.
                speeds[i:] = speeds[i-1:i].repeat(self.obs_w-i,0)
                poses[i:] = poses[i-1:i].repeat(self.obs_w-i,0)
                waypoint_[i:] = waypoint_[i-1:i].repeat(self.obs_w-i,0)
                mileages[i:] = mileages[i-1:i].repeat(self.obs_w-i,0)
            speeds_.append(np.array(speeds))
            poses_.append(np.array(poses))
            types_.append(np.array(types))
            sizes_.append(np.array(sizes))
        # Normalize and assemble the output tensors.
        speeds_ = np.array(speeds_)/self.max_v
        poses_ = np.array(poses_)
        types_ = np.array(types_)
        sizes_ = np.array(sizes_)
        lengths = np.array(lengths)
        poses_ = np.concatenate([poses_[:,:,:,:2]/self.max_p,np.cos(poses_[:,:,:,3:4]),np.sin(poses_[:,:,:,3:4])],3) # x,y,cos,sin (eliminate z)
        sizes_ = sizes_[:,:,:,:2] # x,y
        #if not self.TEST:
        lengths = lengths[factual]
        # output
        X_demographic = np.concatenate([np.zeros((1)),np.max(np.max(sizes_,1),0).reshape((-1,))],0) # 1(dummy)+agent*3
        X_demo = torch.from_numpy(X_demographic.astype(np.float32))
        y_all = torch.from_numpy(results.astype(np.float32))/self.max_y
        y = y_all[:,factual]
        # The treatment that maximizes the final outcome across samples.
        X_treatment_opt = np.argmax(y_all[-1,:])
        X_treatment_all = torch.from_numpy(interventions.astype(np.float32))
        X_treatment = X_treatment_all[:,factual]
        horizon = self.obs_w-self.burn_in
        X_ind = np.concatenate([speeds_,poses_,sizes_],3).transpose((1,2,3,0)).reshape((self.obs_w,7*self.n_agents,n_samples)) # n,t,agent,dim -> t,dim*agent,n # n_all
        X_all = np.concatenate([X_ind,mileages[:,np.newaxis]/self.max_y,X_ind[:,0:1,:],np.zeros((self.obs_w,1,2)),collisions[:,np.newaxis]],1) #
        X_all = torch.from_numpy(X_all.astype(np.float32)) # t,dim_all,n
        X = X_all[:,:,factual] #
        return X, X_demo, X_treatment, y, lengths, index, X_treatment_all, X_all, y_all, X_treatment_opt
| [
"numpy.load",
"numpy.sum",
"numpy.argmax",
"numpy.zeros",
"numpy.min",
"numpy.sin",
"numpy.array",
"pdb.set_trace",
"numpy.cos",
"numpy.max",
"numpy.where",
"numpy.concatenate",
"numpy.repeat"
] | [((1471, 1493), 'numpy.zeros', 'np.zeros', (['(n_samples,)'], {}), '((n_samples,))\n', (1479, 1493), True, 'import numpy as np\n'), ((1512, 1545), 'numpy.zeros', 'np.zeros', (['(self.obs_w, n_samples)'], {}), '((self.obs_w, n_samples))\n', (1520, 1545), True, 'import numpy as np\n'), ((1569, 1606), 'numpy.zeros', 'np.zeros', (['(self.obs_w + 1, n_samples)'], {}), '((self.obs_w + 1, n_samples))\n', (1577, 1606), True, 'import numpy as np\n'), ((1635, 1671), 'numpy.zeros', 'np.zeros', (['(self.obs_w, 3, n_samples)'], {}), '((self.obs_w, 3, n_samples))\n', (1643, 1671), True, 'import numpy as np\n'), ((1689, 1722), 'numpy.zeros', 'np.zeros', (['(self.obs_w, n_samples)'], {}), '((self.obs_w, n_samples))\n', (1697, 1722), True, 'import numpy as np\n'), ((1743, 1776), 'numpy.zeros', 'np.zeros', (['(self.obs_w, n_samples)'], {}), '((self.obs_w, n_samples))\n', (1751, 1776), True, 'import numpy as np\n'), ((1796, 1857), 'numpy.zeros', 'np.zeros', (['(self.obs_w, 2 * self.t_future_waypoint, n_samples)'], {}), '((self.obs_w, 2 * self.t_future_waypoint, n_samples))\n', (1804, 1857), True, 'import numpy as np\n'), ((8868, 8884), 'numpy.array', 'np.array', (['poses_'], {}), '(poses_)\n', (8876, 8884), True, 'import numpy as np\n'), ((8902, 8918), 'numpy.array', 'np.array', (['types_'], {}), '(types_)\n', (8910, 8918), True, 'import numpy as np\n'), ((8936, 8952), 'numpy.array', 'np.array', (['sizes_'], {}), '(sizes_)\n', (8944, 8952), True, 'import numpy as np\n'), ((8971, 8988), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (8979, 8988), True, 'import numpy as np\n'), ((9590, 9613), 'numpy.argmax', 'np.argmax', (['y_all[-1, :]'], {}), '(y_all[-1, :])\n', (9599, 9613), True, 'import numpy as np\n'), ((1937, 1991), 'numpy.load', 'np.load', (['(data_dir + self.filenames[n_samples * id + n])'], {}), '(data_dir + self.filenames[n_samples * id + n])\n', (1944, 1991), True, 'import numpy as np\n'), ((3027, 3056), 'numpy.array', 'np.array', (['intervention_target'], 
{}), '(intervention_target)\n', (3035, 3056), True, 'import numpy as np\n'), ((3211, 3228), 'numpy.array', 'np.array', (['mileage'], {}), '(mileage)\n', (3219, 3228), True, 'import numpy as np\n'), ((3252, 3270), 'numpy.array', 'np.array', (['waypoint'], {}), '(waypoint)\n', (3260, 3270), True, 'import numpy as np\n'), ((3406, 3438), 'numpy.zeros', 'np.zeros', (['(self.obs_w, n_all, 1)'], {}), '((self.obs_w, n_all, 1))\n', (3414, 3438), True, 'import numpy as np\n'), ((3457, 3489), 'numpy.zeros', 'np.zeros', (['(self.obs_w, n_all, 4)'], {}), '((self.obs_w, n_all, 4))\n', (3465, 3489), True, 'import numpy as np\n'), ((3508, 3540), 'numpy.zeros', 'np.zeros', (['(self.obs_w, n_all, 3)'], {}), '((self.obs_w, n_all, 3))\n', (3516, 3540), True, 'import numpy as np\n'), ((3563, 3613), 'numpy.zeros', 'np.zeros', (['(self.obs_w, 2 * self.t_future_waypoint)'], {}), '((self.obs_w, 2 * self.t_future_waypoint))\n', (3571, 3613), True, 'import numpy as np\n'), ((6391, 6493), 'numpy.concatenate', 'np.concatenate', (['[speeds[:, ind_ego:ind_ego + 1], speeds[:, :ind_ego], speeds[:, ind_ego + 1:]]', '(1)'], {}), '([speeds[:, ind_ego:ind_ego + 1], speeds[:, :ind_ego], speeds\n [:, ind_ego + 1:]], 1)\n', (6405, 6493), True, 'import numpy as np\n'), ((6499, 6597), 'numpy.concatenate', 'np.concatenate', (['[poses[:, ind_ego:ind_ego + 1], poses[:, :ind_ego], poses[:, ind_ego + 1:]]', '(1)'], {}), '([poses[:, ind_ego:ind_ego + 1], poses[:, :ind_ego], poses[:,\n ind_ego + 1:]], 1)\n', (6513, 6597), True, 'import numpy as np\n'), ((6604, 6702), 'numpy.concatenate', 'np.concatenate', (['[sizes[:, ind_ego:ind_ego + 1], sizes[:, :ind_ego], sizes[:, ind_ego + 1:]]', '(1)'], {}), '([sizes[:, ind_ego:ind_ego + 1], sizes[:, :ind_ego], sizes[:,\n ind_ego + 1:]], 1)\n', (6618, 6702), True, 'import numpy as np\n'), ((8072, 8121), 'numpy.repeat', 'np.repeat', (['mileages[0, np.newaxis, n]', 'self.obs_w'], {}), '(mileages[0, np.newaxis, n], self.obs_w)\n', (8081, 8121), True, 'import numpy as np\n'), 
((8148, 8197), 'numpy.repeat', 'np.repeat', (['mileages[0, np.newaxis, n]', 'self.obs_w'], {}), '(mileages[0, np.newaxis, n], self.obs_w)\n', (8157, 8197), True, 'import numpy as np\n'), ((8822, 8839), 'numpy.array', 'np.array', (['speeds_'], {}), '(speeds_)\n', (8830, 8839), True, 'import numpy as np\n'), ((2812, 2825), 'numpy.array', 'np.array', (['IDs'], {}), '(IDs)\n', (2820, 2825), True, 'import numpy as np\n'), ((3290, 3304), 'numpy.array', 'np.array', (['time'], {}), '(time)\n', (3298, 3304), True, 'import numpy as np\n'), ((6828, 6848), 'numpy.min', 'np.min', (['dist'], {'axis': '(0)'}), '(dist, axis=0)\n', (6834, 6848), True, 'import numpy as np\n'), ((8303, 8318), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (8316, 8318), False, 'import pdb\n'), ((8651, 8667), 'numpy.array', 'np.array', (['speeds'], {}), '(speeds)\n', (8659, 8667), True, 'import numpy as np\n'), ((8695, 8710), 'numpy.array', 'np.array', (['poses'], {}), '(poses)\n', (8703, 8710), True, 'import numpy as np\n'), ((8744, 8759), 'numpy.array', 'np.array', (['types'], {}), '(types)\n', (8752, 8759), True, 'import numpy as np\n'), ((8787, 8802), 'numpy.array', 'np.array', (['sizes'], {}), '(sizes)\n', (8795, 8802), True, 'import numpy as np\n'), ((9051, 9079), 'numpy.cos', 'np.cos', (['poses_[:, :, :, 3:4]'], {}), '(poses_[:, :, :, 3:4])\n', (9057, 9079), True, 'import numpy as np\n'), ((9077, 9105), 'numpy.sin', 'np.sin', (['poses_[:, :, :, 3:4]'], {}), '(poses_[:, :, :, 3:4])\n', (9083, 9105), True, 'import numpy as np\n'), ((9306, 9317), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (9314, 9317), True, 'import numpy as np\n'), ((10038, 10066), 'numpy.zeros', 'np.zeros', (['(self.obs_w, 1, 2)'], {}), '((self.obs_w, 1, 2))\n', (10046, 10066), True, 'import numpy as np\n'), ((2720, 2772), 'numpy.array', 'np.array', (["[dat['x'], dat['y'], dat['z'], dat['yaw']]"], {}), "([dat['x'], dat['y'], dat['z'], dat['yaw']])\n", (2728, 2772), True, 'import numpy as np\n'), ((2955, 2977), 
'numpy.array', 'np.array', (['intervention'], {}), '(intervention)\n', (2963, 2977), True, 'import numpy as np\n'), ((3081, 3100), 'numpy.array', 'np.array', (['collision'], {}), '(collision)\n', (3089, 3100), True, 'import numpy as np\n'), ((3741, 3756), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3754, 3756), False, 'import pdb\n'), ((8246, 8269), 'numpy.min', 'np.min', (['results[10:, n]'], {}), '(results[10:, n])\n', (8252, 8269), True, 'import numpy as np\n'), ((2351, 2362), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2359, 2362), True, 'import numpy as np\n'), ((7255, 7270), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7268, 7270), False, 'import pdb\n'), ((9799, 9843), 'numpy.concatenate', 'np.concatenate', (['[speeds_, poses_, sizes_]', '(3)'], {}), '([speeds_, poses_, sizes_], 3)\n', (9813, 9843), True, 'import numpy as np\n'), ((4295, 4341), 'numpy.sum', 'np.sum', (['collision[t + 1:t + self.t_future + 1]'], {}), '(collision[t + 1:t + self.t_future + 1])\n', (4301, 4341), True, 'import numpy as np\n'), ((5001, 5036), 'numpy.array', 'np.array', (["dat['actors'][ID]['pose']"], {}), "(dat['actors'][ID]['pose'])\n", (5009, 5036), True, 'import numpy as np\n'), ((5080, 5115), 'numpy.array', 'np.array', (["dat['actors'][ID]['size']"], {}), "(dat['actors'][ID]['size'])\n", (5088, 5115), True, 'import numpy as np\n'), ((6895, 6909), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (6903, 6909), True, 'import numpy as np\n'), ((9327, 9344), 'numpy.max', 'np.max', (['sizes_', '(1)'], {}), '(sizes_, 1)\n', (9333, 9344), True, 'import numpy as np\n'), ((5646, 5661), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (5659, 5661), False, 'import pdb\n'), ((7855, 7889), 'numpy.where', 'np.where', (['(interventions[:, 0] == 1)'], {}), '(interventions[:, 0] == 1)\n', (7863, 7889), True, 'import numpy as np\n'), ((5949, 5964), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (5962, 5964), False, 'import pdb\n'), ((6018, 6089), 
'numpy.repeat', 'np.repeat', (['waypoint[T0 - 1:T0, :2]', '(self.t_future_waypoint - T2)'], {'axis': '(0)'}), '(waypoint[T0 - 1:T0, :2], self.t_future_waypoint - T2, axis=0)\n', (6027, 6089), True, 'import numpy as np\n')] |
"""
Usage: python -m recipy example_script3.py OUTPUT.npy
"""
from __future__ import print_function
import sys
import numpy
if len(sys.argv) < 2:
print(__doc__, file=sys.stderr)
sys.exit(1)
arr = numpy.arange(10)
arr = arr + 500
# We've made a fairly big change here!
numpy.save(sys.argv[1], arr)
| [
"numpy.save",
"numpy.arange",
"sys.exit"
] | [((208, 224), 'numpy.arange', 'numpy.arange', (['(10)'], {}), '(10)\n', (220, 224), False, 'import numpy\n'), ((281, 309), 'numpy.save', 'numpy.save', (['sys.argv[1]', 'arr'], {}), '(sys.argv[1], arr)\n', (291, 309), False, 'import numpy\n'), ((189, 200), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (197, 200), False, 'import sys\n')] |
# Import required packages
# Common data-processing packages
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
# Random forest
import sklearn as skl
from sklearn.ensemble import RandomForestClassifier
# Plotting packages
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
import warnings
warnings.filterwarnings("ignore")
# Load the data
# Read the CSV files into DataFrames
# NOTE(review): absolute Windows paths — parameterize before reusing elsewhere.
dt = pd.read_csv(r'E:\机器学习课设数据集\train_all.csv')
dtest = pd.read_csv(r'E:\机器学习课设数据集\train_2.csv')
# Data preprocessing ------------------------
# Drop rows with missing values, then coerce columns to numeric where possible
train = dt.dropna()
train = train.apply(pd.to_numeric,errors="ignore")
test = dtest.dropna()
test = test.apply(pd.to_numeric,errors="ignore")
# 替换current_service
train.loc[train['current_service'] == 90063345, 'current_service'] = 1
train.loc[train['current_service'] == 89950166, 'current_service'] = 2
train.loc[train['current_service'] == 89950167, 'current_service'] = 3
train.loc[train['current_service'] == 99999828, 'current_service'] = 4
train.loc[train['current_service'] == 90109916, 'current_service'] = 5
train.loc[train['current_service'] == 89950168, 'current_service'] = 6
train.loc[train['current_service'] == 99999827, 'current_service'] = 7
train.loc[train['current_service'] == 99999826, 'current_service'] = 8
train.loc[train['current_service'] == 90155946, 'current_service'] = 9
train.loc[train['current_service'] == 99999830, 'current_service'] = 10
train.loc[train['current_service'] == 99999825, 'current_service'] = 11
test.loc[test['current_service'] == 90063345, 'current_service'] = 1
test.loc[test['current_service'] == 89950166, 'current_service'] = 2
test.loc[test['current_service'] == 89950167, 'current_service'] = 3
test.loc[test['current_service'] == 99999828, 'current_service'] = 4
test.loc[test['current_service'] == 90109916, 'current_service'] = 5
test.loc[test['current_service'] == 89950168, 'current_service'] = 6
test.loc[test['current_service'] == 99999827, 'current_service'] = 7
test.loc[test['current_service'] == 99999826, 'current_service'] = 8
test.loc[test['current_service'] == 90155946, 'current_service'] = 9
test.loc[test['current_service'] == 99999830, 'current_service'] = 10
test.loc[test['current_service'] == 99999825, 'current_service'] = 11
# # 删除test里current_service的异常值999999
# for i in test['current_service']:
# if(test['current_service'][i] == 999999):
# del test[i, :]
# Drop the is_mix_service feature
train.drop('is_mix_service',axis=1,inplace=True)
test.drop('is_mix_service',axis=1,inplace=True)
# 4 continuous float features, the total_fee series (these similarly-shaped
# features can be recombined into new aggregate features)
# NOTE(review): iloc positions below depend on the exact column order after
# the drops/inserts above — verify positions 2:6 really are the fee columns.
train.insert(6, 'total_fee_mean', train.iloc[:,2:6].mean(axis=1)) # float64 continuous
test.insert(6, 'total_fee_mean', test.iloc[:,2:6].mean(axis=1)) # float64 continuous
train.insert(7, 'total_fee_min', train.iloc[:,2:6].min(axis=1))
test.insert(7, 'total_fee_min', test.iloc[:,2:6].min(axis=1))
del train['1_total_fee']
del train['2_total_fee']
del train['3_total_fee']
del train['4_total_fee']
del test['1_total_fee']
del test['2_total_fee']
del test['3_total_fee']
del test['4_total_fee']
# 3 continuous float features
# traffic series: month_traffic, last_month_traffic, local_trafffic_month
# (note the original column name contains a spelling error)
train.insert(5, 'traffic_month_sum', train.iloc[:,4]+train.iloc[:,13]) # float64 continuous
test.insert(5, 'traffic_month_sum', test.iloc[:,4]+test.iloc[:,13]) # float64 continuous
# Per-row max of month_traffic / last_month_traffic.
# NOTE(review): train['month_traffic'][i] is label-based; after dropna the
# index may have gaps, so positional range(shape[0]) can raise KeyError —
# consider reset_index or a vectorized np.maximum instead.
traffic_month_max = []
for i in range(train.shape[0]):
    if train['month_traffic'][i]>train['last_month_traffic'][i]:
        traffic_month_max.append(train['month_traffic'][i])
    else:
        traffic_month_max.append(train['last_month_traffic'][i])
train.insert(6,column='traffic_month_max',value=traffic_month_max)
del train['month_traffic']
del train['last_month_traffic']
traffic_month_max_test = []
for i in range(test.shape[0]):
    if test['month_traffic'][i]>test['last_month_traffic'][i]:
        traffic_month_max_test.append(test['month_traffic'][i])
    else:
        traffic_month_max_test.append(test['last_month_traffic'][i])
test.insert(6,column='traffic_month_max',value=traffic_month_max_test)
del test['month_traffic']
del test['last_month_traffic']
# 3 float features, the caller_time series
#all_data['service2_caller_time'].dtype
train.insert(17, 'call_time_max', train.iloc[:,15:17].max(axis=1))
train.insert(18, 'call_time_min', train.iloc[:,15:17].min(axis=1))
train.insert(19, 'call_time_local', train.iloc[:,18]+train.iloc[:,14])
test.insert(17, 'call_time_max', test.iloc[:,15:17].max(axis=1))
test.insert(18, 'call_time_min', test.iloc[:,15:17].min(axis=1))
test.insert(19, 'call_time_local', test.iloc[:,18]+test.iloc[:,14])
# Payment features
# Keep pay_num; derive the average amount per payment
train.insert(13, 'pay_num_pertimes', train.iloc[:,11]/train.iloc[:,12])
test.insert(13, 'pay_num_pertimes', test.iloc[:,11]/test.iloc[:,12])
del train['pay_times']
del test['pay_times']
# Discard complaint_level, former_complaint_num, former_complaint_fee and
# net_service (very low correlation with the target)
del train['complaint_level']
del train['former_complaint_num']
del train['former_complaint_fee']
del train['net_service']
del test['complaint_level']
del test['former_complaint_num']
del test['former_complaint_fee']
del test['net_service']
# 2 int features: age and gender — weak correlation, but kept on principle.
# NOTE(review): these comprehensions iterate the column VALUES and use them
# as row labels (test['age'][i]), which almost certainly mis-indexes rows.
# The intent appears to be test['age'].astype(float); also note train is not
# converted at all — confirm and fix.
test['age'] = [float(test['age'][i]) for i in test['age']]
test['gender'] = [float(test['gender'][i]) for i in test['gender']]
# for i in :
#     data[i] = data[i].astype(float)
# Drop the user_id identifier column
del train['user_id']
del test['user_id']
# Inspect the cleaned frames.
print(train.info())
print(test.info())

# Convert the cleaned DataFrames into plain numpy arrays.
train_data = (train.dropna()).values
test_data = (test.dropna()).values

# train_all.csv is used for fitting, train_2.csv for evaluation.
data_train = train_data
data_test = test_data
print ("Number of features used for training:\t",len(data_train),
       "\nNumber of features used for testing:\t",len(data_test))

# Fit a 100-tree random forest; columns 0..20 are features, column 21 the label.
clf = RandomForestClassifier(n_estimators=100)
X_train = data_train.astype('int')[:, 0:21]
y_train = data_train.astype('int')[:, 21]
model: object = clf.fit(X_train, y_train)

# Evaluate on the held-out set.
pred = clf.predict(test_data[:, 0:21])
acc = np.mean(pred == test_data[:, 21].ravel()) * 100
print("Accuracy of pure RandomForest classifier"
      ": \t", acc, "%")
print("F1_score", f1_score(test_data[:, 21], pred, average = 'weighted'))
# service_type (column 0) is a binary, highly discriminative feature —
# partition both arrays into the service_type==1 and service_type==4 groups.
train_service1, train_service4 = [], []
test_service1, test_service4 = [], []
#print(len(data_ar))
for row in train_data[:len(train.dropna())]:
    (train_service1 if row[0] == 1 else train_service4).append(row)
for row in test_data[:len(test.dropna())]:
    (test_service1 if row[0] == 1 else test_service4).append(row)
print ("Number of features used for training(s1):\t",len(train_service1),
       "\nNumber of features used for testing(s1):\t",len(test_service1),
       "\nNumber of features used for training(s4):\t", len(train_service4),
       "\nNumber of features used for testing(s4):\t", len(test_service4),)
# Convert the partitions to numpy arrays for sklearn.
data_train_s1 = np.array(train_service1)
data_test_s1 = np.array(test_service1)
data_train_s4 = np.array(train_service4)
data_test_s4 = np.array(test_service4)
# Train and evaluate one 100-tree random forest per service_type partition
# (first the s1 split, then the s4 split).
for tr, te in ((data_train_s1, data_test_s1), (data_train_s4, data_test_s4)):
    clf = RandomForestClassifier(n_estimators=100)
    model = clf.fit(tr.astype('int')[:, 0:21], tr.astype('int')[:, 21])
    # Score on the matching held-out partition.
    pred = clf.predict(te[:, 0:21])
    acc = np.mean(pred == te[:, 21].ravel()) * 100
    print("Accuracy of pure RandomForest classifier"
          ": \t", acc, "%")
    print("F1_score", f1_score(te[:, 21], pred, average = 'weighted'))
# Grid-search min_samples_leaf x n_estimators on the s4 partition; each
# element of `results` is a (leaf_size, n_estimators, accuracy) triple.
results = []
# Candidate values for the minimum-leaf parameter
sample_leaf_options = list(range(1, 500, 3))
# Candidate values for the tree count
n_estimators_options = list(range(1, 1000, 5))
groud_truth = data_test_s4[:, 21].ravel()
# Hoist the loop-invariant feature/label extraction out of the search loop.
X_s4 = data_train_s4.astype('int')[:, 0:21]
y_s4 = data_train_s4.astype('int')[:, 21]
for leaf_size in sample_leaf_options:
    for n_estimators_size in n_estimators_options:
        alg = RandomForestClassifier(min_samples_leaf=leaf_size,
                                     n_estimators=n_estimators_size,
                                     random_state=50)
        alg.fit(X_s4, y_s4)
        predict = alg.predict(data_test_s4[:, 0:21])
        accuracy = (groud_truth == predict).mean()
        results.append((leaf_size, n_estimators_size, accuracy))
        # Compare prediction against ground truth and report the accuracy
        print(accuracy)
# Report the best-scoring parameter triple
print(max(results, key=lambda x: x[2]))
"sklearn.ensemble.RandomForestClassifier",
"warnings.filterwarnings",
"pandas.read_csv",
"sklearn.metrics.f1_score",
"numpy.array",
"seaborn.set"
] | [((241, 266), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (248, 266), True, 'import seaborn as sns\n'), ((284, 317), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (307, 317), False, 'import warnings\n'), ((350, 393), 'pandas.read_csv', 'pd.read_csv', (['"""E:\\\\机器学习课设数据集\\\\train_all.csv"""'], {}), "('E:\\\\机器学习课设数据集\\\\train_all.csv')\n", (361, 393), True, 'import pandas as pd\n'), ((401, 442), 'pandas.read_csv', 'pd.read_csv', (['"""E:\\\\机器学习课设数据集\\\\train_2.csv"""'], {}), "('E:\\\\机器学习课设数据集\\\\train_2.csv')\n", (412, 442), True, 'import pandas as pd\n'), ((5682, 5722), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (5704, 5722), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7049, 7073), 'numpy.array', 'np.array', (['train_service1'], {}), '(train_service1)\n', (7057, 7073), True, 'import numpy as np\n'), ((7089, 7112), 'numpy.array', 'np.array', (['test_service1'], {}), '(test_service1)\n', (7097, 7112), True, 'import numpy as np\n'), ((7129, 7153), 'numpy.array', 'np.array', (['train_service4'], {}), '(train_service4)\n', (7137, 7153), True, 'import numpy as np\n'), ((7169, 7192), 'numpy.array', 'np.array', (['test_service4'], {}), '(test_service4)\n', (7177, 7192), True, 'import numpy as np\n'), ((7216, 7256), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (7238, 7256), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7752, 7792), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (7774, 7792), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((6129, 6181), 'sklearn.metrics.f1_score', 'f1_score', (['test_data[:, 21]', 'pred'], {'average': '"""weighted"""'}), "(test_data[:, 21], pred, 
average='weighted')\n", (6137, 6181), False, 'from sklearn.metrics import f1_score\n'), ((7667, 7722), 'sklearn.metrics.f1_score', 'f1_score', (['data_test_s1[:, 21]', 'pred'], {'average': '"""weighted"""'}), "(data_test_s1[:, 21], pred, average='weighted')\n", (7675, 7722), False, 'from sklearn.metrics import f1_score\n'), ((8203, 8258), 'sklearn.metrics.f1_score', 'f1_score', (['data_test_s4[:, 21]', 'pred'], {'average': '"""weighted"""'}), "(data_test_s4[:, 21], pred, average='weighted')\n", (8211, 8258), False, 'from sklearn.metrics import f1_score\n'), ((8589, 8693), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'min_samples_leaf': 'leaf_size', 'n_estimators': 'n_estimators_size', 'random_state': '(50)'}), '(min_samples_leaf=leaf_size, n_estimators=\n n_estimators_size, random_state=50)\n', (8611, 8693), False, 'from sklearn.ensemble import RandomForestClassifier\n')] |
from sim3d import simulate_img3d
import h5py
from mainviewer import mainViewer
import numpy as np
import cv2
from moviepy.editor import ImageSequenceClip
from random import randrange, uniform
import math
from skimage.util.shape import view_as_blocks
from skimage import io
def write_hdf5(dataset, n, canvas, positions=False, metadata=None):
    """Append sample *n* of *canvas* to ``Data/<dataset>.hdf5``.

    Parameters
    ----------
    dataset : str
        Base name of the HDF5 file (stored under ``Data/``).
    n : int or str
        Sample index, used as the dataset name inside the file.
    canvas : numpy.ndarray
        Image stack to store (written as uint8, gzip compression level 1).
    positions : array-like or False
        Optional particle positions stored as a dataset attribute.
    metadata : unused
        Kept only for backward compatibility with existing callers.
    """
    path = f'Data/{dataset}.hdf5'
    with h5py.File(path, "a") as f:
        dset = f.create_dataset(name=str(n), shape=canvas.shape, dtype='uint8', data = canvas, compression=1)
        # Explicit sentinel check instead of `if positions:`, which raises
        # ValueError ("truth value of an array is ambiguous") when positions
        # is a multi-element numpy array, and silently drops an empty but
        # valid position list.
        if positions is not False and positions is not None:
            dset.attrs['positions'] = positions
def read_hdf5(dataset, n, positions=False):
    """Load sample *n* from ``Data/<dataset>.hdf5``.

    Returns the stored canvas as a numpy array.  When *positions* is
    truthy, a ``(canvas, positions)`` tuple is returned instead, with the
    positions taken from the dataset's ``'positions'`` attribute.
    """
    path = f'Data/{dataset}.hdf5'
    with h5py.File(path, "r") as f:
        dset = f[str(n)]
        if not positions:
            return np.array(dset)
        return np.array(dset), np.array(dset.attrs['positions'])
def make_gif(canvas, file_name, fps = 7, positions=None, scale=None):
    """Render a grayscale image stack as an animated GIF.

    Parameters
    ----------
    canvas : sequence of 2-D arrays
        Grayscale frames, one per z-slice.
    file_name : str
        Output GIF path.
    fps : int
        Frames per second of the resulting clip.
    positions : iterable of (z, y, x), optional
        Particle centres; each is marked with a small red rectangle on
        its z-slice.
    scale : number, optional
        Percentage resize applied to every frame (e.g. 300 -> 3x larger).
    """
    # Decompose the grayscale stack into RGB so coloured markers show up.
    new_canvas = np.array([np.stack((img,)*3, axis=-1) for img in canvas])
    last = len(new_canvas) - 1
    if positions is not None:
        for z, y, x in positions:
            z, y, x = math.floor(z), int(y), int(x)
            # Generalised from the original hard-coded `if z==31: z=30`
            # (valid only for 32-slice stacks): keep the marker off the
            # final slice whatever the stack depth is.
            if z >= last:
                z = last - 1
            cv2.rectangle(new_canvas[z], (x - 1, y - 1), (x + 1, y + 1), (250,0,0), -1)
    if scale is not None:
        height, width = new_canvas[0].shape[:2]
        dim = (int(width * scale / 100), int(height * scale / 100))
        new_canvas = [cv2.resize(img, dim, interpolation = cv2.INTER_AREA) for img in new_canvas]
    clip = ImageSequenceClip(list(new_canvas), fps=fps)
    clip.write_gif(file_name, fps=fps)
if __name__ == "__main__":
    # Script entry point: re-render previously simulated scans (stored in
    # Data/TF.hdf5 and Data/TF_labels.hdf5) as example GIFs.
    canvas_size=(32,128,128)  # (depth, height, width) of a simulated scan
    dataset = 'TF'
    n_samples = 2  # NOTE(review): unused while the simulation block below is commented out
    # Simulation block kept for reference — generates and stores new samples.
    # for n in range(1,n_samples+1):
    #     print(f'{n}/{n_samples}')
    #     zoom = 0.75
    #     xykernel = randrange(1,4,2)
    #     gauss = (randrange(5,8,2),xykernel,xykernel)
    #     gauss = (5,1,1)
    #     brightness = randrange(180,250)
    #     noise = uniform(0.002, 0.008)
    #     canvas, positions, label = simulate_img3d(canvas_size, zoom, gauss, noise=noise, volfrac = 0.3)
    #     # mainViewer(canvas, positions=positions)
    #     # mainViewer(label, positions=positions)
    #     # write_hdf5(dataset, n, canvas, positions)
    #     # write_hdf5(dataset+'_labels', n, label)
    #     canvas, positions, label = None, None, None
    # Render samples 1..3 (and their labels) to GIFs at 3x scale.
    for n in range(1,4):
        canvas, positions = read_hdf5(dataset, n, positions=True)
        label = read_hdf5(dataset+'_labels', n)
        make_gif(canvas, f'output/Example/{dataset}_scan_{n}.gif', fps = 7, scale=300)
        make_gif(label, f'output/Example/{dataset}_scan_{n}labels.gif', fps = 7, scale=300)
| [
"numpy.stack",
"h5py.File",
"math.floor",
"numpy.array",
"cv2.rectangle",
"cv2.resize"
] | [((379, 399), 'h5py.File', 'h5py.File', (['path', '"""a"""'], {}), "(path, 'a')\n", (388, 399), False, 'import h5py\n'), ((644, 664), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (653, 664), False, 'import h5py\n'), ((820, 836), 'numpy.array', 'np.array', (['canvas'], {}), '(canvas)\n', (828, 836), True, 'import numpy as np\n'), ((979, 1008), 'numpy.stack', 'np.stack', (['((img,) * 3)'], {'axis': '(-1)'}), '((img,) * 3, axis=-1)\n', (987, 1008), True, 'import numpy as np\n'), ((1146, 1223), 'cv2.rectangle', 'cv2.rectangle', (['new_canvas[z]', '(x - 1, y - 1)', '(x + 1, y + 1)', '(250, 0, 0)', '(-1)'], {}), '(new_canvas[z], (x - 1, y - 1), (x + 1, y + 1), (250, 0, 0), -1)\n', (1159, 1223), False, 'import cv2\n'), ((1463, 1513), 'cv2.resize', 'cv2.resize', (['img', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(img, dim, interpolation=cv2.INTER_AREA)\n', (1473, 1513), False, 'import cv2\n'), ((763, 779), 'numpy.array', 'np.array', (['canvas'], {}), '(canvas)\n', (771, 779), True, 'import numpy as np\n'), ((781, 800), 'numpy.array', 'np.array', (['positions'], {}), '(positions)\n', (789, 800), True, 'import numpy as np\n'), ((1096, 1109), 'math.floor', 'math.floor', (['z'], {}), '(z)\n', (1106, 1109), False, 'import math\n')] |
# This code is adopted from "https://github.com/Line290/FeatureAttack"
from __future__ import print_function
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd.gradcheck import zero_gradients
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import sys
import datetime
import random
from models.wideresnet import *
import numpy as np
# Command-line configuration for the feature attack evaluation.
parser = argparse.ArgumentParser()
parser.add_argument('--model-path', type=str, help='model path')
# dataset dependent
parser.add_argument('--num_classes', default=10, type=int, help='num classes')
parser.add_argument('--dataset', default='cifar10', type=str,
                    help='dataset') # concat cascade
parser.add_argument('--batch_size_test',
                    default=200,
                    type=int,
                    help='batch size for testing')
parser.add_argument('--image_size', default=32, type=int, help='image size')
args = parser.parse_args()
# Dataset-specific overrides; epsilon is the L-inf attack budget (8/255 everywhere).
if args.dataset == 'cifar10':
    print('------------cifar10---------')
    args.num_classes = 10
    args.image_size = 32
    epsilon = 8.0/255.0
elif args.dataset == 'cifar100':
    print('----------cifar100---------')
    args.num_classes = 100
    args.image_size = 32
    epsilon = 8.0/255.0
elif args.dataset == 'svhn':
    print('------------svhn10---------')
    args.num_classes = 10
    args.image_size = 32
    epsilon = 8.0/255.0
device = 'cuda' if torch.cuda.is_available() else 'cpu'
start_epoch = 0
# Data
print('==> Preparing data..')
# Only ToTensor — no normalisation, so pixel values stay in [0, 1] and the
# epsilon clamp in attack() is valid.
if args.dataset == 'cifar10' or args.dataset == 'cifar100':
    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])
elif args.dataset == 'svhn':
    transform_test = transforms.Compose([
        transforms.ToTensor(),
    ])
if args.dataset == 'cifar10':
    testset = torchvision.datasets.CIFAR10(root='../data',
                                           train=False,
                                           download=True,
                                           transform=transform_test)
elif args.dataset == 'cifar100':
    testset = torchvision.datasets.CIFAR100(root='../data',
                                            train=False,
                                            download=True,
                                            transform=transform_test)
elif args.dataset == 'svhn':
    testset = torchvision.datasets.SVHN(root='../data',
                                        split='test',
                                        download=True,
                                        transform=transform_test)
# Very large batch size so the loop below sees the whole test set at once.
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=10000,
                                         shuffle=False,
                                         num_workers=20)
basic_net = WideResNet(depth=28,
                       num_classes=args.num_classes,
                       widen_factor=10)
net = basic_net.to(device)
net.load_state_dict(torch.load(args.model_path))
criterion = nn.CrossEntropyLoss()
# Attack hyper-parameters consumed by attack().
config_feature_attack = {
    'train': False,
    'epsilon': epsilon,
    'num_steps': 50,
    'step_size': 1.0 / 255.0,
    'random_start': True,
    'early_stop': True,
    'num_total_target_images': args.batch_size_test,
}
def pair_cos_dist(x, y):
    """Pairwise cosine distance along the last dimension, clamped at zero.

    Returns ``max(0, 1 - cos_sim(x, y))`` element-wise: identical
    directions give 0, opposite directions approach 2.
    """
    similarity = nn.CosineSimilarity(dim=-1, eps=1e-6)(x, y)
    return (1 - similarity).clamp(min=0)
def attack(model, inputs, target_inputs, y, config):
    """Feature-space adversarial attack (PGD-style, L-inf bounded).

    Iteratively perturbs `inputs` so that the model's internal feature
    representation moves towards that of `target_inputs` (cosine-distance
    loss), while staying within an epsilon L-inf ball of the originals
    and inside [0, 1] pixel range.

    Args:
        model: network supporting `model(x, return_feature=True)` ->
            (logits, features).
        inputs: clean images to perturb (same image repeated per batch here).
        target_inputs: images whose features we pull towards.
        y: true labels of `inputs`; used for the early-stop check.
        config: dict with 'step_size', 'epsilon', 'num_steps',
            'random_start', 'early_stop'.

    Returns:
        (adversarial images detached, predictions from the last forward pass).
    """
    step_size = config['step_size']
    epsilon = config['epsilon']
    num_steps = config['num_steps']
    random_start = config['random_start']
    early_stop = config['early_stop']
    model.eval()
    x = inputs.detach()
    if random_start:
        # Random init inside the epsilon ball, then re-clamp to valid pixels.
        x = x + torch.zeros_like(x).uniform_(-epsilon, epsilon)
        x = torch.clamp(x, 0.0, 1.0)
    target_logits, target_feat = model(target_inputs, return_feature=True)
    target_feat = target_feat.detach()
    for i in range(num_steps):
        x.requires_grad_()
        # zero_gradients is a deprecated torch.autograd.gradcheck helper;
        # the explicit fill below is a belt-and-braces duplicate of it.
        zero_gradients(x)
        if x.grad is not None:
            x.grad.data.fill_(0)
        logits_pred, feat = model(x, return_feature=True)
        preds = logits_pred.argmax(1)
        if early_stop:
            # Stop as soon as any sample in the batch is misclassified.
            num_not_corr = (preds != y).sum().item()
            if num_not_corr > 0:
                break
        inver_loss = pair_cos_dist(feat, target_feat)
        adv_loss = inver_loss.mean()
        adv_loss.backward()
        # Gradient-sign descent on the feature distance, then project back
        # into the epsilon ball around the clean inputs and into [0, 1].
        x_adv = x.data - step_size * torch.sign(x.grad.data)
        x_adv = torch.min(torch.max(x_adv, inputs - epsilon), inputs + epsilon)
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
        x = Variable(x_adv)  # Variable is deprecated; kept for exact behaviour
    return x.detach(), preds
# Evaluation bookkeeping: counters for untargeted/targeted attack success.
target_images_size = args.batch_size_test
print('target batch size is: ', target_images_size)
num_total_target_images = config_feature_attack['num_total_target_images']
net.eval()
untarget_success_count = 0
target_success_count = 0
total = 0
# load all test data
# NOTE(review): this loop keeps only the LAST batch; with batch_size=10000
# that is the whole CIFAR test set, but verify for larger test sets (e.g. SVHN).
all_test_data, all_test_label = None, None
for test_data, test_label in testloader:
    all_test_data, all_test_label = test_data, test_label
print(all_test_data.size(), all_test_label.size())
num_eval_imgs = all_test_data.size(0)
# Per-image robust-accuracy flags (1.0 = no target batch fooled the model).
per_image_acc = np.zeros([num_eval_imgs])
for clean_idx in range(num_eval_imgs):
    input, label_cpu = all_test_data[clean_idx].unsqueeze(0), all_test_label[clean_idx].unsqueeze(0)
    start_time = time.time()
    batch_idx_list = {}
    # Candidate target images must have a different label than the clean image.
    other_label_test_idx = (all_test_label != label_cpu[0])
    other_label_test_data = all_test_data[other_label_test_idx]
    other_label_test_label = all_test_label[other_label_test_idx]
    num_other_label_img = other_label_test_data.size(0)
    # Setting candidate targeted images (random indices with replacement)
    candidate_indices = torch.zeros(num_total_target_images).long().random_(0, num_other_label_img)
    # BUGFIX: the original called math.ceil() but `math` is never imported in
    # this script, raising NameError at runtime.  Equivalent integer ceiling
    # division needs no import.
    num_batches = (num_total_target_images + target_images_size - 1) // target_images_size
    # Init index of image which was attacked successfully
    adv_idx = 0
    for i in range(num_batches):
        bstart = i * target_images_size
        bend = min(bstart + target_images_size, num_total_target_images)
        target_inputs = other_label_test_data[candidate_indices[bstart:bend]]
        target_labels_cpu = other_label_test_label[candidate_indices[bstart:bend]]
        target_inputs, target_labels = target_inputs.to(device), target_labels_cpu.to(device)
        input, label = input.to(device), label_cpu.to(device)
        # Repeat the single clean image to pair it with every target image.
        inputs = input.repeat(target_images_size, 1, 1, 1)
        labels = label.repeat(target_images_size)
        x_batch_adv, predicted = attack(net, inputs, target_inputs, labels, config_feature_attack)
        print((x_batch_adv - inputs).max(), (x_batch_adv - inputs).min())
        not_correct_idices = (predicted != labels).nonzero().view(-1)
        not_corrent_num = not_correct_idices.size(0)
        attack_success_num = predicted.eq(target_labels).sum().item()
        per_image_acc[clean_idx] = (not_corrent_num == 0)
        # At least one misclassified -> untargeted success; stop trying targets.
        if not_corrent_num != 0:
            untarget_success_count += 1
            if attack_success_num != 0:
                target_success_count += 1
            adv_idx = not_correct_idices[0]
            break
    total += 1
    duration = time.time() - start_time
    print(
        "step %d, duration %.2f, aver untargeted attack success %.2f, aver targeted attack success %.2f"
        % (clean_idx, duration, 100. * untarget_success_count / total, 100.*target_success_count / total))
    sys.stdout.flush()
acc = 100. * untarget_success_count / total
print('Val acc:', acc)
print('Storing examples')
| [
"argparse.ArgumentParser",
"torchvision.datasets.CIFAR10",
"torch.nn.CosineSimilarity",
"sys.stdout.flush",
"torch.autograd.gradcheck.zero_gradients",
"torchvision.datasets.SVHN",
"torch.utils.data.DataLoader",
"torch.load",
"torch.sign",
"torch.zeros",
"torch.zeros_like",
"torch.autograd.Vari... | [((537, 562), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (560, 562), False, 'import argparse\n'), ((2735, 2824), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(10000)', 'shuffle': '(False)', 'num_workers': '(20)'}), '(testset, batch_size=10000, shuffle=False,\n num_workers=20)\n', (2762, 2824), False, 'import torch\n'), ((3162, 3183), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3181, 3183), True, 'import torch.nn as nn\n'), ((5310, 5335), 'numpy.zeros', 'np.zeros', (['[num_eval_imgs]'], {}), '([num_eval_imgs])\n', (5318, 5335), True, 'import numpy as np\n'), ((1567, 1592), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1590, 1592), False, 'import torch\n'), ((1953, 2055), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""../data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='../data', train=False, download=True,\n transform=transform_test)\n", (1981, 2055), False, 'import torchvision\n'), ((3120, 3147), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (3130, 3147), False, 'import torch\n'), ((3447, 3485), 'torch.nn.CosineSimilarity', 'nn.CosineSimilarity', ([], {'dim': '(-1)', 'eps': '(1e-06)'}), '(dim=-1, eps=1e-06)\n', (3466, 3485), True, 'import torch.nn as nn\n'), ((5493, 5504), 'time.time', 'time.time', ([], {}), '()\n', (5502, 5504), False, 'import time\n'), ((7902, 7920), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7918, 7920), False, 'import sys\n'), ((2228, 2331), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""../data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='../data', train=False, download=True,\n transform=transform_test)\n", (2257, 2331), False, 'import torchvision\n'), ((3917, 3941), 'torch.clamp', 
'torch.clamp', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (3928, 3941), False, 'import torch\n'), ((4124, 4141), 'torch.autograd.gradcheck.zero_gradients', 'zero_gradients', (['x'], {}), '(x)\n', (4138, 4141), False, 'from torch.autograd.gradcheck import zero_gradients\n'), ((4709, 4737), 'torch.clamp', 'torch.clamp', (['x_adv', '(0.0)', '(1.0)'], {}), '(x_adv, 0.0, 1.0)\n', (4720, 4737), False, 'import torch\n'), ((4750, 4765), 'torch.autograd.Variable', 'Variable', (['x_adv'], {}), '(x_adv)\n', (4758, 4765), False, 'from torch.autograd import Variable\n'), ((7585, 7596), 'time.time', 'time.time', ([], {}), '()\n', (7594, 7596), False, 'import time\n'), ((1769, 1790), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1788, 1790), True, 'import torchvision.transforms as transforms\n'), ((2504, 2604), 'torchvision.datasets.SVHN', 'torchvision.datasets.SVHN', ([], {'root': '"""../data"""', 'split': '"""test"""', 'download': '(True)', 'transform': 'transform_test'}), "(root='../data', split='test', download=True,\n transform=transform_test)\n", (2529, 2604), False, 'import torchvision\n'), ((4639, 4673), 'torch.max', 'torch.max', (['x_adv', '(inputs - epsilon)'], {}), '(x_adv, inputs - epsilon)\n', (4648, 4673), False, 'import torch\n'), ((1878, 1899), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1897, 1899), True, 'import torchvision.transforms as transforms\n'), ((4589, 4612), 'torch.sign', 'torch.sign', (['x.grad.data'], {}), '(x.grad.data)\n', (4599, 4612), False, 'import torch\n'), ((3857, 3876), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (3873, 3876), False, 'import torch\n'), ((5840, 5876), 'torch.zeros', 'torch.zeros', (['num_total_target_images'], {}), '(num_total_target_images)\n', (5851, 5876), False, 'import torch\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import urllib
import numpy as np
import json
from tqdm.autonotebook import tqdm
#%matplotlib inline
# Register tqdm's pandas integration (enables .progress_apply on DataFrames).
tqdm.pandas()#tqdm)
# import jellyfish
import dask.dataframe as dd
# from dask.multiprocessing import get
from importlib import reload
import AddressCleanserUtils
# Reload to pick up local edits when re-running inside a notebook session.
reload(AddressCleanserUtils)
from AddressCleanserUtils import *
# import multiprocessing
import logging
# Root logger; verbosity may be overridden by -v / -q command-line flags below.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Silence chatty HTTP client libraries.
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# In[ ]:
# In[3]:
# Overall run timer (datetime comes from AddressCleanserUtils' star import).
starting_time = datetime.now()
# In[4]:
# Default parameters; may be overridden by command-line options below.
config_file = "config_batch"
address_file = "./address.csv.gz"
sample_size = None
import sys, getopt
# -c config module, -a address file, -f run-in-jupyter, -s sample size,
# -v verbose, -q quiet.
opts, args = getopt.getopt(sys.argv[1:],"f:c:a:s:vq",[])
for opt, arg in opts:
    if opt == "-c":
        config_file = arg
    if opt == "-a":
        address_file = arg
    if opt == "-f":
        print("Run in jupyter ...", arg)
        AddressCleanserUtils.within_jupyter=True
    if opt == "-s":
        sample_size = int(arg)
    if opt == "-v": # Verbose
        logger.setLevel(logging.DEBUG)
    if opt == "-q": # quiet
        logger.setLevel(logging.WARNING)
# In[24]:
# Hardcoded parameters used when running interactively inside Jupyter.
if AddressCleanserUtils.within_jupyter :
    log("Running in Jupyter, using hardcoded parameters")
    # config_file = "config_best"
    # address_file = "./best.csv.gz"
    config_file = "config_batch"
    address_file = "./address.csv.gz"
    sample_size = 10000
    AddressCleanserUtils.photon_host = "127.0.0.1:2322"
    AddressCleanserUtils.libpostal_host = "172.18.0.3:6060"
    # with_dask=False
    # %matplotlib inline
# In[6]:
import importlib
log(f"Loading config file {config_file}")
config_module = importlib.import_module(config_file)
# In[7]:
# Check that some required variables are present in the configuration file
field_names = ["street_field","housenbr_field","city_field","postcode_field", "country_field", "addr_key_field"]
#other_var_names = ["photon_host","osm_host","libpostal_host", "regex_replacements"]
other_var_names = ["regex_replacements"]
for var_name in field_names + other_var_names:
    assert var_name in dir(config_module), var_name + " not defined in config module " + config_file
# In[ ]:
# In[8]:
# Propagate the configuration into the shared utils module (its functions
# read these module-level globals).
AddressCleanserUtils.street_field = config_module.street_field
AddressCleanserUtils.housenbr_field = config_module.housenbr_field
AddressCleanserUtils.city_field = config_module.city_field
AddressCleanserUtils.postcode_field = config_module.postcode_field
AddressCleanserUtils.country_field = config_module.country_field
AddressCleanserUtils.addr_key_field = config_module.addr_key_field
AddressCleanserUtils.regex_replacements = config_module.regex_replacements
AddressCleanserUtils.use_osm_parent = use_osm_parent
AddressCleanserUtils.with_rest_libpostal = with_rest_libpostal
# In[9]:
AddressCleanserUtils.pbar.register()
# In[10]:
# Check that Nominatim server is running properly
try:
    osm = get_osm("Bruxelles")
    assert osm[0]["namedetails"]["name:fr"] == "Bruxelles"
    vlog("OSM working properly")
except Exception as e:
    print("OSM not up & running")
    print("OSM host: ", AddressCleanserUtils.osm_host)
    raise e
# In[15]:
# In old version of Nominatim, page "details.php" could NOT return a JSON result, allowing to get place details from a place id
# In newer version, this has been added, allowing to get details about the parent of a place
# Is case "use_osm_parent" is true, check that "details.php" works correctly
if AddressCleanserUtils.use_osm_parent:
    try :
        osm_det = get_osm_details(osm[0]["place_id"])
        assert osm_det["place_id"] == osm[0]["place_id"]
        vlog("OSM details working properly")
    except Exception as e:
        print("OSM details not working")
        print("OSM host: ", AddressCleanserUtils.osm_host)
        raise e
# In[18]:
# Check that Photon server is running properly
try:
    ph = get_photon("Bruxelles")
    assert ph["features"][0]["properties"]["name"] == "Brussels"
    vlog("Photon working properly")
except Exception as e:
    print("Photon not up & running ; Start it with 'nohup java -jar photon-*.jar &'")
    print("Photon host: ", AddressCleanserUtils.photon_host)
    raise e
# In[25]:
# Check that Libpostal is running properly
try:
    lpost = parse_address("Bruxelles")
    assert lpost[0][0] == "bruxelles"
    vlog("Libpostal working properly")
except Exception as e:
    print("Libpostal not up & running ")
    print("Libpostal: ", AddressCleanserUtils.libpostal_host)
    raise e
# # Data preparation
# In[ ]:
# Get the addresses dataframe. Config module has to contain a "get_addresses(filename)" function, returning a dataframe, with
# column names defined by variables (defined in config module) : street_field, housenbr_field, city_field, postcode_field , addr_key_field
log("Getting addresses")
addresses = config_module.get_addresses(address_file)
log(f"Got {addresses.shape[0]} addresses")
log(addresses)
# In[14]:
if sample_size and sample_size < addresses.shape[0]:
    log(f"Keep a sample of size {sample_size}")
    addresses = addresses.sample(sample_size)
# In[15]:
# Check that all required fields are present in addresses dataframe
for field in field_names:
    assert config_module.__getattribute__(field) in addresses, f"Field {field} missing in data !"
# In[16]:
# Check that the address identifier defined in config_module.addr_key_field is unique
assert addresses[addresses[config_module.addr_key_field].duplicated()].shape[0] == 0, "Key should be unique"
# In[17]:
# Normalise text columns: strip whitespace and upper-case object columns,
# coerce everything else to str.
vlog("Stripping and upper casing...")
addresses = addresses.apply(lambda col: col.fillna("").astype(str).str.strip().str.upper() if col.dtype.kind=='O' else col.astype(str) )
# # Main loop
# In[18]:
# Transformer pipelines, tried in order until an address gets an accepted
# match (see main_loop below).
transformers_sequence = [ ["orig"],
                          ["regex[init]"],
                          ["nonum"],
                          ["libpostal", "regex[lpost]"],
                          ["libpostal", "regex[lpost]", "nonum"],
                          ["libpostal", "regex[lpost]", "photon"],
                          ["libpostal", "regex[lpost]", "photon", "nonum"],
                          ["photon"],
                          ["photon", "nonum"],
                          ["nostreet"]
                        ]
# In[19]:
def main_loop(chunk):
    """
    Process the full cleansing sequence on a chunk of addresses:
    - Apply a sequence of transformers (possibly empty)
    - Send the (transformed) addresses to Nominatim
    - Parse and check the results
    - For the addresses with no (accepted) result, try the next sequence
      of transformers

    Parameters
    ----------
    chunk : pandas.DataFrame
        Slice of the input addresses, one row per address.

    Returns
    -------
    tuple of (osm_addresses, rejected_addresses, stats)
        Accepted geocoding results, rejected candidates, and per-step
        statistics dictionaries.
    """
    log(f"Chunk size: {chunk.shape[0]}")
    vlog(chunk)
    osm_addresses = pd.DataFrame()
    rejected_addresses = pd.DataFrame()
    stats = []
    init_chunk_size = chunk.shape[0]

    for transformers in transformers_sequence:
        vlog("--------------------------")
        vlog(f"| Transformers : { ';'.join(transformers) }")
        vlog("--------------------------")

        osm_results, rejected, step_stats = transform_and_process(chunk, transformers, config_module.addr_key_field,
                                                                 config_module.street_field, config_module.housenbr_field,
                                                                 config_module.city_field, config_module.postcode_field,
                                                                 config_module.country_field,
                                                                 check_osm_results=check_osm_results)
        # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
        # 2.0; pd.concat is the documented, behaviour-identical replacement.
        osm_addresses = pd.concat([osm_addresses, osm_results], sort=False).drop_duplicates()
        rejected_addresses = pd.concat([rejected_addresses, rejected], sort=False).drop_duplicates()

        vlog("Results: ")
        vlog(osm_results.head())
        vlog(osm_results.shape)
        vlog(f"Match rate so far: {osm_addresses.shape[0] / init_chunk_size if init_chunk_size > 0 else '(empty chunk size)'}")

        stats.append(step_stats)
        vlog(step_stats)

        # Keep only the addresses that still have no accepted result; they
        # will be retried with the next transformer sequence.
        chunk = chunk[~chunk[config_module.addr_key_field].isin(osm_results[config_module.addr_key_field])].copy()

    # Timing breakdown accumulated by AddressCleanserUtils during processing.
    ts = AddressCleanserUtils.timestats
    tot = np.sum([ts[key] for key in ts])
    if tot.total_seconds()>0:
        for key in ts:
            vlog(f"{key:12}: {ts[key]} ({100*ts[key]/tot:.3} %)")

    vlog("")
    vlog("")
    vlog("####################")
    vlog("")
    vlog("")

    log("Chunk results: ")
    log(osm_addresses)
    log(f"Chunk match rate: {(osm_addresses.shape[0] / init_chunk_size) if init_chunk_size > 0 else '(empty chunk size)'}")
    log(pd.DataFrame(stats))
    return osm_addresses, rejected_addresses, stats
# In[ ]:
# In[20]:
# Compute the number of chunks
min_nb_chunks= 4
if addresses.shape[0] > max_chunk_size * min_nb_chunks:
chunk_size = max_chunk_size
elif addresses.shape[0] < min_chunk_size * min_nb_chunks:
chunk_size = min_chunk_size
else:
chunk_size = int(np.sqrt(max_chunk_size *min_chunk_size))
log(f"Chunk_size: {chunk_size}")
# Do the main processing, with dask or simply in sequential chunks.
#
# Processing a chunk may require at some point a huge amount of memory. A single chunk with a few millions addresses may result in memory error ; this is why we split the main addresses dataframe is smaller chunks.
#
# In[21]:
stats = []
if with_dask :
from dask.diagnostics import Profiler, ResourceProfiler
#AddressCleanserUtils.with_dask = False
# Sorting : allow to increase the probability to have duplicates within a chunk
dd_to_process = dd.from_pandas(addresses.sort_values([config_module.postcode_field, config_module.street_field]).reset_index(drop=True),
chunksize=chunk_size)
dask_task = dd_to_process.map_partitions(main_loop)
with Profiler() as prof, ResourceProfiler() as rprof :
res = dask_task.compute(scheduler='processes')
log("All chunks done, gather all results...")
osm_addresses = pd.concat([chunk_osm_addresses for (chunk_osm_addresses, _, _) in res], sort=False)
rejected_addresses = pd.concat([chunk_rejected_addresses for (_, chunk_rejected_addresses, _) in res], sort=False)
for (_, _, chunk_stats) in res:
stats.extend(chunk_stats)
log(f"Global match rate: { osm_addresses.shape[0]/addresses.shape[0] } ")
else:
#AddressCleanserUtils.with_dask = True
osm_addresses = pd.DataFrame()
rejected_addresses = pd.DataFrame()
chunks_boundaries = range(chunk_size, addresses.shape[0] , chunk_size)
for chunk in tqdm(np.array_split(addresses.sort_values([config_module.postcode_field, config_module.street_field]), chunks_boundaries)):
chunk_osm_addresses, chunk_rejected_addresses, chunk_stats = main_loop(chunk)
osm_addresses = osm_addresses.append(chunk_osm_addresses, sort=False).drop_duplicates()
rejected_addresses = rejected_addresses.append(chunk_rejected_addresses, sort=False).drop_duplicates()
log(f"Global match rate so far: {osm_addresses.shape[0]/addresses.shape[0]}")
stats.extend(chunk_stats)
# In[22]:
# inclusion_test("NEU", "NEUCHATEAU")
# In[23]:
addresses
# In[24]:
# get_osm("6840 NEUFCHÂTEAU")
# In[25]:
if with_dask:
from dask.diagnostics import visualize
from bokeh.io import output_notebook, output_file
output_file("dask_stats.html")
# output_notebook()
visualize([prof, rprof])
# In[26]:
# osm_addresses.SIM_street_which.value_counts() /osm_addresses.shape[0] #.plot.pie()
# # Rejected addresses
# Give some statistics about rejected adresses.
# "rejected_addresses" contains two types of rejected addresses :
# - rejected_addresses["reject_reason"] == "mismatch" : by comparing field by field input address and output address, this addresses has been rejected
# - rejected_addresses["reject_reason"] == "tail" : when OSM returns several results, only one is kept in "osm_addresses", all the others are put in rejected_addresses
#
# Note that an addresse may have been rejected at a specific step (for a giver sequence of transformer), but not at another one.
# "rejected_addresses" may then contain a lot of addresses for which a result has been accepted further on.
#
# "rejected_addresses_final" contains the only addresses for which all results have been rejected.
#
# In[27]:
# Keep only addresses rejected by field-by-field comparison ("mismatch") and
# for which no other candidate result was accepted.
rejected_addresses_final = rejected_addresses[rejected_addresses["reject_reason"] == "mismatch"]
rejected_addresses_final = rejected_addresses_final[~rejected_addresses_final[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field])]
# Needed with check_with_transformed = True (but doesn't hurt if not)
rejected_addresses_final = rejected_addresses_final.drop([config_module.street_field,
                                                          config_module.housenbr_field,
                                                          config_module.postcode_field,
                                                          config_module.city_field,
                                                          config_module.country_field],
                                                         axis=1
                                                        )
# print(rejected_addresses.keys())
# print(osm_addresses.keys())
# print(rejected_addresses.keys() & osm_addresses.keys())
# Re-attach the original input fields and keep one row per (method, address,
# candidate result) combination, sorted by street similarity.
rejected_addresses_final = rejected_addresses_final.merge(addresses).sort_values(["SIM_street", config_module.addr_key_field])[["method",
                  config_module.addr_key_field, "osm_addr_in",
                  config_module.street_field, config_module.housenbr_field, config_module.postcode_field, config_module.city_field, config_module.country_field,
                  "addr_out_street", "addr_out_city", "addr_out_number", "addr_out_postcode", "addr_out_other", "SIM_street", "SIM_zip"]].drop_duplicates()
log("Rejected addresses: ")
log(rejected_addresses_final)
# In[28]:
log(f"Number of unique rejected addresses: {rejected_addresses_final[config_module.addr_key_field].nunique()}")
# In[29]:
log(f"Number of unique city-streets in rejected addresses: {rejected_addresses_final[[config_module.postcode_field, config_module.street_field]].drop_duplicates().shape[0]}")
# In[30]:
rejected_addresses_final[rejected_addresses_final.addr_out_street.isnull()]
# In[31]:
rejected_addresses_final[rejected_addresses_final.addr_out_street.notnull()]#.drop(["method"], axis=1).drop_duplicates()
# In[32]:
# Swap street - city
log("Rejected addresses, but where it might have a swap between street and city")
str_cmp= street_compare(rejected_addresses_final[config_module.street_field], rejected_addresses_final.addr_out_city)
x= rejected_addresses_final[(str_cmp>0.5) & (rejected_addresses_final.addr_out_street.isnull()) & (rejected_addresses_final.SIM_zip >= 0.1)].drop_duplicates(subset=config_module.addr_key_field)
log(x)
log(f"Number of unique addresses: {x[config_module.addr_key_field].nunique()}")
# In[33]:
# Other mismatches
rejected_addresses_final[(str_cmp<=0.5) | (rejected_addresses_final.addr_out_street.notnull()) | (rejected_addresses_final.SIM_zip < 0.1)].drop_duplicates(subset=config_module.addr_key_field)
# # No match
# In[34]:
log("Addresses with no match (but some matches where rejected)")
log(addresses[~addresses[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field]) & addresses[config_module.addr_key_field].isin(rejected_addresses[config_module.addr_key_field])])
# In[35]:
rejected_addresses
# In[36]:
log("Addresses with no match at all")
# Addresses that neither got an accepted result nor any rejected candidate.
no_match = addresses[~addresses[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field]) & ~addresses[config_module.addr_key_field].isin(rejected_addresses[config_module.addr_key_field])]
log(no_match)
# In[37]:
log(f"Number of unique city-streets in no match addresses: {no_match[[config_module.postcode_field, config_module.street_field]].drop_duplicates().shape[0]}")
# In[38]:
log("Main cities in no match addresses: ")
log(no_match[config_module.city_field].value_counts().head(10))
# In[39]:
log("Main streets in no match addresses: ")
log(no_match[config_module.street_field].value_counts().head(10))
# # Extra house number
# In many situation, OSM does not return a correct house number :
# - Either because the building is not known by OSM. In this case, house number is empty in result
# - Or because house number in input also contains information such as box, level...
#
# We then consider that house number is not reliable enough and compute our own house number field, named "extra_house_nbr"
# In[40]:
# OSM house numbers are unreliable (missing building, or the input number
# contains box/level info), so compute our own "extra_house_nbr" column and
# compare it against what OSM returned.
log("Add extra house number")
osm_addresses = add_extra_house_number(osm_addresses, addresses, street_field=config_module.street_field, housenbr_field=config_module.housenbr_field)
# In[41]:
# osm_addresses.drop("extra_house_nbr", axis=1, inplace=True)
# In[42]:
# FIX: np.NaN was an alias of np.nan, removed in NumPy 2.0; same value.
ex_hs_nb = osm_addresses[[config_module.addr_key_field, "osm_addr_in", "extra_house_nbr", "addr_out_number"]].replace("", np.nan)
# In[43]:
log("Add new information: ")
log(ex_hs_nb[ex_hs_nb.addr_out_number.isnull() & ex_hs_nb.extra_house_nbr.notnull()])
# In[44]:
log("No number at all: ")
log(ex_hs_nb[ex_hs_nb.addr_out_number.isnull() & ex_hs_nb.extra_house_nbr.isnull()])
# In[45]:
log("Agreed: ")
log(ex_hs_nb[ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.notnull() & (ex_hs_nb.addr_out_number == ex_hs_nb.extra_house_nbr)])
# In[46]:
log("Disagreed: ")
log(ex_hs_nb[ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.notnull() & (ex_hs_nb.addr_out_number != ex_hs_nb.extra_house_nbr)])
# In[47]:
log("Error: ") # There were no number in input, but OSM found one
log(ex_hs_nb[ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.isnull()])
# In[48]:
# Mutually exclusive categories: their counts must sum to the total below.
extra_address_stats = {
    "New information" : (ex_hs_nb.addr_out_number.isnull() & ex_hs_nb.extra_house_nbr.notnull()).sum(),
    "No number at all": (ex_hs_nb.addr_out_number.isnull() & ex_hs_nb.extra_house_nbr.isnull() ).sum(),
    "Agree" : (ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.notnull() & (ex_hs_nb.addr_out_number == ex_hs_nb.extra_house_nbr)).sum(),
    "Disagree": (ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.notnull() & (ex_hs_nb.addr_out_number != ex_hs_nb.extra_house_nbr)).sum(),
    "Error" : (ex_hs_nb.addr_out_number.notnull() & ex_hs_nb.extra_house_nbr.isnull()).sum()
}
extra_address_stats = pd.DataFrame(extra_address_stats, index=["Count"]).T
log(extra_address_stats)
# In[49]:
# extra_address_stats.Count.plot.pie(label="", autopct='%1.1f%%')
# In[50]:
assert extra_address_stats.Count.sum() == osm_addresses.shape[0]
# # Some stats
# In[51]:
_stats = pd.DataFrame(stats)[["method","todo", "sent", "match", "match_26", "reject_rec", "reject_addr", "reject_mism"]]
_stats = _stats.reset_index().groupby("method").sum().reset_index().sort_values("index").drop("index", axis=1)
# In[52]:
assert osm_addresses.shape[0] == _stats["match"].sum()
# In[53]:
log(f"Global match rate : {osm_addresses.shape[0]/addresses.shape[0]}")
# In[54]:
rejected_count = rejected_addresses[~rejected_addresses[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field])][config_module.addr_key_field].nunique()
rejected_count
nomatch_count = addresses[~addresses[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field]) & ~addresses[config_module.addr_key_field].isin(rejected_addresses[config_module.addr_key_field])].shape[0]
rejected_count, nomatch_count
# In[55]:
#rejected_addresses[~rejected_addresses[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field])]
# In[56]:
# osm_addresses[osm_addresses.EntityNumber == "2.227.707.047"]
# In[57]:
missing_address_count = addresses.shape[0] - osm_addresses.shape[0]
assert rejected_count + nomatch_count == missing_address_count
# print("Missing : ", missing_address_count)
# In[58]:
# DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0;
# build the two summary rows once and concatenate instead.  Row order and
# the sort=False column behaviour match the old append call.
_extra_rows = pd.DataFrame([{"method": "reject", "todo": rejected_count, "match": rejected_count},
                            {"method": "nomatch", "todo": nomatch_count, "match": nomatch_count}])
_stats = pd.concat([_stats, _extra_rows], sort=False)
# In[59]:
_stats["match rate"] = _stats["match"]/_stats["sent"]
_stats["glob match rate"] = _stats["match"]/addresses.shape[0]
log(_stats[_stats.match > 0])#.sort_values("match", ascending=False)
# In[60]:
#
# In[61]:
if AddressCleanserUtils.within_jupyter:
import matplotlib.pyplot as plt
_stats.set_index("method").match.plot.pie()
plt.tight_layout()
# In[62]:
log(f"Place ranks: \n{osm_addresses.place_rank.value_counts().to_string()}")
# In[63]:
osm_addresses.place_rank.value_counts() / osm_addresses.shape[0]
# In[64]:
if AddressCleanserUtils.within_jupyter:
osm_addresses.place_rank.value_counts().plot.bar()
# In[65]:
if AddressCleanserUtils.within_jupyter:
osm_addresses.place_rank.value_counts().plot.pie()
# In[66]:
if AddressCleanserUtils.within_jupyter:
osm_addresses.addr_out_number.isnull().value_counts().plot.bar()
# In[67]:
if AddressCleanserUtils.within_jupyter:
addresses[config_module.housenbr_field].isnull().value_counts().plot.bar()
# In[68]:
# Remark : only works when dask is not used
# Gives times used of transformer, querying & processing osm, and checking results
if not with_dask:
ts = AddressCleanserUtils.timestats
tot = np.sum([ts[key] for key in ts])
for key in ts:
log(f"{key:12}: {ts[key]} ({100*ts[key]/tot:.3} %)")
# In[85]:
log("Country statistics")
x = addresses.merge(osm_addresses, how="outer") #[[config_module.country_field, "addr_out_country"]].value_counts()
log(pd.crosstab(x[config_module.country_field].fillna("[none]"), x["addr_out_country"].fillna("[none]"), margins=True))
# # Output
# In[ ]:
output_folder = address_file.rsplit(".", 1)[0]
import os
try:
os.mkdir(output_folder)
except OSError:
log ("Creation of the directory %s failed" % output_folder)
else:
log ("Successfully created the directory %s " % output_folder)
output_filename_xls = output_folder + "/match.xlsx"
output_filename_pkl = output_folder + "/match.pkl"
nomatch_filename = output_folder + "/nomatch.xlsx"
reject_filename = output_folder + "/reject.xlsx"
stats_filename = output_folder + "/stats.xlsx"
# In[ ]:
final_output = addresses.merge(osm_addresses, how="left")
log(f"Writing results on {output_filename_xls} ...")
try:
final_output.to_excel(output_filename_xls)
except Exception as e:
log("Failed ! ")
log(e)
log(f"Writing results on {output_filename_pkl} ...")
try:
final_output.to_pickle(output_filename_pkl)
except Exception as e:
log("Failed ! ")
log(e)
# In[ ]:
# In[ ]:
log(f"Writing rejected on {reject_filename} ...")
try:
rejected_addresses_final.sort_values([config_module.addr_key_field]).set_index([config_module.addr_key_field,
config_module.street_field,
config_module.housenbr_field,
config_module.postcode_field,
config_module.city_field,
config_module.country_field,
"method"]).to_excel(reject_filename)
except Exception as e:
log("Failed ! ")
log(e)
log(f"Writing nomatch on {nomatch_filename} ...")
try:
nomatch = addresses[~addresses[config_module.addr_key_field].isin(osm_addresses[config_module.addr_key_field]) & ~addresses[config_module.addr_key_field].isin(rejected_addresses[config_module.addr_key_field])]
nomatch.to_excel(nomatch_filename)
except Exception as e:
log("Failed ! ")
log(e)
# In[ ]:
log(f"Writing stats on {stats_filename} ...")
try:
with pd.ExcelWriter(stats_filename) as writer:
_stats.to_excel(writer, "match_rate")
pr_vc = osm_addresses.place_rank.value_counts()
pr_vc = pd.concat([pr_vc, pr_vc/ osm_addresses.shape[0]], axis=1)
pr_vc.columns = ["Count", "%"]
pr_vc.to_excel(writer, "place_rank")
except Exception as e:
log("Failed ! ")
log(e)
# In[ ]:
log("Done !")
log(f"Total time : {datetime.now() - starting_time}")
| [
"pandas.DataFrame",
"os.mkdir",
"numpy.sum",
"getopt.getopt",
"importlib.import_module",
"AddressCleanserUtils.pbar.register",
"pandas.ExcelWriter",
"dask.diagnostics.ResourceProfiler",
"tqdm.autonotebook.tqdm.pandas",
"bokeh.io.output_file",
"importlib.reload",
"dask.diagnostics.Profiler",
... | [((176, 189), 'tqdm.autonotebook.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (187, 189), False, 'from tqdm.autonotebook import tqdm\n'), ((345, 373), 'importlib.reload', 'reload', (['AddressCleanserUtils'], {}), '(AddressCleanserUtils)\n', (351, 373), False, 'from importlib import reload\n'), ((460, 479), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (477, 479), False, 'import logging\n'), ((808, 853), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""f:c:a:s:vq"""', '[]'], {}), "(sys.argv[1:], 'f:c:a:s:vq', [])\n", (821, 853), False, 'import sys, getopt\n'), ((1845, 1881), 'importlib.import_module', 'importlib.import_module', (['config_file'], {}), '(config_file)\n', (1868, 1881), False, 'import importlib\n'), ((2998, 3034), 'AddressCleanserUtils.pbar.register', 'AddressCleanserUtils.pbar.register', ([], {}), '()\n', (3032, 3034), False, 'import AddressCleanserUtils\n'), ((6977, 6991), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6989, 6991), True, 'import pandas as pd\n'), ((7019, 7033), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7031, 7033), True, 'import pandas as pd\n'), ((10443, 10529), 'pandas.concat', 'pd.concat', (['[chunk_osm_addresses for chunk_osm_addresses, _, _ in res]'], {'sort': '(False)'}), '([chunk_osm_addresses for chunk_osm_addresses, _, _ in res], sort=\n False)\n', (10452, 10529), True, 'import pandas as pd\n'), ((10562, 10657), 'pandas.concat', 'pd.concat', (['[chunk_rejected_addresses for _, chunk_rejected_addresses, _ in res]'], {'sort': '(False)'}), '([chunk_rejected_addresses for _, chunk_rejected_addresses, _ in\n res], sort=False)\n', (10571, 10657), True, 'import pandas as pd\n'), ((10886, 10900), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10898, 10900), True, 'import pandas as pd\n'), ((10928, 10942), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10940, 10942), True, 'import pandas as pd\n'), ((11834, 11864), 'bokeh.io.output_file', 'output_file', 
(['"""dask_stats.html"""'], {}), "('dask_stats.html')\n", (11845, 11864), False, 'from bokeh.io import output_notebook, output_file\n'), ((11893, 11917), 'dask.diagnostics.visualize', 'visualize', (['[prof, rprof]'], {}), '([prof, rprof])\n', (11902, 11917), False, 'from dask.diagnostics import visualize\n'), ((18987, 19037), 'pandas.DataFrame', 'pd.DataFrame', (['extra_address_stats'], {'index': "['Count']"}), "(extra_address_stats, index=['Count'])\n", (18999, 19037), True, 'import pandas as pd\n'), ((19264, 19283), 'pandas.DataFrame', 'pd.DataFrame', (['stats'], {}), '(stats)\n', (19276, 19283), True, 'import pandas as pd\n'), ((20546, 20709), 'pandas.DataFrame', 'pd.DataFrame', (["[{'method': 'reject', 'todo': rejected_count, 'match': rejected_count}, {\n 'method': 'nomatch', 'todo': nomatch_count, 'match': nomatch_count}]"], {}), "([{'method': 'reject', 'todo': rejected_count, 'match':\n rejected_count}, {'method': 'nomatch', 'todo': nomatch_count, 'match':\n nomatch_count}])\n", (20558, 20709), True, 'import pandas as pd\n'), ((21136, 21154), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21152, 21154), True, 'import matplotlib.pyplot as plt\n'), ((22009, 22040), 'numpy.sum', 'np.sum', (['[ts[key] for key in ts]'], {}), '([ts[key] for key in ts])\n', (22015, 22040), True, 'import numpy as np\n'), ((22491, 22514), 'os.mkdir', 'os.mkdir', (['output_folder'], {}), '(output_folder)\n', (22499, 22514), False, 'import os\n'), ((511, 540), 'logging.getLogger', 'logging.getLogger', (['"""requests"""'], {}), "('requests')\n", (528, 540), False, 'import logging\n'), ((567, 595), 'logging.getLogger', 'logging.getLogger', (['"""urllib3"""'], {}), "('urllib3')\n", (584, 595), False, 'import logging\n'), ((8533, 8564), 'numpy.sum', 'np.sum', (['[ts[key] for key in ts]'], {}), '([ts[key] for key in ts])\n', (8539, 8564), True, 'import numpy as np\n'), ((9007, 9026), 'pandas.DataFrame', 'pd.DataFrame', (['stats'], {}), '(stats)\n', (9019, 9026), 
True, 'import pandas as pd\n'), ((10255, 10265), 'dask.diagnostics.Profiler', 'Profiler', ([], {}), '()\n', (10263, 10265), False, 'from dask.diagnostics import Profiler, ResourceProfiler\n'), ((10275, 10293), 'dask.diagnostics.ResourceProfiler', 'ResourceProfiler', ([], {}), '()\n', (10291, 10293), False, 'from dask.diagnostics import Profiler, ResourceProfiler\n'), ((24698, 24728), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['stats_filename'], {}), '(stats_filename)\n', (24712, 24728), True, 'import pandas as pd\n'), ((24867, 24925), 'pandas.concat', 'pd.concat', (['[pr_vc, pr_vc / osm_addresses.shape[0]]'], {'axis': '(1)'}), '([pr_vc, pr_vc / osm_addresses.shape[0]], axis=1)\n', (24876, 24925), True, 'import pandas as pd\n'), ((9378, 9418), 'numpy.sqrt', 'np.sqrt', (['(max_chunk_size * min_chunk_size)'], {}), '(max_chunk_size * min_chunk_size)\n', (9385, 9418), True, 'import numpy as np\n')] |
import cplex
import numpy as np
import warnings
import time
from .lp import Solution
def solve(formula, display=True, export=False, params=None):
    """Solve an optimization problem with the CPLEX Python API.

    Parameters
    ----------
    formula : object
        Problem description exposing ``obj``, ``linear`` (a CSR matrix),
        ``sense``, ``const``, ``lb``, ``ub``, ``vtype`` and ``qmat``
        (index lists describing second-order cones).
    display : bool
        Print solver progress and a result summary.
    export : bool
        Unused; kept for interface compatibility.
    params : dict, optional
        Dotted CPLEX parameter paths (e.g. ``"timelimit"`` or
        ``"mip.tolerances.mipgap"``) mapped to the values to set.

    Returns
    -------
    Solution or None
        ``Solution(objective, x, status)`` when the status code indicates a
        feasible/optimal result, otherwise ``None`` (with a warning).

    Raises
    ------
    ValueError
        If a parameter value is of an incorrect type or value.
    """
    # Bug fix: `params={}` was a mutable default argument.
    if params is None:
        params = {}
    cpx = cplex.Cplex()
    obj = formula.obj.flatten()
    linear = formula.linear
    row = linear.shape[0]
    # Re-pack each CSR row into CPLEX's [indices, values] pair format.
    spmat = [[linear.indices[linear.indptr[i]:linear.indptr[i + 1]].tolist(),
              linear.data[linear.indptr[i]:linear.indptr[i + 1]].tolist()]
             for i in range(row)]
    sense = ['E' if s == 1 else 'L' for s in formula.sense]
    vtype = [cpx.variables.type.integer if vt == 'I' else
             cpx.variables.type.binary if vt == 'B' else
             cpx.variables.type.continuous for vt in formula.vtype]
    if all(np.array(vtype) == cpx.variables.type.continuous):
        # Pure continuous problem: omitting `types` keeps CPLEX in LP/QCP mode.
        cpx.variables.add(obj=obj, lb=formula.lb, ub=formula.ub)
    else:
        cpx.variables.add(obj=obj, lb=formula.lb, ub=formula.ub, types=vtype)
    cpx.linear_constraints.add(lin_expr=spmat,
                               senses=sense, rhs=formula.const)
    # Second-order cone constraints: the first index is the epigraph variable.
    for cone in formula.qmat:
        cone_data = [-1] + [1] * (len(cone) - 1)
        cone = [int(index) for index in cone]
        q = cplex.SparseTriple(ind1=cone, ind2=cone, val=cone_data)
        cpx.quadratic_constraints.add(quad_expr=q)
    if display:
        print('Being solved by CPLEX...', flush=True)
        time.sleep(0.2)
    cpx.set_results_stream(None)
    cpx.set_warning_stream(None)
    try:
        for param, value in params.items():
            # Walk the dotted parameter path with getattr instead of
            # building a source string and eval()-ing it: safer, and also
            # works for string-valued parameters (eval broke on those).
            node = cpx.parameters
            for attr in param.split('.'):
                node = getattr(node, attr)
            node.set(value)
    except (TypeError, ValueError):
        raise ValueError('Incorrect parameters or values.')
    t0 = time.time()
    cpx.solve()
    stime = time.time() - t0
    status = cpx.solution.get_status()
    if display:
        print('Solution status: {0}'.format(status))
        print('Running time: {0:0.4f}s'.format(stime))
    # Status codes accepted as optimal/feasible (LP and MIP variants).
    if status in [1, 6, 10, 11, 12, 13,
                  21, 22,
                  101, 102, 105, 107, 109, 111, 113, 116]:
        obj_val = cpx.solution.get_objective_value()
        x_sol = np.array(cpx.solution.get_values())
        solution = Solution(obj_val, x_sol, status)
    else:
        warnings.warn('No feasible solution can be found.')
        solution = None
    return solution
| [
"cplex.Cplex",
"time.sleep",
"cplex.SparseTriple",
"time.time",
"numpy.array",
"warnings.warn"
] | [((158, 171), 'cplex.Cplex', 'cplex.Cplex', ([], {}), '()\n', (169, 171), False, 'import cplex\n'), ((1833, 1844), 'time.time', 'time.time', ([], {}), '()\n', (1842, 1844), False, 'import time\n'), ((1309, 1364), 'cplex.SparseTriple', 'cplex.SparseTriple', ([], {'ind1': 'cone', 'ind2': 'cone', 'val': 'cone_data'}), '(ind1=cone, ind2=cone, val=cone_data)\n', (1327, 1364), False, 'import cplex\n'), ((1495, 1510), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (1505, 1510), False, 'import time\n'), ((1873, 1884), 'time.time', 'time.time', ([], {}), '()\n', (1882, 1884), False, 'import time\n'), ((2354, 2405), 'warnings.warn', 'warnings.warn', (['"""No feasible solution can be found."""'], {}), "('No feasible solution can be found.')\n", (2367, 2405), False, 'import warnings\n'), ((768, 783), 'numpy.array', 'np.array', (['vtype'], {}), '(vtype)\n', (776, 783), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Train and run a character-level language model. The training set is
# a single ASCII text file.
import numpy as np
import pvml
import sys
# CONFIGURATION
HIDDEN_STATES = [256]    # sizes of the RNN hidden layers (used when building pvml.RNN)
BATCH_SIZE = 16          # sequences per SGD mini-batch
TRAINING_STEPS = 10000   # total number of training steps
MAXLEN = 400             # maximum length of the sample text generated after each report
# encode1/encode/decode map printable ASCII (32-126) to (code - 32); the
# special codes below are SOS = 0, EOS = 1 and UNK = 2 (not 96/97/95 as an
# earlier scheme described).  NOTE(review): under the (code - 32) mapping
# those values collide with ' ', '!' and '"'; the training path is
# unaffected because read_data builds its own alphabet with the three
# special symbols prepended.
REPORT_EVERY = 1000   # approximate number of training steps between progress reports
LEARNING_RATE = 0.01  # learning rate passed to pvml.RNN.train
SOS = 0  # start-of-sequence code (index 0 of the alphabet built in read_data)
EOS = 1  # end-of-sequence code, also used as padding in read_data
UNK = 2  # code for characters missing from the alphabet
def encode1(c):
    """Map one character to its code: printable ASCII (32-126) -> ord - 32, else UNK."""
    code = ord(c)
    if 32 <= code < 127:
        return code - 32
    return UNK
def encode(s):
    """Encode string `s` as a uint8 code array wrapped in SOS/EOS markers."""
    body = [encode1(c) for c in s]
    return np.array([SOS] + body + [EOS], dtype=np.uint8)
def decode(codes):
    """Decode a code sequence back to text; UNK renders as '?'."""
    chars = []
    for c in codes:
        chars.append("?" if c == UNK else chr(32 + c))
    return "".join(chars)
def read_data(filename, seqlen, delimiter=None):
    """Read a text file and return `(codes, alphabet)`.

    The alphabet is the sorted set of characters in the file, prefixed by
    the three special symbols.  The text (optionally split on `delimiter`
    into independent sequences) is encoded and padded with EOS so every
    sequence length is a multiple of `seqlen`; the result has shape
    (num_rows, seqlen).
    """
    with open(filename) as fh:
        text = fh.read()
    alphabet = ["<SOS>", "<EOS>", "<UNK>"] + sorted(set(text))
    encoding = {ch: idx for idx, ch in enumerate(alphabet)}
    chunks = text.split(delimiter) if delimiter is not None else [text]
    rows = []
    for seq in chunks:
        if not seq:
            continue
        codes = [encoding.get(ch, UNK) for ch in seq]
        # Pad with EOS up to the next multiple of seqlen; an exact multiple
        # still receives a full EOS row, guaranteeing a terminator.
        padding = seqlen - len(codes) % seqlen
        codes += [EOS] * padding
        rows.append(np.array(codes).reshape(-1, seqlen))
    return np.concatenate(rows, 0), alphabet
def one_hot_vectors(X, k):
    """Return the one-hot encoding of integer array `X` over `k` classes.

    The result is uint8 with shape (*X.shape, k).
    """
    # Indexing an identity matrix by the codes yields the one-hot rows.
    return np.eye(k, dtype=np.uint8)[X]
def generate(rnn, maxlen, alphabet):
    """Sample at most `maxlen` codes from the RNN, starting at SOS.

    Sampling stops early when EOS is drawn; the codes are rendered back to
    text through `alphabet`.
    """
    k = len(alphabet)
    onehot = np.zeros((1, 1, k), dtype=np.uint8)
    codes = [SOS]
    last = SOS
    state = None
    while len(codes) < maxlen and last != EOS:
        onehot.fill(0)
        onehot[0, 0, last] = 1
        Hs, P = rnn.forward(onehot, state)
        # Carry the last hidden state of each recurrent layer forward.
        state = [H[:, -1, :] for H in Hs[1:]]
        last = np.random.choice(k, p=P[0, -1, :])
        codes.append(last)
    return "".join(alphabet[c] for c in codes)
def train(training_file, seqlen, delimiter):
    """Train the character-level language model on `training_file`.

    Parameters
    ----------
    training_file : str
        Path to the ASCII training text.
    seqlen : int
        Length of the training subsequences.
    delimiter : str or None
        Optional separator splitting the text into independent sequences.
    """
    # Bug fix: previously read sys.argv[1] directly, silently ignoring the
    # `training_file` argument this function was given.
    data, alphabet = read_data(training_file, seqlen, delimiter)
    X = one_hot_vectors(data[:, :-1], len(alphabet))
    Y = data[:, 1:]  # target = input shifted one character ahead
    # Consistency fix: honour the HIDDEN_STATES configuration instead of a
    # hard-coded 256 (identical architecture for the default value).
    rnn = pvml.RNN([len(alphabet), *HIDDEN_STATES, len(alphabet)])
    steps = 0
    steps_per_call = REPORT_EVERY // BATCH_SIZE
    while steps < TRAINING_STEPS:
        rnn.train(X, Y, lr=LEARNING_RATE, steps=steps_per_call, batch=BATCH_SIZE)
        P = rnn.forward(X)[1]
        loss = rnn.loss(Y, P)
        steps += steps_per_call
        print(steps, loss)
        print(generate(rnn, MAXLEN, alphabet))
        print()
if __name__ == "__main__":
    # Command line: TRAINING_FILE [SEQUENCE_LENGTH] [DELIMITER]
    if len(sys.argv) < 2:
        print("USAGE: ./language_model.py TRAINING_FILE [SEQUENCE_LENGTH] [DELIMITER]")
        sys.exit()
    training_file = sys.argv[1]
    seqlen = int(sys.argv[2]) if len(sys.argv) > 2 else 32
    delimiter = sys.argv[3] if len(sys.argv) > 3 else None
    train(training_file, seqlen, delimiter)
| [
"numpy.concatenate",
"numpy.zeros",
"numpy.array",
"numpy.arange",
"sys.exit"
] | [((710, 741), 'numpy.array', 'np.array', (['codes'], {'dtype': 'np.uint8'}), '(codes, dtype=np.uint8)\n', (718, 741), True, 'import numpy as np\n'), ((1530, 1567), 'numpy.zeros', 'np.zeros', (['(U.size, k)'], {'dtype': 'np.uint8'}), '((U.size, k), dtype=np.uint8)\n', (1538, 1567), True, 'import numpy as np\n'), ((1437, 1460), 'numpy.concatenate', 'np.concatenate', (['data', '(0)'], {}), '(data, 0)\n', (1451, 1460), True, 'import numpy as np\n'), ((2849, 2859), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2857, 2859), False, 'import sys\n'), ((1574, 1591), 'numpy.arange', 'np.arange', (['U.size'], {}), '(U.size)\n', (1583, 1591), True, 'import numpy as np\n'), ((1389, 1404), 'numpy.array', 'np.array', (['codes'], {}), '(codes)\n', (1397, 1404), True, 'import numpy as np\n')] |
"""
===========================================================
Metrics and observables (:py:mod:`reservoirpy.observables`)
===========================================================
Metrics and observables for Reservoir Computing:
.. autosummary::
:toctree: generated/
spectral_radius
mse
rmse
nrmse
rsquare
"""
# Author: <NAME> at 01/06/2021 <<EMAIL>>
# Licence: MIT License
# Copyright: <NAME> (2018) <<EMAIL>>
import sys
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from scipy.sparse.linalg import eigs
from .type import Weights
def _check_arrays(y_true, y_pred):
y_true_array = np.asarray(y_true)
y_pred_array = np.asarray(y_pred)
if not y_true_array.shape == y_pred_array.shape:
raise ValueError(
f"Shape mismatch between y_true and y_pred: "
"{y_true_array.shape} != {y_pred_array.shape}"
)
return y_true_array, y_pred_array
def spectral_radius(W: Weights, maxiter: int = None) -> float:
    """Compute the spectral radius of a matrix `W`.

    The spectral radius is the largest absolute eigenvalue of `W`.

    Parameters
    ----------
    W : array-like (sparse or dense) of shape (N, N)
        Matrix whose spectral radius is computed.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed when `W` is
        sparse; defaults to `W.shape[0] * 20`.  See the Scipy
        `scipy.sparse.linalg.eigs` documentation for details.

    Returns
    -------
    float
        Spectral radius of `W`.

    Raises
    ------
    ArpackNoConvergence
        If the ARPACK iteration fails to converge on a large sparse
        matrix.  Increasing `maxiter` avoids this, at the price of a
        (possibly drastic) increase in computation time.
    """
    if issparse(W):
        arnoldi_limit = W.shape[0] * 20 if maxiter is None else maxiter
        # Only the single largest-magnitude ("LM") eigenvalue is needed.
        dominant = eigs(W, k=1, which="LM", maxiter=arnoldi_limit,
                        return_eigenvectors=False)
        return max(abs(dominant))
    eigenvalues = linalg.eig(W)[0]
    return max(abs(eigenvalues))
def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Mean squared error metric:

    .. math::

        \\frac{\\sum_{i=0}^{N-1} (y_i - \\hat{y}_i)^2}{N}

    Parameters
    ----------
    y_true : array-like of shape (N, features)
        Ground truth values.
    y_pred : array-like of shape (N, features)
        Predicted values.

    Returns
    -------
    float
        Mean squared error.
    """
    y_true_array, y_pred_array = _check_arrays(y_true, y_pred)
    squared_error = np.square(y_true_array - y_pred_array)
    return float(squared_error.mean())
def rmse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Root mean squared error metric:

    .. math::

        \\sqrt{\\frac{\\sum_{i=0}^{N-1} (y_i - \\hat{y}_i)^2}{N}}

    Parameters
    ----------
    y_true : array-like of shape (N, features)
        Ground truth values.
    y_pred : array-like of shape (N, features)
        Predicted values.

    Returns
    -------
    float
        Root mean squared error.
    """
    mean_squared = mse(y_true, y_pred)
    return np.sqrt(mean_squared)
def nrmse(
    y_true: np.ndarray,
    y_pred: np.ndarray,
    norm: Literal["minmax", "var", "mean", "q1q3"] = "minmax",
    norm_value: float = None,
) -> float:
    """Normalized mean squared error metric:

    .. math::

        \\frac{1}{\\lambda} * \\sqrt{\\frac{\\sum_{i=0}^{N-1} (y_i - \\hat{y}_i)^2}{N}}

    where :math:`\\lambda` may be:

    - :math:`\\max y - \\min y` (peak-to-peak amplitude) if ``norm="minmax"``;
    - :math:`\\mathrm{Var}(y)` (variance over time) if ``norm="var"``;
    - :math:`\\mathbb{E}[y]` (mean over time) if ``norm="mean"``;
    - :math:`Q_{3}(y) - Q_{1}(y)` (quartiles) if ``norm="q1q3"``;
    - or any value passed to ``norm_value``.

    Parameters
    ----------
    y_true : array-like of shape (N, features)
        Ground truth values.
    y_pred : array-like of shape (N, features)
        Predicted values.
    norm : {"minmax", "var", "mean", "q1q3"}, default to "minmax"
        Normalization method.
    norm_value : float, optional
        A normalization factor. If set, will override the ``norm`` parameter.

    Returns
    -------
    float
        Normalized mean squared error.
    """
    error = rmse(y_true, y_pred)
    if norm_value is not None:
        return error / norm_value
    # Normalize by a statistic of the ground truth signal.
    y = np.asarray(y_true)
    if norm == "minmax":
        denominator = y.ptp()
    elif norm == "var":
        denominator = y.var()
    elif norm == "mean":
        denominator = y.mean()
    elif norm == "q1q3":
        denominator = np.quantile(y, 0.75) - np.quantile(y, 0.25)
    else:
        raise ValueError(
            "Unknown normalization method. "
            "Available methods are ['minmax', 'var', 'mean', 'q1q3']."
        )
    return error / denominator
def rsquare(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Coefficient of determination :math:`R^2`:

    .. math::

        1 - \\frac{\\sum^{N-1}_{i=0} (y - \\hat{y})^2}
                  {\\sum^{N-1}_{i=0} (y - \\bar{y})^2}

    where :math:`\\bar{y}` is the mean value of ground truth.

    Parameters
    ----------
    y_true : array-like of shape (N, features)
        Ground truth values.
    y_pred : array-like of shape (N, features)
        Predicted values.

    Returns
    -------
    float
        Coefficient of determination.
    """
    y_true_array, y_pred_array = _check_arrays(y_true, y_pred)
    ss_res = (y_true_array - y_pred_array) ** 2
    # Bug fix: the total sum of squares must centre on the mean of the
    # *ground truth* (the docstring's y-bar); the previous code mistakenly
    # subtracted the mean of the predictions.
    ss_tot = (y_true_array - y_true_array.mean()) ** 2
    return 1 - np.sum(ss_res) / np.sum(ss_tot)
| [
"numpy.quantile",
"numpy.sum",
"scipy.sparse.issparse",
"numpy.asarray",
"scipy.linalg.eig",
"numpy.mean",
"scipy.sparse.linalg.eigs"
] | [((761, 779), 'numpy.asarray', 'np.asarray', (['y_true'], {}), '(y_true)\n', (771, 779), True, 'import numpy as np\n'), ((799, 817), 'numpy.asarray', 'np.asarray', (['y_pred'], {}), '(y_pred)\n', (809, 817), True, 'import numpy as np\n'), ((2222, 2233), 'scipy.sparse.issparse', 'issparse', (['W'], {}), '(W)\n', (2230, 2233), False, 'from scipy.sparse import issparse\n'), ((2956, 2999), 'numpy.mean', 'np.mean', (['((y_true_array - y_pred_array) ** 2)'], {}), '((y_true_array - y_pred_array) ** 2)\n', (2963, 2999), True, 'import numpy as np\n'), ((5968, 5977), 'numpy.sum', 'np.sum', (['d'], {}), '(d)\n', (5974, 5977), True, 'import numpy as np\n'), ((5980, 5989), 'numpy.sum', 'np.sum', (['D'], {}), '(D)\n', (5986, 5989), True, 'import numpy as np\n'), ((2338, 2406), 'scipy.sparse.linalg.eigs', 'eigs', (['W'], {'k': '(1)', 'which': '"""LM"""', 'maxiter': 'maxiter', 'return_eigenvectors': '(False)'}), "(W, k=1, which='LM', maxiter=maxiter, return_eigenvectors=False)\n", (2342, 2406), False, 'from scipy.sparse.linalg import eigs\n'), ((2438, 2451), 'scipy.linalg.eig', 'linalg.eig', (['W'], {}), '(W)\n', (2448, 2451), False, 'from scipy import linalg\n'), ((4921, 4941), 'numpy.quantile', 'np.quantile', (['y', '(0.75)'], {}), '(y, 0.75)\n', (4932, 4941), True, 'import numpy as np\n'), ((4944, 4964), 'numpy.quantile', 'np.quantile', (['y', '(0.25)'], {}), '(y, 0.25)\n', (4955, 4964), True, 'import numpy as np\n'), ((5223, 5241), 'numpy.asarray', 'np.asarray', (['y_true'], {}), '(y_true)\n', (5233, 5241), True, 'import numpy as np\n')] |
import numpy as np
import pybullet as p
from diy_gym.addons.addon import Addon
class OutOfBoundsPenalty(Addon):
    """Addon reporting a penalty when a model's tracked frame leaves the
    axis-aligned box defined by ``min_xyz`` / ``max_xyz``.
    """
    def __init__(self, parent, config):
        super(OutOfBoundsPenalty, self).__init__(parent, config)
        # The parent model is the one whose position is tracked.
        self.source_model = parent
        self.min_xyz = config.get('min_xyz', [0., 0., 0.])
        self.max_xyz = config.get('max_xyz', [0., 0., 0.])
        # Track a specific link frame if configured, otherwise the base (-1).
        self.source_frame_id = self.source_model.get_frame_id(
            config.get('source_frame')) if 'source_frame' in config else -1
        self.tolerance = config.get('tolerance', 0.05)
        self.penalty = config.get('penalty', -1)

    def outside(self):
        """Return whether the tracked frame's world position is outside the box."""
        if self.source_frame_id >= 0:
            # Index 4 of getLinkState is the link frame's world position.
            source_xyz = p.getLinkState(self.source_model.uid,
                                         self.source_frame_id)[4]
        else:
            source_xyz = p.getBasePositionAndOrientation(self.source_model.uid)[0]
        source_xyz = np.array(source_xyz)
        return (source_xyz < self.min_xyz).any() or (source_xyz > self.max_xyz).any()

    def is_terminal(self):
        """Return the configured penalty when out of bounds, 0 otherwise.

        Bug fix: ``outside()`` returns a boolean, so the old comparison
        ``self.outside() > self.tolerance`` ignored ``tolerance`` at its
        default (True > 0.05) and silently disabled the penalty for any
        tolerance >= 1.  The boolean is now used directly.
        """
        return self.penalty if self.outside() else 0
| [
"pybullet.getLinkState",
"pybullet.getBasePositionAndOrientation",
"numpy.array"
] | [((952, 972), 'numpy.array', 'np.array', (['source_xyz'], {}), '(source_xyz)\n', (960, 972), True, 'import numpy as np\n'), ((734, 793), 'pybullet.getLinkState', 'p.getLinkState', (['self.source_model.uid', 'self.source_frame_id'], {}), '(self.source_model.uid, self.source_frame_id)\n', (748, 793), True, 'import pybullet as p\n'), ((856, 910), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.source_model.uid'], {}), '(self.source_model.uid)\n', (887, 910), True, 'import pybullet as p\n')] |
import time,os,copy,argparse,subprocess, sys, pysam, datetime
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import multiprocessing as mp
import tensorflow as tf
from model_architect import *
from generate_SNP_pileups import get_snp_testing_candidates
from intervaltree import Interval, IntervalTree
from utils import *
# Silence the TF1 "tf.contrib will be removed" warning if contrib exists.
if type(tf.contrib) != type(tf): tf.contrib._warning = None
# Grow GPU memory on demand instead of pre-allocating all of it.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Network output channel -> nucleotide.
num_to_base_map={0:'A',1:'G',2:'T',3:'C'}
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Released SNP model names mapped to checkpoint prefixes (relative to this
# package directory); resolved by get_SNP_model() below.
snp_model_dict={'NanoCaller1':'release_data/ONT_models/SNPs/NanoCaller1_beta/model-rt-1',
                'NanoCaller2':'release_data/ONT_models/SNPs/NanoCaller1_beta/model-rt-1',
                'NanoCaller3':'release_data/clr_models/SNPs/NanoCaller3_beta/model-rt-100',
                'ONT-HG001':'release_data/ONT_models/SNPs/HG001_guppy4.2.2_giab-3.3.2/model-1',
                'ONT-HG001_GP2.3.8':'release_data/ONT_models/SNPs/HG001_guppy2.3.8_giab-3.3.2/model-100',
                'ONT-HG001_GP2.3.8-4.2.2':'release_data/ONT_models/SNPs/HG001_guppy2.3.8_guppy4.2.2_giab-3.3.2/model-100',
                'ONT-HG001-4_GP4.2.2':'release_data/ONT_models/SNPs/HG001_guppy4.2.2_giab-3.3.2_HG002-4_guppy4.2.2_giab-4.2.1/model-100',
                'ONT-HG002':'release_data/ONT_models/SNPs/HG002_guppy4.2.2_giab-4.2.1/model-100',
                'ONT-HG002_GP4.2.2_v3.3.2':'release_data/ONT_models/SNPs/HG002_guppy4.2.2_giab-3.3.2/model-100',
                'ONT-HG002_GP2.3.4_v3.3.2':'release_data/ONT_models/SNPs/HG002_guppy2.3.4_giab-3.3.2/model-100',
                'ONT-HG002_GP2.3.4_v4.2.1':'release_data/ONT_models/SNPs/HG002_guppy2.3.4_giab-4.2.1/model-100',
                'ONT-HG002_r10.3':'release_data/ONT_models/SNPs/HG002_r10.3_guppy4.0.11_giab-4.2.1/model-100',
                'ONT-HG002_bonito':'release_data/ONT_models/SNPs/HG002_bonito_giab-4.2.1/model-100',
                'CCS-HG001':'release_data/hifi_models/SNPs/HG001_giab-3.3.2/model-100',
                'CCS-HG002':'release_data/hifi_models/SNPs/HG002_giab-4.2.1/model-100',
                'CCS-HG001-4':'release_data/hifi_models/SNPs/HG001_giab-3.3.2_HG002-4_giab-4.2.1/model-100',
                'CLR-HG002':'release_data/clr_models/SNPs/HG002_giab-4.2.1/model-100'
               }
def get_SNP_model(snp_model):
    """Resolve a SNP model name or path to ``(checkpoint_prefix, coverage)``.

    ``snp_model`` may be a directory containing a checkpoint (its first
    ``*.meta`` file is located and the extension stripped), a checkpoint
    prefix or ``.meta`` file path, or one of the released model names in
    ``snp_model_dict``.  Returns ``(None, None)`` when nothing matches.
    The training coverage is read from ``<prefix>.coverage`` when present,
    otherwise 0 (callers substitute the run's own coverage).
    """
    if os.path.exists(snp_model):
        if os.path.isdir(snp_model):
            meta_files = glob.glob(os.path.join(snp_model, '*.meta'))
            if not meta_files:
                # Previously an empty directory raised an IndexError.
                return None, None
            # Proper suffix strip: rstrip('.meta') removed any trailing
            # '.', 'm', 'e', 't', 'a' characters, not the extension.
            snp_model_path = meta_files[0][:-len('.meta')]
        else:
            # A concrete file path; previously this case fell through the
            # if/elif and crashed with an UnboundLocalError below.
            if snp_model.endswith('.meta'):
                snp_model_path = snp_model[:-len('.meta')]
            else:
                snp_model_path = snp_model
    elif snp_model in snp_model_dict:
        dirname = os.path.dirname(__file__)
        snp_model_path = os.path.join(dirname, snp_model_dict[snp_model])
    else:
        return None, None
    coverage_path = '%s.coverage' % snp_model_path
    if os.path.exists(coverage_path):
        train_coverage = float(open(coverage_path, 'r').readlines()[0].rstrip('\n'))
    else:
        train_coverage = 0
    return snp_model_path, train_coverage
def test_model(params,pool):
    """Run the trained SNP model over one contig region and write results.

    Restores the TF1 checkpoint selected by params['snp_model'], streams
    pileup candidates for 200 kb windows through the network (candidates
    are produced by `pool` workers), and writes:
      * <prefix>.snps.vcf(.gz) -- called SNPs (bcftools-sorted/bgzipped)
      * <prefix>.snp_stats     -- per-candidate probabilities (CSV)
    `params` is the run configuration dict (paths, region, model name,
    sample name); `pool` is a multiprocessing pool.  Returns the output
    path prefix, or None when the contig has no coverage or the model
    cannot be resolved.
    """
    print('%s: SNP calling started.' %(str(datetime.datetime.now())), flush=True)
    vcf_path,prefix= params['vcf_path'], params['prefix']
    chrom,start,end=params['chrom'], params['start'],params['end']
    coverage=get_coverage(params, pool)
    print('%s: Coverage=%.2fx.' %(str(datetime.datetime.now()), coverage), flush=True)
    if coverage==0:
        print('%s: No coverage found for the contig %s.' %(str(datetime.datetime.now()), chrom), flush=True)
        return
    # Pileup tensor dimensions fed to the network (rows x columns x channels).
    n_input=[5,41,5]
    tf.reset_default_graph()
    weights,biases,tensors=get_tensors(n_input,0.0)
    (x,GT_label,A_label, G_label, T_label, C_label,GT_score, A_score, G_score, T_score, C_score, accuracy_GT, accuracy_A, accuracy_G, accuracy_T, accuracy_C, prediction_accuracy_GT, prediction_accuracy_A, prediction_accuracy_G, prediction_accuracy_T, prediction_accuracy_C, prediction_GT, prediction_A, prediction_G, prediction_T, prediction_C, accuracy, cost, optimizer, cost_GT, cost_A, cost_G, cost_T, cost_C,A_ref,G_ref,T_ref,C_ref,prob_GT,prob_A,prob_G,prob_T,prob_C,keep)=tensors
    model_path, train_coverage=get_SNP_model(params['snp_model'])
    if model_path==None:
        print('Invalid SNP model name or path', flush=True)
        return
    # Fall back to the run's own coverage when the model shipped none.
    train_coverage=coverage if train_coverage==0 else train_coverage
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    sess.run(tf.local_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, model_path)
    batch_size=1000
    neg_file=open(os.path.join(vcf_path,'%s.snp_stats' %prefix),'w')
    neg_file.write('pos,ref,prob_GT,prob_A,prob_G,prob_T,prob_C,DP,freq\n')
    with open(os.path.join(vcf_path,'%s.snps.vcf' %prefix),'w') as f:
        # Minimal VCF 4.2 header for a single contig/sample.
        f.write('##fileformat=VCFv4.2\n')
        f.write('##FILTER=<ID=PASS,Description="All filters passed">\n')
        c='##contig=<ID=%s>\n' %chrom
        f.write('##contig=<ID=%s>\n' %chrom)
        f.write('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n')
        f.write('##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Depth">\n')
        f.write('##FORMAT=<ID=FQ,Number=1,Type=Float,Description="Alternative Allele Frequency">\n')
        f.write('#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT %s\n' %params['sample'])
        # Split the target region into 200 kb windows processed in parallel.
        in_dict_list=[]
        for mbase in range(start,end,200000):
            d = copy.deepcopy(params)
            d['start']=mbase
            d['end']=min(end,mbase+200000)
            in_dict_list.append(d)
        result=pool.imap_unordered(get_snp_testing_candidates, in_dict_list)
        total_regions=len(in_dict_list)
        completed=0
        for res in result:
            pos,test_ref,x_test,dp,freq=res
            completed+=1
            if len(pos)==0:
                continue
            x_test=x_test.astype(np.float32)
            # Rescale pileup counts so the effective depth matches the
            # coverage the model was trained at.
            x_test[:,1:,:,:4]=x_test[:,1:,:,:4]*(train_coverage/coverage)
            for batch in range(int(np.ceil(len(x_test)/batch_size))):
                batch_freq=freq[batch*batch_size:min((batch+1)*batch_size,len(freq))]
                batch_dp=dp[batch*batch_size:min((batch+1)*batch_size,len(dp))]
                batch_pos = pos[batch*batch_size:min((batch+1)*batch_size,len(pos))]
                batch_x = x_test[batch*batch_size:min((batch+1)*batch_size,len(x_test))]
                batch_ref = test_ref[batch*batch_size:min((batch+1)*batch_size, len(test_ref))]
                # Per-base probabilities, conditioned on the one-hot reference base.
                batch_prob_GT,batch_prob_A,batch_prob_G,batch_prob_C,batch_prob_T= sess.run([prob_GT,prob_A,prob_G,prob_C,prob_T],\
                feed_dict={x: batch_x, A_ref:batch_ref[:,0][:,np.newaxis], G_ref:batch_ref[:,1][:,np.newaxis], T_ref:batch_ref[:,2][:,np.newaxis], C_ref:batch_ref[:,3][:,np.newaxis],keep:1.0})
                batch_pred_GT=np.argmax(batch_prob_GT,axis=1)
                # Column order below is A, G, T, C (matches num_to_base_map).
                batch_probs=np.hstack([batch_prob_A[:,1][:,np.newaxis], batch_prob_G[:,1][:,np.newaxis], batch_prob_T[:,1][:,np.newaxis], batch_prob_C[:,1][:,np.newaxis]])
                batch_pred=np.argsort(batch_probs,axis=1)
                batch_ref_vec=batch_ref
                batch_ref=np.argmax(batch_ref,1)
                # Genotype call = number of bases whose probability clears 0.5.
                batch_pred_GT=np.sum(batch_probs>=0.5,axis=1)
                sort_probs=np.sort(batch_probs,axis=1)
                for j in range(len(batch_pred_GT)):
                    if batch_pred_GT[j]>=2: # two or more bases pass 0.5 -> heterozygous (or multi-allelic)
                        pred1,pred2=batch_pred[j,-1],batch_pred[j,-2]
                        if pred1==batch_ref[j]:
                            # Top base is the reference: 0/1 with the runner-up as ALT.
                            s='%s\t%d\t.\t%s\t%s\t%.3f\t%s\t.\tGT:DP:FQ\t%s:%d:%.4f\n' %(chrom, batch_pos[j], num_to_base_map[batch_ref[j]], num_to_base_map[pred2], min(999,-100*np.log10(1e-10+ 1-batch_probs[j,pred2])),'PASS','0/1', batch_dp[j], batch_freq[j])
                            f.write(s)
                        elif pred2==batch_ref[j] and batch_probs[j,pred2]>=0.5:
                            s='%s\t%d\t.\t%s\t%s\t%.3f\t%s\t.\tGT:DP:FQ\t%s:%d:%.4f\n' %(chrom,batch_pos[j], num_to_base_map[batch_ref[j]], num_to_base_map[pred1], min(999,-100*np.log10(1e-10+ 1-batch_probs[j,pred2])),'PASS','1/0', batch_dp[j], batch_freq[j])
                            f.write(s)
                        elif pred2!=batch_ref[j] and pred1!=batch_ref[j] and batch_probs[j,pred2]>=0.5:
                            # Neither top base matches the reference: multi-allelic 1/2.
                            s='%s\t%d\t.\t%s\t%s,%s\t%.3f\t%s\t.\tGT:DP:FQ\t%s:%d:%.4f\n' %\
                            (chrom,batch_pos[j],num_to_base_map[batch_ref[j]],num_to_base_map[pred1],num_to_base_map[pred2],min(999,-100*np.log10(1e-10+ 1-batch_probs[j,pred2])),'PASS','1/2', batch_dp[j], batch_freq[j])
                            f.write(s)
                    elif batch_pred_GT[j]==1 and batch_ref[j]!=batch_pred[j,-1] and batch_probs[j,batch_pred[j,-1]]>=0.5:
                        # Single confident non-reference base -> homozygous ALT.
                        pred1=batch_pred[j,-1]
                        s='%s\t%d\t.\t%s\t%s\t%.3f\t%s\t.\tGT:DP:FQ\t%s:%d:%.4f\n' %(chrom, batch_pos[j], num_to_base_map[batch_ref[j]], num_to_base_map[pred1], min(999,-100*np.log10(1e-10+ 1-batch_probs[j,pred1])),'PASS','1/1', batch_dp[j], batch_freq[j])
                        f.write(s)
                    # Every candidate (called or not) is logged for debugging.
                    neg_file.write('%d,%s,%.4f,%.4f,%.4f,%.4f,%.4f,%d,%.4f\n' %(batch_pos[j],num_to_base_map[batch_ref[j]], batch_prob_GT[j,0], batch_probs[j,0], batch_probs[j,1], batch_probs[j,2], batch_probs[j,3], batch_dp[j], batch_freq[j]))
            print('%s: (%d/%d) regions completed.' %(str(datetime.datetime.now()), completed, total_regions),flush=True)
        f.flush()
        os.fsync(f.fileno())
    neg_file.flush()
    os.fsync(neg_file.fileno())
    output_file=os.path.join(vcf_path,'%s.snps' %prefix)
    run_cmd("bcftools sort %s.vcf|bgziptabix %s.vcf.gz" %(output_file, output_file))
    return output_file
| [
"numpy.sum",
"numpy.argmax",
"tensorflow.reset_default_graph",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"numpy.argsort",
"os.path.join",
"os.path.dirname",
"os.path.exists",
"numpy.log10",
"datetime.datetime.now",
"copy.deepcopy",
"tensorflow.train.Saver",
"tenso... | [((406, 422), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (420, 422), True, 'import tensorflow as tf\n'), ((505, 567), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (539, 567), True, 'import tensorflow as tf\n'), ((2379, 2404), 'os.path.exists', 'os.path.exists', (['snp_model'], {}), '(snp_model)\n', (2393, 2404), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((2797, 2826), 'os.path.exists', 'os.path.exists', (['coverage_path'], {}), '(coverage_path)\n', (2811, 2826), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((3578, 3602), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3600, 3602), True, 'import tensorflow as tf\n'), ((4426, 4459), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4457, 4459), True, 'import tensorflow as tf\n'), ((4471, 4483), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4481, 4483), True, 'import tensorflow as tf\n'), ((4562, 4578), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4576, 4578), True, 'import tensorflow as tf\n'), ((9988, 10030), 'os.path.join', 'os.path.join', (['vcf_path', "('%s.snps' % prefix)"], {}), "(vcf_path, '%s.snps' % prefix)\n", (10000, 10030), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((2417, 2441), 'os.path.isdir', 'os.path.isdir', (['snp_model'], {}), '(snp_model)\n', (2430, 2441), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((4516, 4548), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (4546, 4548), True, 'import tensorflow as tf\n'), ((4662, 4709), 'os.path.join', 'os.path.join', (['vcf_path', "('%s.snp_stats' % prefix)"], {}), "(vcf_path, '%s.snp_stats' % prefix)\n", (4674, 4709), False, 
'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((2594, 2619), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2609, 2619), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((2643, 2691), 'os.path.join', 'os.path.join', (['dirname', 'snp_model_dict[snp_model]'], {}), '(dirname, snp_model_dict[snp_model])\n', (2655, 2691), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((4808, 4854), 'os.path.join', 'os.path.join', (['vcf_path', "('%s.snps.vcf' % prefix)"], {}), "(vcf_path, '%s.snps.vcf' % prefix)\n", (4820, 4854), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((5518, 5539), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (5531, 5539), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((3077, 3100), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3098, 3100), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((7006, 7038), 'numpy.argmax', 'np.argmax', (['batch_prob_GT'], {'axis': '(1)'}), '(batch_prob_GT, axis=1)\n', (7015, 7038), True, 'import numpy as np\n'), ((7067, 7228), 'numpy.hstack', 'np.hstack', (['[batch_prob_A[:, 1][:, np.newaxis], batch_prob_G[:, 1][:, np.newaxis],\n batch_prob_T[:, 1][:, np.newaxis], batch_prob_C[:, 1][:, np.newaxis]]'], {}), '([batch_prob_A[:, 1][:, np.newaxis], batch_prob_G[:, 1][:, np.\n newaxis], batch_prob_T[:, 1][:, np.newaxis], batch_prob_C[:, 1][:, np.\n newaxis]])\n', (7076, 7228), True, 'import numpy as np\n'), ((7240, 7271), 'numpy.argsort', 'np.argsort', (['batch_probs'], {'axis': '(1)'}), '(batch_probs, axis=1)\n', (7250, 7271), True, 'import numpy as np\n'), ((7371, 7394), 'numpy.argmax', 'np.argmax', (['batch_ref', '(1)'], {}), '(batch_ref, 1)\n', (7380, 7394), True, 'import numpy as np\n'), ((7441, 7475), 'numpy.sum', 'np.sum', (['(batch_probs >= 0.5)'], {'axis': '(1)'}), 
'(batch_probs >= 0.5, axis=1)\n', (7447, 7475), True, 'import numpy as np\n'), ((7517, 7545), 'numpy.sort', 'np.sort', (['batch_probs'], {'axis': '(1)'}), '(batch_probs, axis=1)\n', (7524, 7545), True, 'import numpy as np\n'), ((3344, 3367), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3365, 3367), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((3481, 3504), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3502, 3504), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((2480, 2513), 'os.path.join', 'os.path.join', (['snp_model', '"""*.meta"""'], {}), "(snp_model, '*.meta')\n", (2492, 2513), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((9779, 9802), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9800, 9802), False, 'import time, os, copy, argparse, subprocess, sys, pysam, datetime\n'), ((7984, 8027), 'numpy.log10', 'np.log10', (['(1e-10 + 1 - batch_probs[j, pred2])'], {}), '(1e-10 + 1 - batch_probs[j, pred2])\n', (7992, 8027), True, 'import numpy as np\n'), ((9315, 9358), 'numpy.log10', 'np.log10', (['(1e-10 + 1 - batch_probs[j, pred1])'], {}), '(1e-10 + 1 - batch_probs[j, pred1])\n', (9323, 9358), True, 'import numpy as np\n'), ((8384, 8427), 'numpy.log10', 'np.log10', (['(1e-10 + 1 - batch_probs[j, pred2])'], {}), '(1e-10 + 1 - batch_probs[j, pred2])\n', (8392, 8427), True, 'import numpy as np\n'), ((8845, 8888), 'numpy.log10', 'np.log10', (['(1e-10 + 1 - batch_probs[j, pred2])'], {}), '(1e-10 + 1 - batch_probs[j, pred2])\n', (8853, 8888), True, 'import numpy as np\n')] |
"""Angles and anomalies.
"""
import numpy as np
from astropy import units as u
from scipy import optimize
def _kepler_equation(E, M, ecc):
return E - ecc * np.sin(E) - M
def _kepler_equation_prime(E, M, ecc):
return 1 - ecc * np.cos(E)
def _kepler_equation_hyper(F, M, ecc):
return -F + ecc * np.sinh(F) - M
def _kepler_equation_prime_hyper(F, M, ecc):
return ecc * np.cosh(F) - 1
def _kepler_equation_parabolic(D, M, ecc):
    """Residual of the near-parabolic Kepler equation (series form)."""
    return -M + M_parabolic(ecc, D)
def _kepler_equation_prime_parabolic(D, M, ecc):
    # Derivative of the near-parabolic Kepler residual w.r.t. D; M cancels
    # out of the derivative, so this simply delegates to the series form.
    return M_parabolic_prime(ecc, D)
def M_parabolic(ecc, D, tolerance=1e-16):
    """Kepler equation r.h.s. in the near-parabolic regime.

    Evaluates the series expansion of Farnocchia et al. (2013),
    "Robust resolution of Kepler's equation in all eccentricity regimes",
    Celestial Mechanics and Dynamical Astronomy 116(1):21-34.

    Parameters
    ----------
    ecc : float
        Eccentricity (close to 1).
    D : float
        Parabolic eccentric anomaly (rad).
    tolerance : float, optional
        Truncation threshold: summation stops once a term is smaller.

    Returns
    -------
    float
        Value of the Kepler-equation right-hand side.
    """
    x = (ecc - 1.0) / (ecc + 1.0) * (D ** 2)
    series_sum = 0.0
    k = 0
    while True:
        term = (ecc - 1.0 / (2.0 * k + 3.0)) * (x ** k)
        series_sum += term
        k += 1
        if np.abs(term) < tolerance:
            break
    return np.sqrt(2.0 / (1.0 + ecc)) * D + np.sqrt(2.0 / (1.0 + ecc) ** 3) * (D ** 3) * series_sum
def M_parabolic_prime(ecc, D, tolerance=1e-16):
    """Derivative of the near-parabolic Kepler equation r.h.s. w.r.t. D.

    Term-by-term derivative of the series in Farnocchia et al. (2013),
    "Robust resolution of Kepler's equation in all eccentricity regimes",
    Celestial Mechanics and Dynamical Astronomy 116(1):21-34.

    Parameters
    ----------
    ecc : float
        Eccentricity (close to 1).
    D : float
        Parabolic eccentric anomaly (rad).
    tolerance : float, optional
        Truncation threshold: summation stops once a term is smaller.

    Returns
    -------
    float
        Derivative of the Kepler-equation right-hand side.
    """
    x = (ecc - 1.0) / (ecc + 1.0) * (D ** 2)
    series_sum = 0.0
    k = 0
    while True:
        term = (ecc - 1.0 / (2.0 * k + 3.0)) * (2 * k + 3.0) * (x ** k)
        series_sum += term
        k += 1
        if np.abs(term) < tolerance:
            break
    return np.sqrt(2.0 / (1.0 + ecc)) + np.sqrt(2.0 / (1.0 + ecc) ** 3) * (D ** 2) * series_sum
def D_to_nu(D, ecc):
    """True anomaly from parabolic eccentric anomaly.

    Parameters
    ----------
    D : float
        Parabolic eccentric anomaly (rad).
    ecc : float
        Eccentricity (unused here; kept for a uniform anomaly-conversion API).

    Returns
    -------
    float
        True anomaly (rad).

    Notes
    -----
    Barker-equation relation, per Farnocchia et al. (2013), Celestial
    Mechanics and Dynamical Astronomy 116(1):21-34.
    """
    return 2.0 * np.arctan(D)
def nu_to_D(nu, ecc):
    """Parabolic eccentric anomaly from true anomaly.

    Parameters
    ----------
    nu : float
        True anomaly (rad).
    ecc : float
        Eccentricity (parabolic regime, ecc close to 1); unused here,
        kept for a uniform anomaly-conversion API.

    Returns
    -------
    D : float
        Parabolic eccentric anomaly.

    Notes
    -----
    The original docstring wrongly described this as the *hyperbolic*
    conversion with ecc > 1; this is the parabolic (Barker) relation.
    Taken from Farnocchia et al., "Robust resolution of Kepler's equation
    in all eccentricity regimes", Celestial Mechanics and Dynamical
    Astronomy 116, no. 1 (2013): 21-34.
    """
    return np.tan(nu / 2.0)
def nu_to_E(nu, ecc):
    """Eccentric anomaly from true anomaly (elliptic orbits, ecc < 1).

    Parameters
    ----------
    nu : float
        True anomaly (rad).
    ecc : float
        Eccentricity.

    Returns
    -------
    float
        Eccentric anomaly (rad).
    """
    half_tan = np.sqrt((1 - ecc) / (1 + ecc)) * np.tan(nu / 2)
    return 2 * np.arctan(half_tan)
def nu_to_F(nu, ecc):
    """Hyperbolic eccentric anomaly from true anomaly (ecc > 1).

    Parameters
    ----------
    nu : float
        True anomaly (rad).
    ecc : float
        Eccentricity (>1).

    Returns
    -------
    F : float
        Hyperbolic eccentric anomaly (as an astropy ``rad`` quantity).

    Notes
    -----
    Uses F = ln[(sqrt(ecc+1) + sqrt(ecc-1)*tan(nu/2)) /
               (sqrt(ecc+1) - sqrt(ecc-1)*tan(nu/2))]; see Curtis,
    *Orbital mechanics for engineering students*, p. 167.
    """
    a = np.sqrt(ecc + 1)
    b = np.sqrt(ecc - 1) * np.tan(nu / 2)
    return np.log((a + b) / (a - b)) * u.rad
def E_to_nu(E, ecc):
    """True anomaly from eccentric anomaly (elliptic orbits, ecc < 1).

    Parameters
    ----------
    E : float
        Eccentric anomaly (rad).
    ecc : float
        Eccentricity.

    Returns
    -------
    float
        True anomaly (rad).
    """
    half_tan = np.sqrt((1 + ecc) / (1 - ecc)) * np.tan(E / 2)
    return 2 * np.arctan(half_tan)
def F_to_nu(F, ecc):
    """True anomaly from hyperbolic eccentric anomaly (ecc > 1).

    Parameters
    ----------
    F : float
        Hyperbolic eccentric anomaly (rad).
    ecc : float
        Eccentricity (>1).

    Returns
    -------
    nu : float
        True anomaly (rad).
    """
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        # tan(nu/2) = sqrt((ecc+1)/(ecc-1)) * tanh(F/2), written with exp(F)
        # to keep the exact arithmetic of the reference implementation.
        exp_F = np.exp(F)
        numer = exp_F * np.sqrt(ecc + 1) - np.sqrt(ecc + 1)
        denom = exp_F * np.sqrt(ecc - 1) + np.sqrt(ecc - 1)
        nu = 2 * np.arctan(numer / denom)
    return nu
def M_to_E(M, ecc):
    """Eccentric anomaly from mean anomaly (elliptic orbits, ecc < 1).

    Solves Kepler's equation with Newton's method, starting from E = M.

    Parameters
    ----------
    M : float
        Mean anomaly (rad).
    ecc : float
        Eccentricity.

    Returns
    -------
    E : float
        Eccentric anomaly.
    """
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        return optimize.newton(_kepler_equation, M,
                               fprime=_kepler_equation_prime, args=(M, ecc))
def M_to_F(M, ecc):
    """Hyperbolic eccentric anomaly from mean anomaly (ecc > 1).

    Solves the hyperbolic Kepler equation with Newton's method; the
    initial guess arcsinh(M/ecc) follows from sinh(F) ~ F for small F.

    Parameters
    ----------
    M : float
        Mean anomaly (rad).
    ecc : float
        Eccentricity (>1).

    Returns
    -------
    F : float
        Hyperbolic eccentric anomaly.
    """
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        initial_guess = np.arcsinh(M / ecc)
        return optimize.newton(_kepler_equation_hyper, initial_guess,
                               _kepler_equation_prime_hyper,
                               args=(M, ecc), maxiter=100)
def M_to_D(M, ecc):
    """Parabolic eccentric anomaly from mean anomaly (ecc close to 1).

    Solves the near-parabolic Kepler equation with Newton's method, seeded
    with the closed-form solution of the exact-parabola Barker equation.

    Parameters
    ----------
    M : float
        Mean anomaly (rad).
    ecc : float
        Eccentricity (near-parabolic regime).

    Returns
    -------
    D : float
        Parabolic eccentric anomaly.
    """
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        # Closed-form Barker solution for ecc == 1, used as initial guess.
        B = 3.0 * M / 2.0
        A = (B + (1.0 + B ** 2) ** (0.5)) ** (2.0 / 3.0)
        initial_guess = 2 * A * B / (1 + A + A ** 2)
        return optimize.newton(_kepler_equation_parabolic, initial_guess,
                               _kepler_equation_prime_parabolic,
                               args=(M, ecc), maxiter=100)
def E_to_M(E, ecc):
    """Mean anomaly from eccentric anomaly (elliptic orbits, ecc < 1).

    Direct evaluation of Kepler's equation: M = E - ecc*sin(E).

    Parameters
    ----------
    E : float
        Eccentric anomaly (rad).
    ecc : float
        Eccentricity.

    Returns
    -------
    M : float
        Mean anomaly (rad).
    """
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        return _kepler_equation(E, 0.0 * u.rad, ecc)
def F_to_M(F, ecc):
    """Mean anomaly from hyperbolic eccentric anomaly (ecc > 1).

    Direct evaluation of the hyperbolic Kepler equation: M = ecc*sinh(F) - F.

    Parameters
    ----------
    F : float
        Hyperbolic eccentric anomaly (rad).
    ecc : float
        Eccentricity (>1).

    Returns
    -------
    M : float
        Mean anomaly (rad).
    """
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        return _kepler_equation_hyper(F, 0.0 * u.rad, ecc)
def D_to_M(D, ecc):
    """Mean anomaly from parabolic eccentric anomaly (ecc close to 1).

    Direct evaluation of the near-parabolic Kepler equation (series form).

    Parameters
    ----------
    D : float
        Parabolic eccentric anomaly (rad).
    ecc : float
        Eccentricity.

    Returns
    -------
    M : float
        Mean anomaly (rad).
    """
    with u.set_enabled_equivalencies(u.dimensionless_angles()):
        return _kepler_equation_parabolic(D, 0.0 * u.rad, ecc)
def M_to_nu(M, ecc, delta=1e-2):
    """True anomaly from mean anomaly.

    Dispatches on eccentricity: hyperbolic for ecc > 1 + delta, elliptic
    for ecc < 1 - delta, and the robust near-parabolic series in between
    (regime split per Farnocchia et al. 2013).

    Parameters
    ----------
    M : float
        Mean anomaly (rad).
    ecc : float
        Eccentricity.
    delta : float, optional
        Half-width of the near-parabolic band around ecc = 1.

    Returns
    -------
    nu : float
        True anomaly (rad).

    Examples
    --------
    >>> nu = M_to_nu(np.radians(30.0), 0.06)
    >>> np.rad2deg(nu)
    33.673284930211658
    """
    if ecc > 1 + delta:
        return F_to_nu(M_to_F(M, ecc), ecc)
    if ecc < 1 - delta:
        return E_to_nu(M_to_E(M, ecc), ecc)
    return D_to_nu(M_to_D(M, ecc), ecc)
def nu_to_M(nu, ecc, delta=1e-2):
    """Mean anomaly from true anomaly.

    Inverse of :func:`M_to_nu`, with the same eccentricity-regime dispatch:
    hyperbolic for ecc > 1 + delta, elliptic for ecc < 1 - delta, and the
    near-parabolic series in between.

    Parameters
    ----------
    nu : float
        True anomaly (rad).
    ecc : float
        Eccentricity.
    delta : float, optional
        Half-width of the near-parabolic band around ecc = 1.

    Returns
    -------
    M : float
        Mean anomaly (rad).
    """
    if ecc > 1 + delta:
        return F_to_M(nu_to_F(nu, ecc), ecc)
    if ecc < 1 - delta:
        return E_to_M(nu_to_E(nu, ecc), ecc)
    return D_to_M(nu_to_D(nu, ecc), ecc)
def fp_angle(nu, ecc):
    """Flight path angle.

    Parameters
    ----------
    nu : float
        True anomaly (rad).
    ecc : float
        Eccentricity.

    Returns
    -------
    float
        Flight path angle (rad).

    Notes
    -----
    Algorithm taken from Vallado 2007, pp. 113.
    """
    sin_nu = np.sin(nu)
    cos_nu = np.cos(nu)
    return np.arctan2(ecc * sin_nu, 1 + ecc * cos_nu)
| [
"numpy.abs",
"numpy.tan",
"scipy.optimize.newton",
"numpy.arcsinh",
"numpy.cos",
"astropy.units.dimensionless_angles",
"numpy.cosh",
"numpy.arctan",
"numpy.sin",
"numpy.exp",
"numpy.sinh",
"numpy.sqrt"
] | [((3596, 3612), 'numpy.tan', 'np.tan', (['(nu / 2.0)'], {}), '(nu / 2.0)\n', (3602, 3612), True, 'import numpy as np\n'), ((2476, 2502), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (1.0 + ecc))'], {}), '(2.0 / (1.0 + ecc))\n', (2483, 2502), True, 'import numpy as np\n'), ((3068, 3080), 'numpy.arctan', 'np.arctan', (['D'], {}), '(D)\n', (3077, 3080), True, 'import numpy as np\n'), ((5781, 5856), 'scipy.optimize.newton', 'optimize.newton', (['_kepler_equation', 'M', '_kepler_equation_prime'], {'args': '(M, ecc)'}), '(_kepler_equation, M, _kepler_equation_prime, args=(M, ecc))\n', (5796, 5856), False, 'from scipy import optimize\n'), ((6894, 7010), 'scipy.optimize.newton', 'optimize.newton', (['_kepler_equation_parabolic', 'guess', '_kepler_equation_prime_parabolic'], {'args': '(M, ecc)', 'maxiter': '(100)'}), '(_kepler_equation_parabolic, guess,\n _kepler_equation_prime_parabolic, args=(M, ecc), maxiter=100)\n', (6909, 7010), False, 'from scipy import optimize\n'), ((241, 250), 'numpy.cos', 'np.cos', (['E'], {}), '(E)\n', (247, 250), True, 'import numpy as np\n'), ((393, 403), 'numpy.cosh', 'np.cosh', (['F'], {}), '(F)\n', (400, 403), True, 'import numpy as np\n'), ((1386, 1398), 'numpy.abs', 'np.abs', (['term'], {}), '(term)\n', (1392, 1398), True, 'import numpy as np\n'), ((1455, 1481), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (1.0 + ecc))'], {}), '(2.0 / (1.0 + ecc))\n', (1462, 1481), True, 'import numpy as np\n'), ((2401, 2413), 'numpy.abs', 'np.abs', (['term'], {}), '(term)\n', (2407, 2413), True, 'import numpy as np\n'), ((5226, 5250), 'astropy.units.dimensionless_angles', 'u.dimensionless_angles', ([], {}), '()\n', (5248, 5250), True, 'from astropy import units as u\n'), ((5742, 5766), 'astropy.units.dimensionless_angles', 'u.dimensionless_angles', ([], {}), '()\n', (5764, 5766), True, 'from astropy import units as u\n'), ((6214, 6238), 'astropy.units.dimensionless_angles', 'u.dimensionless_angles', ([], {}), '()\n', (6236, 6238), True, 'from astropy import units as 
u\n'), ((6293, 6312), 'numpy.arcsinh', 'np.arcsinh', (['(M / ecc)'], {}), '(M / ecc)\n', (6303, 6312), True, 'import numpy as np\n'), ((6727, 6751), 'astropy.units.dimensionless_angles', 'u.dimensionless_angles', ([], {}), '()\n', (6749, 6751), True, 'from astropy import units as u\n'), ((7372, 7396), 'astropy.units.dimensionless_angles', 'u.dimensionless_angles', ([], {}), '()\n', (7394, 7396), True, 'from astropy import units as u\n'), ((7773, 7797), 'astropy.units.dimensionless_angles', 'u.dimensionless_angles', ([], {}), '()\n', (7795, 7797), True, 'from astropy import units as u\n'), ((8174, 8198), 'astropy.units.dimensionless_angles', 'u.dimensionless_angles', ([], {}), '()\n', (8196, 8198), True, 'from astropy import units as u\n'), ((9857, 9867), 'numpy.sin', 'np.sin', (['nu'], {}), '(nu)\n', (9863, 9867), True, 'import numpy as np\n'), ((165, 174), 'numpy.sin', 'np.sin', (['E'], {}), '(E)\n', (171, 174), True, 'import numpy as np\n'), ((314, 324), 'numpy.sinh', 'np.sinh', (['F'], {}), '(F)\n', (321, 324), True, 'import numpy as np\n'), ((1488, 1519), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (1.0 + ecc) ** 3)'], {}), '(2.0 / (1.0 + ecc) ** 3)\n', (1495, 1519), True, 'import numpy as np\n'), ((2505, 2536), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (1.0 + ecc) ** 3)'], {}), '(2.0 / (1.0 + ecc) ** 3)\n', (2512, 2536), True, 'import numpy as np\n'), ((3919, 3949), 'numpy.sqrt', 'np.sqrt', (['((1 - ecc) / (1 + ecc))'], {}), '((1 - ecc) / (1 + ecc))\n', (3926, 3949), True, 'import numpy as np\n'), ((3952, 3966), 'numpy.tan', 'np.tan', (['(nu / 2)'], {}), '(nu / 2)\n', (3958, 3966), True, 'import numpy as np\n'), ((4840, 4870), 'numpy.sqrt', 'np.sqrt', (['((1 + ecc) / (1 - ecc))'], {}), '((1 + ecc) / (1 - ecc))\n', (4847, 4870), True, 'import numpy as np\n'), ((4873, 4886), 'numpy.tan', 'np.tan', (['(E / 2)'], {}), '(E / 2)\n', (4879, 4886), True, 'import numpy as np\n'), ((9879, 9889), 'numpy.cos', 'np.cos', (['nu'], {}), '(nu)\n', (9885, 9889), True, 'import numpy as np\n'), 
((4380, 4396), 'numpy.sqrt', 'np.sqrt', (['(ecc + 1)'], {}), '(ecc + 1)\n', (4387, 4396), True, 'import numpy as np\n'), ((4452, 4468), 'numpy.sqrt', 'np.sqrt', (['(ecc + 1)'], {}), '(ecc + 1)\n', (4459, 4468), True, 'import numpy as np\n'), ((4399, 4415), 'numpy.sqrt', 'np.sqrt', (['(ecc - 1)'], {}), '(ecc - 1)\n', (4406, 4415), True, 'import numpy as np\n'), ((4418, 4432), 'numpy.tan', 'np.tan', (['(nu / 2)'], {}), '(nu / 2)\n', (4424, 4432), True, 'import numpy as np\n'), ((4471, 4487), 'numpy.sqrt', 'np.sqrt', (['(ecc - 1)'], {}), '(ecc - 1)\n', (4478, 4487), True, 'import numpy as np\n'), ((4490, 4504), 'numpy.tan', 'np.tan', (['(nu / 2)'], {}), '(nu / 2)\n', (4496, 4504), True, 'import numpy as np\n'), ((5312, 5328), 'numpy.sqrt', 'np.sqrt', (['(ecc + 1)'], {}), '(ecc + 1)\n', (5319, 5328), True, 'import numpy as np\n'), ((5391, 5407), 'numpy.sqrt', 'np.sqrt', (['(ecc - 1)'], {}), '(ecc - 1)\n', (5398, 5407), True, 'import numpy as np\n'), ((5281, 5290), 'numpy.exp', 'np.exp', (['F'], {}), '(F)\n', (5287, 5290), True, 'import numpy as np\n'), ((5293, 5309), 'numpy.sqrt', 'np.sqrt', (['(ecc + 1)'], {}), '(ecc + 1)\n', (5300, 5309), True, 'import numpy as np\n'), ((5360, 5369), 'numpy.exp', 'np.exp', (['F'], {}), '(F)\n', (5366, 5369), True, 'import numpy as np\n'), ((5372, 5388), 'numpy.sqrt', 'np.sqrt', (['(ecc - 1)'], {}), '(ecc - 1)\n', (5379, 5388), True, 'import numpy as np\n')] |
"""
MIT License
Copyright (c) 2020 <NAME> and <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import List, Any
import numpy as np
from .metrics import Metrics
from .components import IOMetrics, LRMetrics
class AdaS():
    """Adaptive per-layer learning-rate scheduler (AdaS).

    Keeps one learning rate per layer in ``lr_vector`` and recomputes it each
    epoch in :meth:`step` from the "rank velocity" of the layers' channel
    metrics: conv layers get individual rates, while all FC layers share a
    single pooled rate.
    """
    # Number of most recent epochs of channel metrics used for the
    # rank-velocity linear regression in step().
    n_buffer = 2
    def __init__(self, parameters: List[Any],
                 beta: float = 0.8, zeta: float = 1.,
                 p: int = 1, init_lr: float = 3e-2,
                 min_lr: float = 1e-20) -> None:
        '''
        parameters: list of torch.nn.Module.parameters()
        beta: float: AdaS gain factor [0, 1)
        eta: knowledge gain hyper-paramters [0, 1)
        init_lr: initial learning rate > 0
        min_lr: minimum possible learning rate > 0
        '''
        # Validate hyper-parameters; the zeta check is deliberately disabled.
        if beta < 0 or beta >= 1:
            raise ValueError
        # if zeta < 0 or zeta > 1:
        #     raise ValueError
        if init_lr <= 0:
            raise ValueError
        if min_lr <= 0:
            raise ValueError
        self.metrics = metrics = Metrics(parameters=parameters, p=p)
        self.init_lr = init_lr
        self.min_lr = min_lr
        self.beta = beta
        self.zeta = zeta
        # One IOMetrics snapshot is appended per step() call (i.e. per epoch).
        self.historical_io_metrics = list()
        # One learning rate per layer, all starting at init_lr.
        init_lr_vector = np.repeat(a=init_lr,
                                  repeats=len(metrics.layers_info))
        self.lr_vector = init_lr_vector
        self.velocity_moment_conv = np.zeros(metrics.number_of_conv)
        self.acceleration_moment_conv = np.zeros(metrics.number_of_conv)
        # Exponentially averaged rank velocity per conv layer; becomes the
        # per-layer learning rate in step().
        self.R_conv = np.zeros(metrics.number_of_conv)
        # NOTE(review): the [0] index collapses each FC array to a scalar 0.0
        # -- presumably intentional since all FC layers share one pooled
        # rate (R_fc); confirm against the Metrics implementation.
        self.velocity_moment_fc = np.zeros(metrics.number_of_fc)[0]
        self.acceleration_moment_fc = np.zeros(metrics.number_of_fc)[0]
        self.R_fc = np.zeros(metrics.number_of_fc)[0]
    def step(self, epoch: int, io_metrics: IOMetrics = None) -> LRMetrics:
        """Recompute the per-layer learning-rate vector for one epoch.

        epoch: current epoch index (0-based).
        io_metrics: precomputed IOMetrics snapshot; evaluated from
            self.metrics when None.
        Returns an LRMetrics with the conv rank velocities and updated R_conv.
        (Annotation fixed: the original declared ``-> None`` but returns
        LRMetrics.)
        """
        if io_metrics is None:
            io_metrics = self.metrics.evaluate(epoch)
        self.historical_io_metrics.append(io_metrics)
        if epoch == 0:
            # No history yet: seed the rank velocity with init_lr everywhere.
            velocity_conv_rank = self.init_lr * \
                np.ones(len(self.metrics.conv_indices))
            velocity_fc_rank = self.init_lr * \
                np.ones(len(self.metrics.fc_indices))[0]
            # NOTE unused (below)
            # acceleration_conv_rank = np.zeros(len(conv_indices))
            # acceleration_fc_rank = np.zeros(len(fc_indices))[0]
            # preserving_acceleration_conv = alpha
        else:
            # Build a window of the last n_buffer epochs of channel metrics.
            # When fewer than n_buffer epochs exist, pad the window by
            # replicating the epoch-0 snapshot n_replica times.
            n_replica = AdaS.n_buffer - min(epoch + 1, AdaS.n_buffer)
            input_channel_replica = np.tile(
                A=self.historical_io_metrics[0].input_channel_S,
                reps=(n_replica, 1))
            output_channel_replica = np.tile(
                A=self.historical_io_metrics[0].output_channel_S,
                reps=(n_replica, 1))
            fc_channel_replica = np.tile(
                A=self.historical_io_metrics[0].fc_S, reps=(n_replica, 1))
            # Append the actual metrics of the most recent epochs, oldest
            # first, so each row of the replica arrays is one epoch.
            for iteration in range(AdaS.n_buffer - n_replica):
                epoch_identifier = (epoch - AdaS.n_buffer +
                                    n_replica + iteration + 1)
                metric = self.historical_io_metrics[epoch_identifier]
                input_channel_replica = np.concatenate((
                    input_channel_replica,
                    np.tile(
                        A=metric.input_channel_S,
                        reps=(1, 1))))
                output_channel_replica = np.concatenate(
                    (output_channel_replica, np.tile(
                        A=metric.output_channel_S,
                        reps=(1, 1))))
                fc_channel_replica = np.concatenate(
                    (fc_channel_replica, np.tile(
                        A=metric.fc_S,
                        reps=(1, 1))))
            # Regression abscissa: one point per buffered epoch.
            x_regression = np.linspace(start=0, stop=AdaS.n_buffer - 1,
                                       num=AdaS.n_buffer)
            # Average of input/output channel metrics per layer.
            channel_replica = (input_channel_replica +
                               output_channel_replica) / 2
            # channel_replica = output_channel_replica
            """Calculate Rank Velocity"""
            # Slope of the linear fit across the window = rank velocity.
            velocity_conv_rank = np.polyfit(
                x=x_regression, y=channel_replica, deg=1)[0]
            velocity_fc_rank = np.polyfit(
                x=x_regression, y=fc_channel_replica, deg=1)[0][0]
        # Exponential moving update of the rates, floored at min_lr.
        self.R_conv = self.beta * self.R_conv + self.zeta * velocity_conv_rank
        self.R_fc = self.beta * self.R_fc + self.zeta * velocity_fc_rank
        self.R_conv = np.maximum(self.R_conv, self.min_lr)
        self.R_fc = np.maximum(self.R_fc, self.min_lr)
        # Broadcast each conv layer's rate across its parameter slots in
        # lr_vector (conv layers span [conv start, next conv start)).
        call_indices_conv = np.concatenate(
            (self.metrics.conv_indices, [self.metrics.fc_indices[0]]), axis=0)
        for iteration_conv in range(len(call_indices_conv) - 1):
            index_start = call_indices_conv[iteration_conv]
            index_end = call_indices_conv[iteration_conv + 1]
            self.lr_vector[index_start: index_end] = \
                self.R_conv[iteration_conv]
        # All FC parameter slots share the single pooled rate R_fc.
        call_indices_fc = np.concatenate(
            (self.metrics.fc_indices,
             [len(self.metrics.layers_info)]), axis=0)
        for iteration_fc in range(len(call_indices_fc) - 1):
            index_start = call_indices_fc[iteration_fc]
            index_end = call_indices_fc[iteration_fc + 1]
            self.lr_vector[index_start: index_end] = self.R_fc
        return LRMetrics(rank_velocity=velocity_conv_rank.tolist(),
                         r_conv=self.R_conv.tolist())
| [
"numpy.maximum",
"numpy.polyfit",
"numpy.zeros",
"numpy.tile",
"numpy.linspace",
"numpy.concatenate"
] | [((2363, 2395), 'numpy.zeros', 'np.zeros', (['metrics.number_of_conv'], {}), '(metrics.number_of_conv)\n', (2371, 2395), True, 'import numpy as np\n'), ((2436, 2468), 'numpy.zeros', 'np.zeros', (['metrics.number_of_conv'], {}), '(metrics.number_of_conv)\n', (2444, 2468), True, 'import numpy as np\n'), ((2491, 2523), 'numpy.zeros', 'np.zeros', (['metrics.number_of_conv'], {}), '(metrics.number_of_conv)\n', (2499, 2523), True, 'import numpy as np\n'), ((5468, 5504), 'numpy.maximum', 'np.maximum', (['self.R_conv', 'self.min_lr'], {}), '(self.R_conv, self.min_lr)\n', (5478, 5504), True, 'import numpy as np\n'), ((5525, 5559), 'numpy.maximum', 'np.maximum', (['self.R_fc', 'self.min_lr'], {}), '(self.R_fc, self.min_lr)\n', (5535, 5559), True, 'import numpy as np\n'), ((5589, 5674), 'numpy.concatenate', 'np.concatenate', (['(self.metrics.conv_indices, [self.metrics.fc_indices[0]])'], {'axis': '(0)'}), '((self.metrics.conv_indices, [self.metrics.fc_indices[0]]),\n axis=0)\n', (5603, 5674), True, 'import numpy as np\n'), ((2558, 2588), 'numpy.zeros', 'np.zeros', (['metrics.number_of_fc'], {}), '(metrics.number_of_fc)\n', (2566, 2588), True, 'import numpy as np\n'), ((2630, 2660), 'numpy.zeros', 'np.zeros', (['metrics.number_of_fc'], {}), '(metrics.number_of_fc)\n', (2638, 2660), True, 'import numpy as np\n'), ((2684, 2714), 'numpy.zeros', 'np.zeros', (['metrics.number_of_fc'], {}), '(metrics.number_of_fc)\n', (2692, 2714), True, 'import numpy as np\n'), ((3500, 3577), 'numpy.tile', 'np.tile', ([], {'A': 'self.historical_io_metrics[0].input_channel_S', 'reps': '(n_replica, 1)'}), '(A=self.historical_io_metrics[0].input_channel_S, reps=(n_replica, 1))\n', (3507, 3577), True, 'import numpy as np\n'), ((3648, 3726), 'numpy.tile', 'np.tile', ([], {'A': 'self.historical_io_metrics[0].output_channel_S', 'reps': '(n_replica, 1)'}), '(A=self.historical_io_metrics[0].output_channel_S, reps=(n_replica, 1))\n', (3655, 3726), True, 'import numpy as np\n'), ((3793, 3859), 
'numpy.tile', 'np.tile', ([], {'A': 'self.historical_io_metrics[0].fc_S', 'reps': '(n_replica, 1)'}), '(A=self.historical_io_metrics[0].fc_S, reps=(n_replica, 1))\n', (3800, 3859), True, 'import numpy as np\n'), ((4760, 4823), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(AdaS.n_buffer - 1)', 'num': 'AdaS.n_buffer'}), '(start=0, stop=AdaS.n_buffer - 1, num=AdaS.n_buffer)\n', (4771, 4823), True, 'import numpy as np\n'), ((5109, 5161), 'numpy.polyfit', 'np.polyfit', ([], {'x': 'x_regression', 'y': 'channel_replica', 'deg': '(1)'}), '(x=x_regression, y=channel_replica, deg=1)\n', (5119, 5161), True, 'import numpy as np\n'), ((5213, 5268), 'numpy.polyfit', 'np.polyfit', ([], {'x': 'x_regression', 'y': 'fc_channel_replica', 'deg': '(1)'}), '(x=x_regression, y=fc_channel_replica, deg=1)\n', (5223, 5268), True, 'import numpy as np\n'), ((4253, 4299), 'numpy.tile', 'np.tile', ([], {'A': 'metric.input_channel_S', 'reps': '(1, 1)'}), '(A=metric.input_channel_S, reps=(1, 1))\n', (4260, 4299), True, 'import numpy as np\n'), ((4453, 4500), 'numpy.tile', 'np.tile', ([], {'A': 'metric.output_channel_S', 'reps': '(1, 1)'}), '(A=metric.output_channel_S, reps=(1, 1))\n', (4460, 4500), True, 'import numpy as np\n'), ((4646, 4681), 'numpy.tile', 'np.tile', ([], {'A': 'metric.fc_S', 'reps': '(1, 1)'}), '(A=metric.fc_S, reps=(1, 1))\n', (4653, 4681), True, 'import numpy as np\n')] |
import sys
import numpy as np
import numpy.random as rnd
from keras import backend as K
import src.model.objectives as obj
import src.utils.metrics as mtr
rnd.seed(123)  # fix the RNG seed so the generated test data is reproducible
_EPSILON = K.epsilon()  # Keras backend fuzz factor; not referenced in this module's visible code
# MFoM objective functions under test; each has a matching *_np reference
# implementation in this module, resolved by name in check_shape().
allobj = [obj.mfom_eer_normalized,
          obj.pooled_mfom_eer,
          obj.mfom_microf1,
          obj.mfom_macrof1,
          obj.mfom_cprime]
def mfom_eer_normalized_np(y_true, y_pred):
    """
    NumPy reference for the class-wise smoothed MFoM-EER objective.
    Inputs of any rank are flattened to (samples, classes) on the last axis.
    """
    n_classes = y_true.shape[-1]
    labels = np.reshape(y_true, (-1, n_classes))
    scores = np.reshape(y_pred, (-1, n_classes))
    neg_labels = 1 - labels
    # Per-class counts of positive and negative samples.
    pos_count = np.sum(labels, axis=0)
    neg_count = np.sum(neg_labels, axis=0)
    # Smooth per-sample false negatives / false positives.
    miss = scores * labels
    false_alarm = (1. - scores) * neg_labels
    # Add-one smoothed rates, computed in log space as in the original.
    fnr = np.exp(np.log(np.sum(miss, axis=0) + 1.) - np.log(pos_count + 1.))
    fpr = np.exp(np.log(np.sum(false_alarm, axis=0) + 1.) - np.log(neg_count + 1.))
    # Smoothed EER per class, averaged over classes.
    return np.mean(fpr + .5 * np.abs(fnr - fpr))
def pooled_mfom_eer_np(y_true, y_pred):
    """
    NumPy reference for the pooled (corpus-level) smoothed MFoM-EER:
    rates are computed over all classes and samples at once.
    """
    neg_labels = 1 - y_true
    # Smooth miss / false-alarm rates over the whole pool.
    fnr = np.sum(y_pred * y_true) / np.sum(y_true)
    fpr = np.sum((1. - y_pred) * neg_labels) / np.sum(neg_labels)
    return fpr + .5 * np.abs(fnr - fpr)
def mfom_microf1_np(y_true, y_pred):
    """
    NumPy reference for the pooled (micro-averaged) smoothed MFoM-F1 error,
    i.e. 1 - F1 with smooth counters pooled over all classes.
    """
    neg_labels = 1 - y_true
    # Smooth pooled counters (note: low score = detection in this convention).
    tp = np.sum((1. - y_pred) * y_true)
    fp = np.sum((1. - y_pred) * neg_labels)
    fn = np.sum(y_pred * y_true)
    smooth_f1 = 2. * tp / (fp + fn + 2. * tp)
    return 1.0 - smooth_f1
def mfom_macrof1_np(y_true, y_pred):
    """
    NumPy reference for the class-wise (macro-averaged) smoothed MFoM-F1
    error. Inputs of any rank are flattened to (samples, classes).
    """
    n_classes = y_true.shape[-1]
    labels = np.reshape(y_true, (-1, n_classes))
    scores = np.reshape(y_pred, (-1, n_classes))
    neg_labels = 1 - labels
    # Smooth per-class counters (low score = detection in this convention).
    tp = np.sum((1. - scores) * labels, axis=0)
    fn = np.sum(scores * labels, axis=0)
    fp = np.sum((1. - scores) * neg_labels, axis=0)
    numen = 2. * tp
    denum = fp + fn + 2. * tp
    # Add-one smoothed F1 per class, computed in log space as the original.
    smooth_f1 = np.exp(np.log(numen + 1.) - np.log(denum + 1.))
    return np.mean(1.0 - smooth_f1)
def mfom_cprime_np(y_true, y_pred, ptar=0.01):
    """
    NumPy reference for the pooled smoothed C-primary detection cost:
    ptar * FNR + (1 - ptar) * FPR with rates pooled over all classes.
    """
    neg_labels = 1 - y_true
    fn = y_pred * y_true
    fp = (1. - y_pred) * neg_labels
    # Pooled smooth miss / false-alarm rates.
    fnr = np.sum(fn) / np.sum(y_true)
    fpr = np.sum(fp) / np.sum(neg_labels)
    return ptar * fnr + (1. - ptar) * fpr
def check_shape(shape, fun):
    """Check one Keras MFoM objective against its numpy reference.

    shape: tensor shape (int or tuple) of the random test data.
    fun: Keras objective; its numpy reference is looked up in this module
        by appending '_np' to the function's name.
    Raises AssertionError when shapes or values disagree.
    """
    # Random multi-label targets (~1/3 positives) and uniform scores.
    y_true = rnd.choice([0, 1], size=shape, p=[2. / 3, 1. / 3])
    y_pred = rnd.uniform(0, 1, shape)
    # Resolve the numpy reference implementation by naming convention.
    fun_np = getattr(sys.modules[__name__], fun.__name__ + '_np')
    res_np = fun_np(y_true, y_pred)
    # Evaluate the Keras objective on backend variables.
    res_k = K.eval(fun(K.variable(y_true), K.variable(y_pred)))
    print(res_k, res_np)
    assert res_k.shape == res_np.shape
    assert np.isclose(res_k, res_np)
    print('pEER: %.3f' % mtr.eer(y_true.flatten(), y_pred.flatten()))
def test_objective_shapes():
    """Run every MFoM objective against its numpy reference over several input ranks."""
    # Note: (6) is just the int 6, i.e. a rank-1 shape of length 6.
    shapes = [(6), (6, 7), (5, 6, 7), (8, 5, 6, 7), (9, 8, 5, 6, 7)]
    for shape in shapes:
        for objective in allobj:
            print(objective.__name__)
            check_shape(shape, objective)
            print('=' * 10)
if __name__ == '__main__':
    # Allow running this test module directly, outside a pytest runner.
    test_objective_shapes()
| [
"numpy.random.uniform",
"numpy.sum",
"numpy.random.seed",
"numpy.log",
"numpy.abs",
"keras.backend.epsilon",
"numpy.isclose",
"numpy.mean",
"numpy.exp",
"numpy.reshape",
"numpy.random.choice",
"keras.backend.variable"
] | [((156, 169), 'numpy.random.seed', 'rnd.seed', (['(123)'], {}), '(123)\n', (164, 169), True, 'import numpy.random as rnd\n'), ((181, 192), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (190, 192), True, 'from keras import backend as K\n'), ((476, 507), 'numpy.reshape', 'np.reshape', (['y_true', '(-1, s[-1])'], {}), '(y_true, (-1, s[-1]))\n', (486, 507), True, 'import numpy as np\n'), ((521, 552), 'numpy.reshape', 'np.reshape', (['y_pred', '(-1, s[-1])'], {}), '(y_pred, (-1, s[-1]))\n', (531, 552), True, 'import numpy as np\n'), ((632, 654), 'numpy.sum', 'np.sum', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (638, 654), True, 'import numpy as np\n'), ((711, 732), 'numpy.sum', 'np.sum', (['y_neg'], {'axis': '(0)'}), '(y_neg, axis=0)\n', (717, 732), True, 'import numpy as np\n'), ((964, 975), 'numpy.exp', 'np.exp', (['fnr'], {}), '(fnr)\n', (970, 975), True, 'import numpy as np\n'), ((986, 997), 'numpy.exp', 'np.exp', (['fpr'], {}), '(fpr)\n', (992, 997), True, 'import numpy as np\n'), ((1082, 1101), 'numpy.mean', 'np.mean', (['smooth_eer'], {}), '(smooth_eer)\n', (1089, 1101), True, 'import numpy as np\n'), ((1273, 1287), 'numpy.sum', 'np.sum', (['y_true'], {}), '(y_true)\n', (1279, 1287), True, 'import numpy as np\n'), ((1344, 1357), 'numpy.sum', 'np.sum', (['y_neg'], {}), '(y_neg)\n', (1350, 1357), True, 'import numpy as np\n'), ((1603, 1634), 'numpy.sum', 'np.sum', (['((1.0 - y_pred) * y_true)'], {}), '((1.0 - y_pred) * y_true)\n', (1609, 1634), True, 'import numpy as np\n'), ((1643, 1673), 'numpy.sum', 'np.sum', (['((1.0 - y_pred) * y_neg)'], {}), '((1.0 - y_pred) * y_neg)\n', (1649, 1673), True, 'import numpy as np\n'), ((1682, 1705), 'numpy.sum', 'np.sum', (['(y_pred * y_true)'], {}), '(y_pred * y_true)\n', (1688, 1705), True, 'import numpy as np\n'), ((1924, 1955), 'numpy.reshape', 'np.reshape', (['y_true', '(-1, s[-1])'], {}), '(y_true, (-1, s[-1]))\n', (1934, 1955), True, 'import numpy as np\n'), ((1969, 2000), 'numpy.reshape', 
'np.reshape', (['y_pred', '(-1, s[-1])'], {}), '(y_pred, (-1, s[-1]))\n', (1979, 2000), True, 'import numpy as np\n'), ((2065, 2104), 'numpy.sum', 'np.sum', (['((1.0 - y_pred) * y_true)'], {'axis': '(0)'}), '((1.0 - y_pred) * y_true, axis=0)\n', (2071, 2104), True, 'import numpy as np\n'), ((2113, 2144), 'numpy.sum', 'np.sum', (['(y_pred * y_true)'], {'axis': '(0)'}), '(y_pred * y_true, axis=0)\n', (2119, 2144), True, 'import numpy as np\n'), ((2154, 2192), 'numpy.sum', 'np.sum', (['((1.0 - y_pred) * y_neg)'], {'axis': '(0)'}), '((1.0 - y_pred) * y_neg, axis=0)\n', (2160, 2192), True, 'import numpy as np\n'), ((2349, 2366), 'numpy.mean', 'np.mean', (['error_f1'], {}), '(error_f1)\n', (2356, 2366), True, 'import numpy as np\n'), ((2447, 2461), 'numpy.sum', 'np.sum', (['y_true'], {}), '(y_true)\n', (2453, 2461), True, 'import numpy as np\n'), ((2470, 2483), 'numpy.sum', 'np.sum', (['y_neg'], {}), '(y_neg)\n', (2476, 2483), True, 'import numpy as np\n'), ((2721, 2773), 'numpy.random.choice', 'rnd.choice', (['[0, 1]'], {'size': 'shape', 'p': '[2.0 / 3, 1.0 / 3]'}), '([0, 1], size=shape, p=[2.0 / 3, 1.0 / 3])\n', (2731, 2773), True, 'import numpy.random as rnd\n'), ((2785, 2809), 'numpy.random.uniform', 'rnd.uniform', (['(0)', '(1)', 'shape'], {}), '(0, 1, shape)\n', (2796, 2809), True, 'import numpy.random as rnd\n'), ((3052, 3077), 'numpy.isclose', 'np.isclose', (['res_k', 'res_np'], {}), '(res_k, res_np)\n', (3062, 3077), True, 'import numpy as np\n'), ((880, 895), 'numpy.log', 'np.log', (['(P + 1.0)'], {}), '(P + 1.0)\n', (886, 895), True, 'import numpy as np\n'), ((939, 954), 'numpy.log', 'np.log', (['(N + 1.0)'], {}), '(N + 1.0)\n', (945, 954), True, 'import numpy as np\n'), ((1424, 1434), 'numpy.sum', 'np.sum', (['fn'], {}), '(fn)\n', (1430, 1434), True, 'import numpy as np\n'), ((1449, 1459), 'numpy.sum', 'np.sum', (['fp'], {}), '(fp)\n', (1455, 1459), True, 'import numpy as np\n'), ((2567, 2577), 'numpy.sum', 'np.sum', (['fn'], {}), '(fn)\n', (2573, 2577), 
True, 'import numpy as np\n'), ((2592, 2602), 'numpy.sum', 'np.sum', (['fp'], {}), '(fp)\n', (2598, 2602), True, 'import numpy as np\n'), ((1026, 1043), 'numpy.abs', 'np.abs', (['(fnr - fpr)'], {}), '(fnr - fpr)\n', (1032, 1043), True, 'import numpy as np\n'), ((1492, 1509), 'numpy.abs', 'np.abs', (['(fnr - fpr)'], {}), '(fnr - fpr)\n', (1498, 1509), True, 'import numpy as np\n'), ((2266, 2285), 'numpy.log', 'np.log', (['(numen + 1.0)'], {}), '(numen + 1.0)\n', (2272, 2285), True, 'import numpy as np\n'), ((2287, 2306), 'numpy.log', 'np.log', (['(denum + 1.0)'], {}), '(denum + 1.0)\n', (2293, 2306), True, 'import numpy as np\n'), ((2935, 2953), 'keras.backend.variable', 'K.variable', (['y_true'], {}), '(y_true)\n', (2945, 2953), True, 'from keras import backend as K\n'), ((2955, 2973), 'keras.backend.variable', 'K.variable', (['y_pred'], {}), '(y_pred)\n', (2965, 2973), True, 'from keras import backend as K\n'), ((853, 871), 'numpy.sum', 'np.sum', (['fn'], {'axis': '(0)'}), '(fn, axis=0)\n', (859, 871), True, 'import numpy as np\n'), ((912, 930), 'numpy.sum', 'np.sum', (['fp'], {'axis': '(0)'}), '(fp, axis=0)\n', (918, 930), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 16:39:01 2018
@author: gotamist
"""
import numpy as np
from data_generator import AudioGenerator
#import re
from itertools import chain
#from textblob import TextBlob as tb
#import kenlm
import panphon.distance
from fuzzy import DMetaphone
import pyphen
def create_DMetaphone_list(wordset):
    """Map each word to its Double Metaphone codes (max code length 5).

    Returns a (word -> list of code strings) dict together with the set of
    every code seen across the whole word set.
    """
    encoder = DMetaphone(5)
    word_to_codes = {}
    all_codes = set()
    for word in wordset:
        codes = []
        for raw in encoder(word):
            if raw is not None:
                # dm() yields bytes; str(b'XY')[2:-1] strips the b'...' wrapper
                codes.append(str(raw)[2:-1])
        word_to_codes[word] = codes
        all_codes.update(codes)
    return word_to_codes, all_codes
def code2words(code_dict, codeset):
    """Invert a word->codes mapping into a code->words lookup table.

    Parameters
    ----------
    code_dict : dict
        Maps each word to its list of Double Metaphone codes, as produced by
        create_DMetaphone_list() for a specific corpus.
    codeset : set
        Set of all codes for the corpus (kept for interface compatibility;
        the inversion itself does not need it).

    Returns
    -------
    dict
        Maps each code to the list of words sharing it, in the iteration
        order of ``code_dict``.
    """
    c2w_dict = {}
    for word, code_list in code_dict.items():
        for code in code_list:
            # setdefault replaces the `if code in c2w_dict.keys()` test,
            # avoiding the double lookup and the .keys() anti-pattern.
            c2w_dict.setdefault(code, []).append(word)
    return c2w_dict
def num_syllables_in_word(word):
    """Count syllables via pyphen's hyphenation points (pieces = hyphens + 1)."""
    hyphenated = pyphen.Pyphen(lang='en').inserted(word)
    return hyphenated.count('-') + 1
def count_syllables_using_vowels(word):
    """Rough syllable estimate: count vowel letters, plus any 'y' past index 2
    that is not preceded by a vowel (a consonant-y acting as a vowel)."""
    vowel_set = {'a', 'e', 'i', 'o', 'u'}
    total = 0
    for idx, ch in enumerate(word):
        if ch in vowel_set:
            total += 1
        elif idx > 2 and ch == 'y' and word[idx - 1] not in vowel_set:
            total += 1
    return total
def sound_neighborhood(string, codeset, code2words_dict, edit_dist):
    """Collect words from the corpus that sound like ``string``.

    The string is encoded with Double Metaphone; every code in ``codeset``
    within ``edit_dist`` Levenshtein edits of one of those encodings is
    expanded back into words via ``code2words_dict``.
    """
    encoder = DMetaphone(5)
    similar_codes = set()
    for raw in encoder(string):
        if raw is not None:
            code = str(raw)[2:-1]  # strip the b'...' wrapper from the bytes repr
            similar_codes.update(get_neighborhood(code, codeset, edit_dist))
    neighbors = []
    for similar in similar_codes:
        neighbors.extend(code2words_dict[similar])
    return neighbors
def sound_bigram_predict(input_sentence, train_dictionary, predict_dictionary, lmodel, radius=1.5): #input is a string
    # assumes that the output of the DNN is of the right length
    """Correct a decoded sentence with a bigram language model over
    Double Metaphone sound neighborhoods.

    The first two words are chosen jointly by scoring every candidate bigram
    (bos=True); each later word is chosen greedily given only the previously
    chosen word.  A word already present in ``train_dictionary`` is kept as
    its own single candidate instead of expanding its neighborhood.

    ``lmodel`` is a kenlm-style model exposing ``score(text, bos, eos)``.
    Returns the corrected sentence as a single space-joined string.
    """
    codedict, codeset = create_DMetaphone_list(predict_dictionary)
    code2words_dict = code2words( codedict, codeset )
    inp = input_sentence.split()
    nbd0 = [ inp[0] ] if inp[0] in train_dictionary else sound_neighborhood( inp[0], codeset, code2words_dict, radius )
    nbd1 = sound_neighborhood( inp[1], codeset, code2words_dict, radius )
    tg={}
    # Exhaustively score every (first, second) candidate pair.
    for first_word in nbd0:
        for second_word in nbd1:
            bigram = first_word+' '+second_word #+' '+third_word
            tg[ bigram ]=lmodel.score( bigram, bos = True, eos = False)
    pred=max(tg, key=tg.get)
    output = pred.split()
    # Greedy continuation: score each candidate after the last chosen word.
    for i in range(2,len(inp)):
        phrases={}
        nbd = [ inp[i] ] if inp[i] in train_dictionary else sound_neighborhood( inp[i], codeset, code2words_dict, radius )
        for word in nbd:
            candidate=output[-1]+' '+word
            phrases[ word ]=lmodel.score( candidate, bos = False, eos = False)
        next_word=max(phrases, key=phrases.get)
        output.append( next_word )
        pred=pred+' '+next_word
    return pred
def sound_trigram_predict(input_sentence, train_dictionary, predict_dictionary, lmodel, radius=1.5): #input is a string
    #assumes that the output of the DNN is of the right length
    """Correct a decoded sentence with a trigram language model over
    Double Metaphone sound neighborhoods.

    The first three words are chosen jointly by scoring every candidate
    trigram (bos=True); each later word is chosen greedily given the two
    previously chosen words.  Words already in ``train_dictionary`` are kept
    as their own single candidate.  ``lmodel`` is a kenlm-style model with
    ``score(text, bos, eos)``.  Returns the corrected sentence string.
    """
    codedict, codeset = create_DMetaphone_list(predict_dictionary)
    code2words_dict = code2words( codedict, codeset )
    inp = input_sentence.split()
    #construct the first trigram
    #Note that the shortest sentence in this dataset has three words
    nbd0 = [ inp[0] ] if inp[0] in train_dictionary else sound_neighborhood( inp[0], codeset, code2words_dict, radius )
    nbd1 = sound_neighborhood( inp[1], codeset, code2words_dict, radius )
    nbd2 = sound_neighborhood( inp[2], codeset, code2words_dict, radius )
    tg={}
    # Exhaustively score every candidate trigram for the first three words.
    for first_word in nbd0:
        for second_word in nbd1:
            for third_word in nbd2:
                trigram = first_word+' '+second_word+' '+third_word
                tg[ trigram ]=lmodel.score(trigram, bos = True, eos = False)
    pred=max(tg, key=tg.get)
    output = pred.split()
    # Greedy continuation: score each candidate after the last two chosen words.
    for i in range(3,len(inp)):
        phrases={}
        nbd = [ inp[i] ] if inp[i] in train_dictionary else sound_neighborhood( inp[i], codeset, code2words_dict, radius )
        # nbd = get_neighborhood( inp[i], dictionary, 2)
        for word in nbd:
            candidate=output[-2]+' '+output[-1]+' '+word
            phrases[ word ]=lmodel.score( candidate, bos = False, eos = False)
        next_word=max(phrases, key=phrases.get)
        output.append( next_word )
        pred=pred+' '+next_word
    return pred
def levenshtein(seq1, seq2):
    """Return the Levenshtein (edit) distance between two sequences as an int.

    Classic Wagner-Fischer dynamic program, but keeping only the previous
    row instead of the full len(seq1) x len(seq2) NumPy matrix; this also
    returns a plain int rather than a NumPy float, while remaining
    drop-in compatible with the ``<= distance`` comparisons used by the
    neighborhood functions.  (Replaces the matrix-based version adapted
    from stackabuse.com.)
    """
    previous = list(range(len(seq2) + 1))
    for x, a in enumerate(seq1, start=1):
        current = [x]
        for y, b in enumerate(seq2, start=1):
            cost = 0 if a == b else 1
            current.append(min(
                previous[y] + 1,        # deletion
                current[y - 1] + 1,     # insertion
                previous[y - 1] + cost  # substitution (or match)
            ))
        previous = current
    return previous[-1]
def generate_corpus(desc_file):
    """Load the training transcripts listed in ``desc_file`` and return them
    as a list of sentences (one string per utterance)."""
    generator = AudioGenerator()
    generator.load_train_data(desc_file=desc_file)
    return generator.train_texts
def wordset_from_corpus(sent_list):
    """Return the set of unique whitespace-delimited words across sentences.

    Parameters
    ----------
    sent_list : iterable of str
        Sentences, e.g. as returned by generate_corpus().

    Returns
    -------
    set of str
    """
    # The old version built a list of lists, flattened it with
    # chain.from_iterable through a redundant identity comprehension, and
    # only then made a set; updating a set per sentence is simpler.
    words = set()
    for sent in sent_list:
        words.update(sent.split())
    return words
#st = generate_corpus("./train_corpus.json")
#train_words = wordset_from_corpus(st)
#valid_words = wordset_from_corpus(generate_corpus("./valid_corpus.json") )
#unseen_words =[word for word in valid_words if word not in train_words]
#hmmm...733 such unseen words (not many are proper nouns).
# Need a larger wordset than just the train_words (try nltk)
#st_small= generate_corpus("./small_train_corpus.json")
#with open('small_corpus_lines.txt', 'w') as filehandle:
# filehandle.writelines("%s\n" % sentence for sentence in st_small)
def get_neighborhood(string, wordset, distance):
    """Finds all words from a set of words that are within a specified Levenshtein
    Distance from a given string"""
    return {word for word in wordset if levenshtein(string, word) <= distance}
def dolgopolsky_neighborhood(string, wordset, distance):
    """Finds all words from a set of words that are within a specified Dolgopolsky
    Distance from a given string"""
    # panphon's Dolgopolsky-prime distance compares words by phonological
    # sound class, so this neighborhood is phonetic rather than orthographic
    # (contrast get_neighborhood, which uses plain Levenshtein on spellings).
    dst = panphon.distance.Distance()
    nbd = [word for word in wordset if dst.dogol_prime_distance(string, word) <= distance ]
    return set( nbd )
#nbd = get_neighborhood('helium', train_words, 2) #tested, 5 words found
#nbd = get_neighborhood('helium', english, 2) #tested, 43 words found
#sample_true = 'far up the lake eighteen miles above the town the eye of this cheerful camp follower of booms had spied out a graft'
#sample_input_sent = 'far ut the lake eightteen mils abo the town to ey of dis cherple can flolowor o bons had xpide ut a graft'
#sample_blob='far ut the lake eighteen miss ago the town to by of dis chere can follower o bons had side ut a graft'
#inp = input_sent.split()
#kenmodel = kenlm.Model('corpus_360_lines.arpa')
#ken5model = kenlm.Model('5_gram_corpus_360.binary')
def lm_predict(input_sentence, dictionary, lmodel): #input is a string
    #assumes that the output of the DNN is of the right length
    """Correct a decoded sentence by always re-scoring the whole prefix.

    The first three words are chosen jointly by scoring every candidate
    trigram; each later word is chosen by scoring the entire sentence built
    so far plus the candidate (bos=True), i.e. a cumulative-prefix search.
    ``dictionary`` is the word set used to build Levenshtein neighborhoods;
    ``lmodel`` is a kenlm-style model with ``score(text, bos, eos)``.
    """
    inp = input_sentence.split()
    #construct the first trigram
    #Note that the shortest sentence in this dataset has three words
    nbd0 = get_neighborhood( inp[0], dictionary, 2)
    nbd1 = get_neighborhood( inp[1], dictionary, 2)
    nbd2 = get_neighborhood( inp[2], dictionary, 2)
    tg={}
    for first_word in nbd0:
        for second_word in nbd1:
            for third_word in nbd2:
                trigram = first_word+' '+second_word+' '+third_word
                tg[ trigram ]=lmodel.score(trigram, bos = True, eos = False)
    pred=max(tg, key=tg.get)
    for i in range(3,len(inp)):
        phrases={}
        # BUG FIX: this conditional shortcut used to be immediately
        # overwritten by a second, unconditional get_neighborhood call, so
        # words already in the dictionary were pointlessly expanded into
        # their whole neighborhood.  The redundant call is removed.
        nbd = [ inp[i] ] if inp[i] in dictionary else get_neighborhood( inp[i], dictionary, 2)
        for word in nbd:
            candidate=pred+' '+word
            phrases[ candidate ]=lmodel.score( candidate, bos = True, eos = False)
        pred=max(phrases, key=phrases.get)
    return pred
# 'far to the lake eighteen pins so the town to ku of dis cheaply can o fans had pile ku a graft'
def trigram_predict(input_sentence, train_dictionary, predict_dictionary, lmodel, radius=1.5): #input is a string
    #assumes that the output of the DNN is of the right length
    """Correct a decoded sentence with a trigram language model over
    Levenshtein neighborhoods.

    The first three words are chosen jointly by scoring every candidate
    trigram (bos=True); each later word is chosen greedily given the two
    previously chosen words.  A word already in ``train_dictionary`` is kept
    as its own single candidate; otherwise its neighborhood (radius
    ``radius`` over ``predict_dictionary``) is searched.  Returns the
    corrected sentence as a space-joined string.
    """
    inp = input_sentence.split()
    #construct the first trigram
    #Note that the shortest sentence in this dataset has three words
    nbd0 = [ inp[0] ] if inp[0] in train_dictionary else get_neighborhood( inp[0], predict_dictionary, radius)
    nbd1 = get_neighborhood( inp[1], predict_dictionary, radius)
    nbd2 = get_neighborhood( inp[2], predict_dictionary, radius)
    tg={}
    # Exhaustively score every candidate trigram for the first three words.
    for first_word in nbd0:
        for second_word in nbd1:
            for third_word in nbd2:
                trigram = first_word+' '+second_word+' '+third_word
                tg[ trigram ]=lmodel.score(trigram, bos = True, eos = False)
    pred=max(tg, key=tg.get)
    output = pred.split()
    # Greedy continuation: score each candidate after the last two chosen words.
    for i in range(3,len(inp)):
        phrases={}
        nbd = [ inp[i] ] if inp[i] in train_dictionary else get_neighborhood( inp[i], predict_dictionary, radius)
        # nbd = get_neighborhood( inp[i], dictionary, 2)
        for word in nbd:
            candidate=output[-2]+' '+output[-1]+' '+word
            phrases[ word ]=lmodel.score( candidate, bos = False, eos = False)
        next_word=max(phrases, key=phrases.get)
        output.append( next_word )
        pred=pred+' '+next_word
    return pred
# 'far to the lake eighteen miles above the town to be of dis cheaply can can o one had side of a graft'
def cumul_sweep(input_sentence, intermediate, dictionary, lmodel=None):
    """Re-score an intermediate prediction word by word.

    For each position past the first trigram, the candidate set is the union
    of the Levenshtein neighborhoods of the intermediate word and the raw
    input word at that position; the language model picks the continuation
    by scoring the whole sentence built so far plus the candidate.

    NOTE(review): the original body referenced ``pred`` and ``lmodel``
    without ever defining them, so every call raised NameError.  ``pred``
    is now seeded from the first three words of ``intermediate`` and
    ``lmodel`` is an explicit keyword parameter (added with a default so
    the signature stays backward compatible).
    """
    if lmodel is None:
        raise ValueError("cumul_sweep requires a language model (lmodel=...)")
    inp = input_sentence.split()
    inter = intermediate.split()
    # The first trigram is taken as-is from the intermediate prediction.
    pred = ' '.join(inter[:3])
    for i in range(3, len(inp)):
        phrases = {}
        nbd = get_neighborhood(inter[i], dictionary, 2)
        u_nbd = nbd.union(get_neighborhood(inp[i], dictionary, 2))
        for word in u_nbd:
            candidate = pred + ' ' + word
            phrases[word] = lmodel.score(candidate, bos=False, eos=False)
        next_word = max(phrases, key=phrases.get)
        pred = pred + ' ' + next_word
    return pred
def trigram_dolgopolsky_predict(input_sentence, train_dictionary, predict_dictionary, lmodel, radius=1.5): #input is a string
    #assumes that the output of the DNN is of the right length
    """Correct a decoded sentence with a trigram language model over
    Dolgopolsky (phonological) neighborhoods.

    Same search strategy as trigram_predict, but candidate words come from
    dolgopolsky_neighborhood (panphon sound-class distance) instead of plain
    Levenshtein spelling distance.  Returns the corrected sentence string.
    """
    inp = input_sentence.split()
    #construct the first trigram
    #Note that the shortest sentence in this dataset has three words
    #for the second word, use bigram prob from kenlm
    nbd0 = [ inp[0] ] if inp[0] in train_dictionary else dolgopolsky_neighborhood( inp[0], predict_dictionary, radius)
    nbd1 = dolgopolsky_neighborhood( inp[1], predict_dictionary, radius)
    nbd2 = dolgopolsky_neighborhood( inp[2], predict_dictionary, radius)
    tg={}
    # Exhaustively score every candidate trigram for the first three words.
    for first_word in nbd0:
        for second_word in nbd1:
            for third_word in nbd2:
                trigram = first_word+' '+second_word+' '+third_word
                tg[ trigram ]=lmodel.score(trigram, bos = True, eos = False)
    pred=max(tg, key=tg.get)
    output = pred.split()
    # Greedy continuation: score each candidate after the last two chosen words.
    for i in range(3,len(inp)):
        phrases={}
        nbd = [ inp[i] ] if inp[i] in train_dictionary else dolgopolsky_neighborhood( inp[i], predict_dictionary, radius)
        for word in nbd:
            candidate=output[-2]+' '+output[-1]+' '+word
            phrases[ word ]=lmodel.score( candidate, bos = False, eos = False)
        next_word=max(phrases, key=phrases.get)
        output.append( next_word )
        pred=pred+' '+next_word
    return pred
#test_dolgo=trigram_dolgoposlky_predict(sample_input_sent, train_dictionary=train_words, predict_dictionary=english, radius=1.5)
#print( test_dolgo )
def bigram_predict(input_sentence, train_dictionary, predict_dictionary, lmodel, radius=1.5): #input is a string
    #assumes that the output of the DNN is of the right length
    """Correct a decoded sentence with a bigram language model over
    Levenshtein neighborhoods.

    The first two words are chosen jointly by scoring every candidate bigram
    (bos=True); each later word is chosen greedily given only the previously
    chosen word.  Words already in ``train_dictionary`` are kept as their own
    single candidate.  Returns the corrected sentence string.
    """
    inp = input_sentence.split()
    nbd0 = [ inp[0] ] if inp[0] in train_dictionary else get_neighborhood( inp[0], predict_dictionary, radius)
    nbd1 = get_neighborhood( inp[1], predict_dictionary, radius)
    # nbd2 = get_neighborhood( inp[2], predict_dictionary, radius)
    tg={}
    # Exhaustively score every (first, second) candidate pair.
    for first_word in nbd0:
        for second_word in nbd1:
            # for third_word in nbd2:
            bigram = first_word+' '+second_word #+' '+third_word
            tg[ bigram ]=lmodel.score( bigram, bos = True, eos = False)
    pred=max(tg, key=tg.get)
    output = pred.split()
    # Greedy continuation: score each candidate after the last chosen word.
    for i in range(2,len(inp)):
        phrases={}
        nbd = [ inp[i] ] if inp[i] in train_dictionary else get_neighborhood( inp[i], predict_dictionary, radius)
        for word in nbd:
            candidate=output[-1]+' '+word
            phrases[ word ]=lmodel.score( candidate, bos = False, eos = False)
        next_word=max(phrases, key=phrases.get)
        output.append( next_word )
        pred=pred+' '+next_word
    return pred
def bigram_dolgopolsky_predict(input_sentence, train_dictionary, predict_dictionary, lmodel, radius=1.5): #input is a string
    #assumes that the output of the DNN is of the right length
    """Correct a decoded sentence with a bigram language model over
    Dolgopolsky (phonological) neighborhoods.

    The first two words are chosen jointly by scoring every candidate bigram
    (bos=True); every later word is chosen greedily given the previously
    chosen word.  Words already in ``train_dictionary`` are kept as their own
    single candidate.  Returns the corrected sentence string.
    """
    inp = input_sentence.split()
    nbd0 = [ inp[0] ] if inp[0] in train_dictionary else dolgopolsky_neighborhood( inp[0], predict_dictionary, radius)
    nbd1 = dolgopolsky_neighborhood( inp[1], predict_dictionary, radius)
    tg={}
    for first_word in nbd0:
        for second_word in nbd1:
            bigram = first_word+' '+second_word
            tg[ bigram ]=lmodel.score(bigram, bos = True, eos = False)
    pred=max(tg, key=tg.get)
    output = pred.split()
    # BUG FIX: the continuation loop previously started at range(3, ...),
    # which silently dropped inp[2] -- the bigram seed above only consumes
    # inp[0] and inp[1].  Start at 2, matching bigram_predict.
    for i in range(2,len(inp)):
        phrases={}
        nbd = [ inp[i] ] if inp[i] in train_dictionary else dolgopolsky_neighborhood( inp[i], predict_dictionary, radius)
        for word in nbd:
            candidate=output[-1]+' '+word
            phrases[ word ]=lmodel.score( candidate, bos = False, eos = False)
        next_word=max(phrases, key=phrases.get)
        output.append( next_word )
        pred=pred+' '+next_word
    return pred
#for key, value in sorted(bg_scores.iteritems(), key=lambda (k,v): (v,k)):
# print "%s: %s" % (key, value)
#newD = dict(sorted(bg.items(), key=operator.itemgetter(1), reverse=True)[:5])
#x = sorted(tg, key=tg.get, reverse=True)[:5]
## use the lines below to generate the txt on which to train kenlm
## the arpa file will be generated from this
#with open('corpus_360_lines.txt', 'w') as filehandle:
# filehandle.writelines("%s\n" % sentence for sentence in st)
#Test kenlm using the python module contributed to kenlm by <NAME>.
# pip install https://github.com/kpu/kenlm/archive/master.zip
# see more here https://github.com/kpu/kenlm
#import kenlm
#model = kenlm.Model('corpus_360_lines.arpa')
# print(model.score('play the matter', bos = True, eos = True))
| [
"numpy.zeros",
"pyphen.Pyphen",
"data_generator.AudioGenerator",
"fuzzy.DMetaphone",
"itertools.chain.from_iterable"
] | [((375, 388), 'fuzzy.DMetaphone', 'DMetaphone', (['(5)'], {}), '(5)\n', (385, 388), False, 'from fuzzy import DMetaphone\n'), ((1234, 1258), 'pyphen.Pyphen', 'pyphen.Pyphen', ([], {'lang': '"""en"""'}), "(lang='en')\n", (1247, 1258), False, 'import pyphen\n'), ((1858, 1871), 'fuzzy.DMetaphone', 'DMetaphone', (['(5)'], {}), '(5)\n', (1868, 1871), False, 'from fuzzy import DMetaphone\n'), ((5305, 5331), 'numpy.zeros', 'np.zeros', (['(size_x, size_y)'], {}), '((size_x, size_y))\n', (5313, 5331), True, 'import numpy as np\n'), ((6059, 6075), 'data_generator.AudioGenerator', 'AudioGenerator', ([], {}), '()\n', (6073, 6075), False, 'from data_generator import AudioGenerator\n'), ((6357, 6387), 'itertools.chain.from_iterable', 'chain.from_iterable', (['word_list'], {}), '(word_list)\n', (6376, 6387), False, 'from itertools import chain\n')] |
import numpy as np
from tqdm import tqdm
# import scipy.misc as scm
from PIL import Image
from matplotlib.pyplot import imread
import os
from utils import crop_center
import h5py
import pdb
from utils import read_data_file
class DataLoader:
    """Abstract base class for dataset loaders.

    Subclasses must override load_images_and_labels().
    """

    def __init__(self):
        return

    def load_images_and_labels(self, imgs_names, image_dir, n_class, file_names_dict, num_channel=3,
                               do_center_crop=False):
        """Return a (images, labels) pair for the requested image names.

        Raises
        ------
        NotImplementedError
            Always; subclasses provide the actual implementation.  (The old
            version printed the error and then raised a bare
            NotImplementedError -- the message now travels in the exception.)
        """
        raise NotImplementedError(
            "load_images_and_labels must be overridden in a DataLoader subclass")
class ArrayLoader(DataLoader):
    """Loader backed by in-memory arrays; ``imgs_names`` are integer indices."""

    def __init__(self, images, labels, input_size=64):
        DataLoader.__init__(self)
        self.images = images
        self.labels = labels
        self.input_size = input_size

    def load_images_and_labels(self, imgs_names, image_dir, n_class, file_names_dict, num_channel=3,
                               do_center_crop=False):
        """Index directly into the stored arrays; path/crop arguments are ignored."""
        del image_dir, n_class, file_names_dict, num_channel, do_center_crop
        selected_images = self.images[imgs_names]
        selected_labels = self.labels[imgs_names]
        return selected_images, selected_labels
class ImageLabelLoader(DataLoader):
    """Loads images from disk, resized and normalized to [-1, 1], with label
    vectors looked up by file name."""

    def __init__(self, input_size=128):
        DataLoader.__init__(self)
        self.input_size = input_size

    def load_images_and_labels(self, imgs_names, image_dir, n_class, file_names_dict, num_channel=3,
                               do_center_crop=False):
        """Read each image under ``image_dir``, optionally center-crop, resize
        to the loader's input size, scale pixels from [0, 255] to [-1, 1],
        and fetch its label vector from ``file_names_dict``.

        Labels equal to -1 are mapped to 0 before returning.
        Returns (imgs, labels) float32 arrays.
        """
        imgs = np.zeros((imgs_names.shape[0], self.input_size, self.input_size, num_channel), dtype=np.float32)
        labels = np.zeros((imgs_names.shape[0], n_class), dtype=np.float32)
        for i, img_name in tqdm(enumerate(imgs_names)):
            img = imread(os.path.join(image_dir, img_name))
            if do_center_crop and self.input_size == 128:
                img = crop_center(img, 150, 150)
            img = np.array(Image.fromarray(img).resize((self.input_size, self.input_size)))
            img = np.reshape(img, [self.input_size, self.input_size, num_channel])
            # map [0, 255] -> [-1, 1]
            img = img / 255.0
            img = img - 0.5
            img = img * 2.0
            imgs[i] = img
            try:
                labels[i] = file_names_dict[img_name]
            except KeyError:
                # BUG FIX: was a bare `except:` that swallowed every error;
                # only a missing annotation is expected here -- report the
                # file and leave its zero label.
                print(img_name)
        labels[labels == -1] = 0  # boolean mask instead of np.where(...)
        return imgs, labels
class ShapesLoader(DataLoader):
    """Loader for the DeepMind 3dshapes dataset (data/shapes/3dshapes.h5).

    Loads the full image array (or, in debug mode, a small index subset)
    into memory, normalized to [-1, 1].
    """

    def __init__(self, dbg_mode=False, dbg_size=32,
                 dbg_image_label_dict='./output/classifier/shapes-redcolor/explainer_input/list_attr_3_5000.txt',
                 dbg_img_indices=None):
        # BUG FIX: dbg_img_indices previously defaulted to a mutable list
        # literal ([]), the classic shared-default pitfall; use None and
        # substitute a fresh empty list per call.
        if dbg_img_indices is None:
            dbg_img_indices = []
        self.input_size = 64
        shapes_dir = os.path.join('data', 'shapes')
        self.dbg_mode = dbg_mode
        dataset = h5py.File(os.path.join(shapes_dir, '3dshapes.h5'), 'r')
        if self.dbg_mode:
            print('Debug mode activated. #{} samples from the shapes dataset will be considered.'.format(dbg_size))
            if len(dbg_img_indices) == 0:
                # No explicit indices: take the first dbg_size names from the
                # attribute list file.
                _, file_names_dict = read_data_file(dbg_image_label_dict)
                _tmp_list = list(file_names_dict.keys())[:dbg_size]
            else:
                _tmp_list = dbg_img_indices[:dbg_size]
            # h5py fancy indexing requires sorted integer indices.
            self.tmp_list = list(np.sort([int(ind) for ind in _tmp_list]))
            self.images = np.array(dataset['images'][self.tmp_list])
        else:
            self.images = np.array(dataset['images'])  # array shape [480000, 64, 64, 3], uint8 in range(256)
        # map [0, 255] -> [-1, 1]
        self.images = self.images / 255.0
        self.images = self.images - 0.5
        self.images = self.images * 2.0
        self.attributes = np.array(dataset['labels'])
        self._image_shape = self.images.shape[1:]  # [64, 64, 3]
        self._label_shape = self.attributes.shape[1:]  # [6]
        self._n_samples = self.attributes.shape[0]  # 10 * 10 * 10 * 8 * 4 * 15 = 480000
        self._FACTORS_IN_ORDER = ['floor_hue', 'wall_hue', 'object_hue', 'scale', 'shape',
                                  'orientation']
        self._NUM_VALUES_PER_FACTOR = {'floor_hue': 10, 'wall_hue': 10, 'object_hue': 10,
                                       'scale': 8, 'shape': 4, 'orientation': 15}

        # same color label
        # self.labels = (self.attributes[:, 0] == self.attributes[:, 1]) & (
        #         self.attributes[:, 0] == self.attributes[:, 2])

    def load_images_and_labels(self, imgs_names, image_dir, n_class, file_names_dict, num_channel=3,
                               do_center_crop=False):
        """Return (images, labels) for integer image indices ``imgs_names``.

        ``image_dir`` and ``do_center_crop`` are ignored (data is in memory);
        labels come from ``file_names_dict`` keyed by the stringified index.
        """
        assert n_class == 1
        assert num_channel == 3
        del image_dir, do_center_crop
        # Currently not handling resizing etc
        # cur_labels = self.labels[imgs_names]
        labels = np.zeros((imgs_names.shape[0], n_class), dtype=np.float32)
        for i, img_name in tqdm(enumerate(imgs_names)):
            labels[i] = file_names_dict[str(img_name)]
        if self.dbg_mode:
            # Debug images were loaded by sorted position, so translate each
            # requested index into its position within tmp_list.
            tmp_inds = [self.tmp_list.index(int(ind)) for ind in imgs_names]
            return self.images[tmp_inds], labels
        else:
            return self.images[imgs_names.astype(np.int32)], labels
| [
"utils.crop_center",
"numpy.zeros",
"utils.read_data_file",
"numpy.where",
"numpy.array",
"numpy.reshape",
"PIL.Image.fromarray",
"os.path.join"
] | [((1458, 1558), 'numpy.zeros', 'np.zeros', (['(imgs_names.shape[0], self.input_size, self.input_size, num_channel)'], {'dtype': 'np.float32'}), '((imgs_names.shape[0], self.input_size, self.input_size,\n num_channel), dtype=np.float32)\n', (1466, 1558), True, 'import numpy as np\n'), ((1572, 1630), 'numpy.zeros', 'np.zeros', (['(imgs_names.shape[0], n_class)'], {'dtype': 'np.float32'}), '((imgs_names.shape[0], n_class), dtype=np.float32)\n', (1580, 1630), True, 'import numpy as np\n'), ((2741, 2771), 'os.path.join', 'os.path.join', (['"""data"""', '"""shapes"""'], {}), "('data', 'shapes')\n", (2753, 2771), False, 'import os\n'), ((3694, 3721), 'numpy.array', 'np.array', (["dataset['labels']"], {}), "(dataset['labels'])\n", (3702, 3721), True, 'import numpy as np\n'), ((4788, 4846), 'numpy.zeros', 'np.zeros', (['(imgs_names.shape[0], n_class)'], {'dtype': 'np.float32'}), '((imgs_names.shape[0], n_class), dtype=np.float32)\n', (4796, 4846), True, 'import numpy as np\n'), ((2082, 2146), 'numpy.reshape', 'np.reshape', (['img', '[self.input_size, self.input_size, num_channel]'], {}), '(img, [self.input_size, self.input_size, num_channel])\n', (2092, 2146), True, 'import numpy as np\n'), ((2397, 2419), 'numpy.where', 'np.where', (['(labels == -1)'], {}), '(labels == -1)\n', (2405, 2419), True, 'import numpy as np\n'), ((2833, 2872), 'os.path.join', 'os.path.join', (['shapes_dir', '"""3dshapes.h5"""'], {}), "(shapes_dir, '3dshapes.h5')\n", (2845, 2872), False, 'import os\n'), ((3379, 3421), 'numpy.array', 'np.array', (["dataset['images'][self.tmp_list]"], {}), "(dataset['images'][self.tmp_list])\n", (3387, 3421), True, 'import numpy as np\n'), ((3462, 3489), 'numpy.array', 'np.array', (["dataset['images']"], {}), "(dataset['images'])\n", (3470, 3489), True, 'import numpy as np\n'), ((1713, 1746), 'os.path.join', 'os.path.join', (['image_dir', 'img_name'], {}), '(image_dir, img_name)\n', (1725, 1746), False, 'import os\n'), ((1828, 1854), 'utils.crop_center', 
'crop_center', (['img', '(150)', '(150)'], {}), '(img, 150, 150)\n', (1839, 1854), False, 'from utils import crop_center\n'), ((3100, 3136), 'utils.read_data_file', 'read_data_file', (['dbg_image_label_dict'], {}), '(dbg_image_label_dict)\n', (3114, 3136), False, 'from utils import read_data_file\n'), ((1882, 1902), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (1897, 1902), False, 'from PIL import Image\n')] |
#!/usr/bin/env python
"""how_much_cpu.py for Ramses
by <NAME>
This script looks at PBS outputs in the current working directory, assuming they are generated by
Ramses runs, and estimates the total amount of CPU hours spent on the current run.
If matplotlib is installed, it also saves a PDF with a plot of the amount of CPU hours used versus
the scalefactor of the simulation.
"""
from __future__ import print_function
import glob, re

# Every PBS job writes a <jobname>.o<jobid> file into the run directory.
outputs = glob.glob("*.o*")

times = []          # cumulative wall-time (seconds) at each "Total running" line
scalefactors = []   # simulation scalefactor at each of those points
time = None         # wall-time reported by the current output file
cumutime = 0.0      # wall-time accumulated over all previous output files

if len(outputs)==0:
    print("No outputs found. Change directory to your run folder and rerun how_much_cpu.py")
    exit(1)

for o in sorted(outputs):
    time = None
    for l in open(o,'r'):
        if "Total running" in l:
            # BUG FIX: regex literals are now raw strings; "\s" in a plain
            # literal is an invalid escape sequence (DeprecationWarning and
            # a future SyntaxError).
            time = float(re.search(r"[0-9.]+", l).group(0))
            times.append(time+cumutime)
            scalefactors.append(scalefactor)
        if "Fine step" in l:
            try:
                scalefactor = float(re.search(r"a= ([0-9.E\-]+)", l).group(1))
            except ValueError:
                pass
        elif "nproc" in l:
            nproc = int(re.search(r"nproc\s+=\s+([0-9]+)", l).group(1))
    if time is not None:
        cumutime+=time

# Report the grand totals from the last recorded point.
time = times[-1]
hour = int(time/60/60)
mins = int(time/60)-hour*60
print("Total wall-time %.0fh %.0fm"%(hour,mins))
print("Number of processors %d"%nproc)
print("Total CPU time %.0fh"%((time/60/60)*nproc))

# Optional plot of CPU hours vs. scalefactor; skipped when matplotlib is absent.
try:
    import matplotlib, numpy as np
    matplotlib.use('pdf')   # non-interactive backend: write straight to PDF
    import pylab as p
    p.plot(nproc*np.array(times)/60/60,scalefactors)
    p.xlabel("CPU hours")
    p.ylabel("Scalefactor")
    p.savefig("how_much_cpu.pdf")
    print("Wrote a report as how_much_cpu.pdf")
except ImportError:
    print("Was not able to import matplotlib - no .pdf output produced")
| [
"pylab.ylabel",
"pylab.savefig",
"matplotlib.use",
"numpy.array",
"pylab.xlabel",
"glob.glob",
"re.search"
] | [((452, 469), 'glob.glob', 'glob.glob', (['"""*.o*"""'], {}), "('*.o*')\n", (461, 469), False, 'import glob, re\n'), ((1485, 1506), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (1499, 1506), False, 'import matplotlib, numpy as np\n'), ((1588, 1609), 'pylab.xlabel', 'p.xlabel', (['"""CPU hours"""'], {}), "('CPU hours')\n", (1596, 1609), True, 'import pylab as p\n'), ((1614, 1637), 'pylab.ylabel', 'p.ylabel', (['"""Scalefactor"""'], {}), "('Scalefactor')\n", (1622, 1637), True, 'import pylab as p\n'), ((1642, 1671), 'pylab.savefig', 'p.savefig', (['"""how_much_cpu.pdf"""'], {}), "('how_much_cpu.pdf')\n", (1651, 1671), True, 'import pylab as p\n'), ((1548, 1563), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (1556, 1563), True, 'import matplotlib, numpy as np\n'), ((791, 814), 're.search', 're.search', (['"""[0-9.]+"""', 'l'], {}), "('[0-9.]+', l)\n", (800, 814), False, 'import glob, re\n'), ((991, 1023), 're.search', 're.search', (['"""a= ([0-9.E\\\\-]+)"""', 'l'], {}), "('a= ([0-9.E\\\\-]+)', l)\n", (1000, 1023), False, 'import glob, re\n'), ((1136, 1174), 're.search', 're.search', (['"""nproc\\\\s+=\\\\s+([0-9]+)"""', 'l'], {}), "('nproc\\\\s+=\\\\s+([0-9]+)', l)\n", (1145, 1174), False, 'import glob, re\n')] |
# -*- coding: utf-8 -*-
# @Author: jadesauve
# @Date: 2020-04-23 12:59:57
# @Last Modified by: jadesauve
# @Last Modified time: 2020-04-23 13:50:48
"""
read data from 2017-01-0118.ctd
save lists of depth and temp
plot
"""
# imports
import matplotlib.pyplot as plt
import numpy as np

# path
in_dir = '/Users/jadesauve/Coding/data/CTD/' #MODIFY

# define the input filename
in_fn = in_dir + '2017-01-0118.ctd'

#depth,temp = np.genfromtxt(in_fn,skip_header=572,usecols=[1,2])

# define a counter (counts lines so the 570-line header can be skipped)
i=0
# define lists to hold the data
depth=[]
temp=[]

# open the file; errors='ignore' skips undecodable bytes in the header
with open(in_fn, 'r', errors='ignore') as f:
    for line in f:
        if i >= 570:
            # data rows are whitespace-delimited; column 1 is depth,
            # column 2 is temperature
            lst = line.split()
            # add the data to the list
            depth.append(float(lst[1]))
            temp.append(float(lst[2]))
        i+=1

# convert the lists to arrays
temp = np.array(temp)
depth = np.array(depth)

# plot temperature against depth (depth negated so down is down)
plt.figure()
plt.plot(temp,-depth)
plt.show()
| [
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((804, 818), 'numpy.array', 'np.array', (['temp'], {}), '(temp)\n', (812, 818), True, 'import numpy as np\n'), ((827, 842), 'numpy.array', 'np.array', (['depth'], {}), '(depth)\n', (835, 842), True, 'import numpy as np\n'), ((851, 863), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (861, 863), True, 'import matplotlib.pyplot as plt\n'), ((864, 886), 'matplotlib.pyplot.plot', 'plt.plot', (['temp', '(-depth)'], {}), '(temp, -depth)\n', (872, 886), True, 'import matplotlib.pyplot as plt\n'), ((886, 896), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (894, 896), True, 'import matplotlib.pyplot as plt\n')] |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import pi
import cv2
import scipy.misc
import tensorflow as tf

# data.txt lists one "<image file> <steering angle in degrees>" pair per line.
DATA_FOLDER = "/home/ajay/Applied_course/self_driving_car/Autopilot-TensorFlow-master/driving_dataset/"
DATA_FILE = os.path.join(DATA_FOLDER, "data.txt")

x = []  # image file paths
y = []  # steering angles (radians)
train_batch_pointer = 0
test_batch_pointer = 0

with open(DATA_FILE) as f:
    for line in f:
        image_name, angle = line.split()
        image_path = os.path.join(DATA_FOLDER, image_name)
        x.append(image_path)
        angle_radians = float(angle) * (pi / 180) #converting angle into radians
        y.append(angle_radians)

y = np.array(y)

# 80/20 train/test split, in file order (no shuffling).
split_ratio = int(len(x) * 0.8)

train_image = x[:split_ratio]
train_angle = y[:split_ratio]

test_images = x[split_ratio:]
test_angle = y[split_ratio:]
def weightVariable(shape):
    """Create a trainable weight tensor of the given shape, initialized from
    a truncated normal distribution (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape=shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor of the given shape, filled with 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def convolution(previous_input, filter_input, strides):
    """2-D convolution with equal row/column stride and VALID (no) padding."""
    stride_spec = [1, strides, strides, 1]
    return tf.nn.conv2d(previous_input, filter_input, strides=stride_spec, padding="VALID")
# --- Network graph: 5 convolution layers followed by 5 fully-connected layers ---
# x_input: batch of 66x200 RGB frames; y_true: target steering angle (declared but unused here).
x_input = tf.placeholder(tf.float32, shape = [None, 66, 200, 3], name = "Plc_1")
y_true = tf.placeholder(tf.float32, name = "Plc_2")
input_shape = x_input
#Convolution Layers
#First convolution layer
W_Conv1 = weightVariable([5,5,3,24])
B_Conv1 = bias_variable([24])
Conv1 = tf.nn.relu(convolution(input_shape, W_Conv1, 2) + B_Conv1)
#strides = 2
#Output size: 31*98*24
#Second convolution layer
W_Conv2 = weightVariable([5,5,24,36])
B_Conv2 = bias_variable([36])
Conv2 = tf.nn.relu(convolution(Conv1, W_Conv2, 2) + B_Conv2)
#strides = 2
#Output size: 14*47*36
#Third convolution layer
W_Conv3 = weightVariable([5,5,36,48])
B_Conv3 = bias_variable([48])
Conv3 = tf.nn.relu(convolution(Conv2, W_Conv3, 2) + B_Conv3)
#strides = 2
#Output size: 5*22*48
#Fourth convolution layer
W_Conv4 = weightVariable([3,3,48,64])
B_Conv4 = bias_variable([64])
Conv4 = tf.nn.relu(convolution(Conv3, W_Conv4, 1) + B_Conv4)
#strides = 1
#Output size: 3*20*64
#Fifth convolution layer
W_Conv5 = weightVariable([3,3,64,64])
B_Conv5 = bias_variable([64])
Conv5 = tf.nn.relu(convolution(Conv4, W_Conv5, 1) + B_Conv5)
#strides = 1
#Output size: 1*18*64
#Fully-Connected Dense Layers
# keep_prob: dropout keep-probability placeholder; fed as 1.0 at inference time below.
keep_prob = tf.placeholder(tf.float32)
#First FC-Dense
#Input = 1*18*64 = 1152
W_FC1 = weightVariable([1152, 1164])
B_FC1 = bias_variable([1164])
FC1_Flatten = tf.reshape(Conv5, [-1, 1152]) #here, -1 indicates 1. It means that the shape of FC1_Flatten will be 1*1152
Output_FC1 = tf.nn.relu(tf.matmul(FC1_Flatten, W_FC1) + B_FC1) #so, here shape of FC1_Flatten is 1*1152 and shape of W_FC1 will
#be 1152*1164. Therefore, there will be a matrix multiplication of matrices: (1*1152) * (1152*1164) = (1*1164).
Output_FC1_drop = tf.nn.dropout(Output_FC1, keep_prob)
#Second FC-Dense
#Input = 1*1164 = 1164
W_FC2 = weightVariable([1164, 100])
B_FC2 = bias_variable([100])
Output_FC2 = tf.nn.relu(tf.matmul(Output_FC1_drop, W_FC2) + B_FC2) #so, here shape of Output_FC1_drop is 1*1164 and shape of
#W_FC2 will be 1164*100. Therefore, there will be a matrix multiplication of matrices: (1*1164) * (1164*100) = (1*100).
Output_FC2_drop = tf.nn.dropout(Output_FC2, keep_prob)
#Third FC-Dense
#Input = 1*100 = 100
W_FC3 = weightVariable([100, 50])
B_FC3 = bias_variable([50])
Output_FC3 = tf.nn.relu(tf.matmul(Output_FC2_drop, W_FC3) + B_FC3) #so, here shape of Output_FC2_drop is 1*100 and shape of
#W_FC3 will be 100*50. Therefore, there will be a matrix multiplication of matrices: (1*100) * (100*50) = (1*50).
Output_FC3_drop = tf.nn.dropout(Output_FC3, keep_prob)
#Fourth FC-Dense
#Input = 1*50 = 50
W_FC4 = weightVariable([50, 10])
B_FC4 = bias_variable([10])
Output_FC4 = tf.nn.relu(tf.matmul(Output_FC3_drop, W_FC4) + B_FC4) #so, here shape of Output_FC3_drop is 1*50 and shape of
#W_FC4 will be 50*10. Therefore, there will be a matrix multiplication of matrices: (1*50) * (50*10) = (1*10).
Output_FC4_drop = tf.nn.dropout(Output_FC4, keep_prob)
#Final Output to one neuron with linear/identity function
#Input = 1*10 = 10
W_FC5 = weightVariable([10, 1])
B_FC5 = bias_variable([1])
y_predicted = tf.identity(tf.matmul(Output_FC4_drop, W_FC5) + B_FC5)
# --- Inference / visualization loop ---
sess = tf.InteractiveSession()
saver = tf.train.Saver()
saver.restore(sess, "/home/ajay/Downloads/model.ckpt")
# Steering-wheel sprite loaded grayscale (flag 0); rotated per prediction below.
img = cv2.imread('steering_wheel_image.jpg', 0)
rows, cols = img.shape
i = 0
# Show one test frame roughly every 50 ms until 'q' is pressed.
# NOTE(review): i is never bounded, so this raises IndexError once test_images is exhausted.
while(cv2.waitKey(50) != ord("q")):
    image_read = cv2.imread(test_images[i])
    cv2.imshow('Frame Window', image_read)
    # Keep only the bottom 150 rows, resize to the network's 200x66 input, scale to [0, 1].
    image = ((cv2.resize(image_read[-150:], (200, 66)) / 255.0).reshape((1, 66, 200, 3)))
    # Training labels were radians (see angle_radians above); convert back to degrees.
    degrees = sess.run(y_predicted, feed_dict = {x_input: image, keep_prob: 1.0})[0][0] *180 / pi
    print("Predicted degrees: "+str(degrees))
    # Rotate the wheel sprite about its center by the (negated) predicted angle.
    M = cv2.getRotationMatrix2D((cols/2,rows/2), -degrees, 1)
    dst = cv2.warpAffine(src = img, M = M, dsize = (cols, rows))
    cv2.imshow("Steering Wheel", dst)
    i += 1
cv2.destroyAllWindows()
| [
"tensorflow.reshape",
"cv2.warpAffine",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.InteractiveSession",
"cv2.imshow",
"os.path.join",
"cv2.getRotationMatrix2D",
"tensorflow.truncated_normal",
"tensorflow.placeholder",
"cv2.destroyAllWindows",
"cv2.resize"... | [((272, 309), 'os.path.join', 'os.path.join', (['DATA_FOLDER', '"""data.txt"""'], {}), "(DATA_FOLDER, 'data.txt')\n", (284, 309), False, 'import os\n'), ((669, 680), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (677, 680), True, 'import numpy as np\n'), ((1238, 1304), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 66, 200, 3]', 'name': '"""Plc_1"""'}), "(tf.float32, shape=[None, 66, 200, 3], name='Plc_1')\n", (1252, 1304), True, 'import tensorflow as tf\n'), ((1318, 1358), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""Plc_2"""'}), "(tf.float32, name='Plc_2')\n", (1332, 1358), True, 'import tensorflow as tf\n'), ((2406, 2432), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2420, 2432), True, 'import tensorflow as tf\n'), ((2554, 2583), 'tensorflow.reshape', 'tf.reshape', (['Conv5', '[-1, 1152]'], {}), '(Conv5, [-1, 1152])\n', (2564, 2583), True, 'import tensorflow as tf\n'), ((2919, 2955), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['Output_FC1', 'keep_prob'], {}), '(Output_FC1, keep_prob)\n', (2932, 2955), True, 'import tensorflow as tf\n'), ((3325, 3361), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['Output_FC2', 'keep_prob'], {}), '(Output_FC2, keep_prob)\n', (3338, 3361), True, 'import tensorflow as tf\n'), ((3718, 3754), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['Output_FC3', 'keep_prob'], {}), '(Output_FC3, keep_prob)\n', (3731, 3754), True, 'import tensorflow as tf\n'), ((4105, 4141), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['Output_FC4', 'keep_prob'], {}), '(Output_FC4, keep_prob)\n', (4118, 4141), True, 'import tensorflow as tf\n'), ((4356, 4379), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (4377, 4379), True, 'import tensorflow as tf\n'), ((4388, 4404), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4402, 4404), True, 'import tensorflow as tf\n'), ((4467, 4508), 'cv2.imread', 
'cv2.imread', (['"""steering_wheel_image.jpg"""', '(0)'], {}), "('steering_wheel_image.jpg', 0)\n", (4477, 4508), False, 'import cv2\n'), ((5073, 5096), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5094, 5096), False, 'import cv2\n'), ((877, 921), 'tensorflow.truncated_normal', 'tf.truncated_normal', ([], {'shape': 'shape', 'stddev': '(0.1)'}), '(shape=shape, stddev=0.1)\n', (896, 921), True, 'import tensorflow as tf\n'), ((937, 957), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (948, 957), True, 'import tensorflow as tf\n'), ((999, 1028), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (1010, 1028), True, 'import tensorflow as tf\n'), ((1040, 1060), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1051, 1060), True, 'import tensorflow as tf\n'), ((1129, 1226), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['previous_input', 'filter_input'], {'strides': '[1, strides, strides, 1]', 'padding': '"""VALID"""'}), "(previous_input, filter_input, strides=[1, strides, strides, 1],\n padding='VALID')\n", (1141, 1226), True, 'import tensorflow as tf\n'), ((4545, 4560), 'cv2.waitKey', 'cv2.waitKey', (['(50)'], {}), '(50)\n', (4556, 4560), False, 'import cv2\n'), ((4592, 4618), 'cv2.imread', 'cv2.imread', (['test_images[i]'], {}), '(test_images[i])\n', (4602, 4618), False, 'import cv2\n'), ((4623, 4661), 'cv2.imshow', 'cv2.imshow', (['"""Frame Window"""', 'image_read'], {}), "('Frame Window', image_read)\n", (4633, 4661), False, 'import cv2\n'), ((4904, 4962), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', '(-degrees)', '(1)'], {}), '((cols / 2, rows / 2), -degrees, 1)\n', (4927, 4962), False, 'import cv2\n'), ((4968, 5016), 'cv2.warpAffine', 'cv2.warpAffine', ([], {'src': 'img', 'M': 'M', 'dsize': '(cols, rows)'}), '(src=img, M=M, dsize=(cols, rows))\n', (4982, 5016), False, 'import cv2\n'), ((5027, 5060), 'cv2.imshow', 
'cv2.imshow', (['"""Steering Wheel"""', 'dst'], {}), "('Steering Wheel', dst)\n", (5037, 5060), False, 'import cv2\n'), ((483, 520), 'os.path.join', 'os.path.join', (['DATA_FOLDER', 'image_name'], {}), '(DATA_FOLDER, image_name)\n', (495, 520), False, 'import os\n'), ((2685, 2714), 'tensorflow.matmul', 'tf.matmul', (['FC1_Flatten', 'W_FC1'], {}), '(FC1_Flatten, W_FC1)\n', (2694, 2714), True, 'import tensorflow as tf\n'), ((3086, 3119), 'tensorflow.matmul', 'tf.matmul', (['Output_FC1_drop', 'W_FC2'], {}), '(Output_FC1_drop, W_FC2)\n', (3095, 3119), True, 'import tensorflow as tf\n'), ((3486, 3519), 'tensorflow.matmul', 'tf.matmul', (['Output_FC2_drop', 'W_FC3'], {}), '(Output_FC2_drop, W_FC3)\n', (3495, 3519), True, 'import tensorflow as tf\n'), ((3877, 3910), 'tensorflow.matmul', 'tf.matmul', (['Output_FC3_drop', 'W_FC4'], {}), '(Output_FC3_drop, W_FC4)\n', (3886, 3910), True, 'import tensorflow as tf\n'), ((4305, 4338), 'tensorflow.matmul', 'tf.matmul', (['Output_FC4_drop', 'W_FC5'], {}), '(Output_FC4_drop, W_FC5)\n', (4314, 4338), True, 'import tensorflow as tf\n'), ((4676, 4716), 'cv2.resize', 'cv2.resize', (['image_read[-150:]', '(200, 66)'], {}), '(image_read[-150:], (200, 66))\n', (4686, 4716), False, 'import cv2\n')] |
import gym
import time
import numpy as np
from absl import flags
import sys, os
import torch
from abp import SADQAdaptive
from abp.utils import clear_summary_path
from abp.explanations import PDX
from tensorboardX import SummaryWriter
from gym.envs.registration import register
from sc2env.environments.tug_of_war_2p import TugOfWar
#from sc2env.xai_replay.recorder.recorder import XaiReplayRecorder
from tqdm import tqdm
from copy import deepcopy
from random import randint
np.set_printoptions(precision = 2)
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
def run_task(evaluation_config, network_config, reinforce_config, map_name = None, train_forever = False):
    """Self-play training/evaluation loop for the 2-player Tug-of-War SC2 env.

    agent_1 (SADQAdaptive) trains against an opponent that starts random and,
    once the mean of the last five test rewards exceeds 11000, is replaced by
    a frozen copy of agent_1's weights.  Each outer iteration runs a training
    phase then a test phase; the mean test reward is appended to
    result_self_play.txt.  The outer ``while True`` has no break, so this
    function does not return.

    NOTE(review): ``train_forever``, ``choices`` and ``enemy_update`` are
    never read; ``privous_5_result`` is a typo for "previous".
    """
    if (use_cuda):
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print("| USING CUDA |")
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
    else:
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
        print("| NOT USING CUDA |")
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
    # Parse only argv[0] so absl flags ignore the caller's CLI arguments.
    flags.FLAGS(sys.argv[:1])
    # at the end of the reward type name:
    # 1 means for player 1 is positive, for player 2 is negative
    # 2 means for player 2 is positive, for player 1 is negative
    reward_types = ['player_1_get_damage_2',
                    'player_2_get_damage_1',
                    'player_1_win_1',
                    'player_2_win_2']
    max_episode_steps = 35
    #state = env.reset()
    choices = [0,1,2,3]
    #pdx_explanation = PDX()
    replay_dimension = evaluation_config.xai_replay_dimension
    env = TugOfWar(reward_types, map_name = map_name, \
        generate_xai_replay = evaluation_config.generate_xai_replay, xai_replay_dimension = replay_dimension)
    combine_sa = env.combine_sa
    state_1, state_2 = env.reset()
    # Each player is either a learning SADQ agent or a uniform-random policy.
    if not reinforce_config.is_random_agent_1:
        agent_1 = SADQAdaptive(name = "TugOfWar",
                    state_length = len(state_1),
                    network_config = network_config,
                    reinforce_config = reinforce_config)
        print("sadq agent 1")
    else:
        print("random agent 1")
    if not reinforce_config.is_random_agent_2:
        agent_2 = SADQAdaptive(name = "TugOfWar",
                    state_length = len(state_2),
                    network_config = network_config,
                    reinforce_config = reinforce_config)
        print("sadq agent 2")
    else:
        print("random agent 2")
    training_summaries_path = evaluation_config.summaries_path + "/train"
    clear_summary_path(training_summaries_path)
    train_summary_writer = SummaryWriter(training_summaries_path)
    test_summaries_path = evaluation_config.summaries_path + "/test"
    clear_summary_path(test_summaries_path)
    test_summary_writer = SummaryWriter(test_summaries_path)
    random_enemy = True
    enemy_update = 30
    round_num = 0
    privous_5_result = []
    all_experiences = []
    if not reinforce_config.is_random_agent_2:
        agent_2.load_model(agent_1.eval_model)
    while True:
        # Promote agent_1's current weights into the opponent once it clearly
        # beats the current enemy (mean of the last 5 test rewards > 11000).
        if len(privous_5_result) >= 5 and \
        sum(privous_5_result) / 5 > 11000 and \
        not reinforce_config.is_random_agent_2:
            print("replace enemy agent's weight with self agent")
            random_enemy = False
            f = open("result_self_play.txt", "a+")
            f.write("Update agent\n")
            f.close()
            agent_2.load_model(agent_1.eval_model)
            agent_1.steps = 0
            agent_1.best_reward_mean = 0
            agent_1.save(force = True, appendix = "update_" + str(round_num))
        if not reinforce_config.is_random_agent_2:
            agent_2.disable_learning()
        round_num += 1
        print("=======================================================================")
        print("===============================Now training============================")
        print("=======================================================================")
        print("Now training.")
        if random_enemy:
            print("enemy is random")
        # --- Training phase: agent_1 learns, opponent is frozen/random ---
        for episode in tqdm(range(evaluation_config.training_episodes)):
        # for episode in range(1):
            # break
            if reinforce_config.collecting_experience:
                break
            state_1, state_2 = env.reset()
            total_reward = 0
            skiping = True
            done = False
            steps = 0
            # print(list(env.denormalization(state_1)))
            # print(list(env.denormalization(state_2)))
            # Advance the env until the next decision point (or episode end).
            while skiping:
                state_1, state_2, done, dp = env.step([], 0)
                if dp or done:
                    break
            while not done and steps < max_episode_steps:
                steps += 1
                # Decision point
                # print('state:')
                # print("=======================================================================")
                # print(list(env.denormalization(state_1)))
                # print(list(env.denormalization(state_2)))
                actions_1 = env.get_big_A(env.denormalization(state_1)[env.miner_index])
                actions_2 = env.get_big_A(env.denormalization(state_2)[env.miner_index])
                if not reinforce_config.is_random_agent_1:
                    combine_states_1 = combine_sa(state_1, actions_1, 1)
                    choice_1, _ = agent_1.predict(combine_states_1)
                    # print(combine_states_1)
                else:
                    choice_1 = randint(0, len(actions_1) - 1)
                if not reinforce_config.is_random_agent_2 and not random_enemy:
                    combine_states_2 = combine_sa(state_2, actions_2, 2)
                    choice_2, _ = agent_2.predict(combine_states_2)
                else:
                    choice_2 = randint(0, len(actions_2) - 1)
                # print("action list:")
                # print(actions_1)
                # print(actions_2)
                # # assign action
                # print("choice:")
                # print(actions_1[choice_1])
                # print(actions_2[choice_2])
                # print("after state:")
                # print(env.denormalization(combine_states_1[choice_1]).tolist())
                # print(env.denormalization(combine_states_2[choice_2]).tolist())
                # input('pause')
                env.step(list(actions_1[choice_1]), 1)
                env.step(list(actions_2[choice_2]), 2)
                # env.step((0,0,0,1), 1)
                # env.step((0,0,0,1), 2)
                # Skip forward to the next decision point / terminal state.
                while skiping:
                    state_1, state_2, done, dp = env.step([], 0)
                    # input('time_step')
                    if dp or done:
                        break
                reward_1, reward_2 = env.sperate_reward(env.decomposed_rewards)
                # print('reward:')
                # print(reward_1)
                # print(reward_2)
                for r1, r2 in zip(reward_1, reward_2):
                    if not reinforce_config.is_random_agent_1:
                        agent_1.reward(r1)
                # if not reinforce_config.is_random_agent_2:
                #     agent_2.reward(r2)
            if not reinforce_config.is_random_agent_1:
                agent_1.end_episode(env.end_state_1)
            # if not reinforce_config.is_random_agent_2:
            #     agent_2.end_episode(np.hstack((env.end_state_2, np.zeros(4))))
            test_summary_writer.add_scalar(tag = "Train/Episode Reward", scalar_value = total_reward,
                                           global_step = episode + 1)
            train_summary_writer.add_scalar(tag = "Train/Steps to choosing Enemies", scalar_value = steps + 1,
                                            global_step = episode + 1)
        if not reinforce_config.is_random_agent_1:
            agent_1.disable_learning(is_save = not reinforce_config.collecting_experience)
        if not reinforce_config.is_random_agent_2:
            agent_2.disable_learning()
        total_rewwards_list = []
        # Test Episodes
        print("======================================================================")
        print("===============================Now testing============================")
        print("======================================================================")
        # --- Test phase: greedy play, optionally dumping (state, next-state) experiences ---
        for episode in tqdm(range(evaluation_config.test_episodes)):
            state = env.reset()
            total_reward_1 = 0
            done = False
            skiping = True
            steps = 0
            previous_state_1 = None
            previous_state_2 = None
            previous_action_1 = None
            previous_action_2 = None
            # print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%Starting episode%%%%%%%%%%%%%%%%%%%%%%%%%")
            while skiping:
                # start_time = time.time()
                state_1, state_2, done, dp = env.step([], 0)
                if dp or done:
                    # print(time.time() - start_time)
                    break
            # input("done stepping to finish prior action")
            while not done and steps < max_episode_steps:
                steps += 1
                # # Decision point
                # print('state:')
                # print(list(env.denormalization(state_1)))
                # print(list(env.denormalization(state_2)))
                # print("Get actions time:")
                # start_time = time.time()
                actions_1 = env.get_big_A(env.denormalization(state_1)[env.miner_index])
                actions_2 = env.get_big_A(env.denormalization(state_2)[env.miner_index])
                # print(time.time() - start_time)
                combine_states_1 = combine_sa(state_1, actions_1, 1)
                if not reinforce_config.is_random_agent_1:
                    start_time = time.time()
                    choice_1, _ = agent_1.predict(combine_states_1)
                    print(time.time() - start_time)
                else:
                    choice_1 = randint(0, len(actions_1) - 1)
                combine_states_2 = combine_sa(state_2, actions_2, 2)
                if not reinforce_config.is_random_agent_2 and not random_enemy:
                    choice_2, _ = agent_2.predict(combine_states_2)
                else:
                    choice_2 = randint(0, len(actions_2) - 1)
                # input('stepped with command 2')
                #######
                #experience collecting
                ######
                if reinforce_config.collecting_experience:
                    if previous_state_1 is not None and previous_state_2 is not None and previous_action_1 is not None and previous_action_2 is not None:
                        previous_state_1[5:9] = previous_state_2[0:4] # Include player 2's action
                        # print(previous_state_1[env.miner_index])
                        denorm_previous_state_1 = env.denormalization(previous_state_1)
                        denorm_previous_state_1[env.miner_index] += denorm_previous_state_1[3] * 50 + 100
                        # print(previous_state_1[env.miner_index])
                        experience = [
                            denorm_previous_state_1,
                            np.append(env.denormalization(state_1), previous_reward_1)
                        ]
                        #print(experience)
                        all_experiences.append(experience)
                        if ((len(all_experiences)) % 100 == 0) and reinforce_config.collecting_experience:
                            torch.save(all_experiences, 'abp/examples/pysc2/tug_of_war/all_experience.pt')
                        # pretty_print(len(all_experiences) - 1, all_experiences)
                        # print()
                        # input("pause")
                previous_state_1 = deepcopy(combine_states_1[choice_1])
                previous_state_2 = deepcopy(combine_states_2[choice_2])
                env.step(list(actions_1[choice_1]), 1)
                # input('stepped with command 1')
                env.step(list(actions_2[choice_2]), 2)
                previous_action_1 = deepcopy(actions_1[choice_1])
                previous_action_2 = deepcopy(actions_2[choice_2])
                while skiping:
                    # print("Get actions time:")
                    # start_time = time.time()
                    state_1, state_2, done, dp = env.step([], 0)
                    #input(' step wating for done signal')
                    if dp or done:
                        # print(time.time() - start_time)
                        break
                # input('done stepping after collecting experience')
                current_reward_1 = 0
                reward_1, reward_2 = env.sperate_reward(env.decomposed_rewards)
                for r1 in reward_1:
                    current_reward_1 += r1
                total_reward_1 += current_reward_1
                previous_reward_1 = current_reward_1
            # Flush the final transition of the episode as one more experience.
            if reinforce_config.collecting_experience:
                previous_state_1[5:9] = previous_state_2[0:4] # Include player 2's action
                denorm_previous_state_1 = env.denormalization(previous_state_1)
                denorm_previous_state_1[env.miner_index] += denorm_previous_state_1[3] * 50 + 100
                # print(previous_state_1[env.miner_index])
                experience = [
                    denorm_previous_state_1,
                    np.append(env.denormalization(state_1), previous_reward_1)
                ]
                all_experiences.append(experience)
                if ((len(all_experiences)) % 100 == 0) and reinforce_config.collecting_experience:
                    torch.save(all_experiences, 'abp/examples/pysc2/tug_of_war/all_experience.pt')
                # pretty_print(len(all_experiences) - 1, all_experiences)
                # print()
                # input("pause")
            total_rewwards_list.append(total_reward_1)
            test_summary_writer.add_scalar(tag="Test/Episode Reward", scalar_value=total_reward_1,
                                           global_step=episode + 1)
            test_summary_writer.add_scalar(tag="Test/Steps to choosing Enemies", scalar_value=steps + 1,
                                           global_step=episode + 1)
        # if reinforce_config.collecting_experience:
        #     break
        #print(test.size())
        tr = sum(total_rewwards_list) / evaluation_config.test_episodes
        print("total reward:")
        print(tr)
        privous_5_result.append(tr)
        if len(privous_5_result) > 5:
            del privous_5_result[0]
        f = open("result_self_play.txt", "a+")
        f.write(str(tr) + "\n")
        f.close()
        if not reinforce_config.is_random_agent_1:
            agent_1.enable_learning()
        # if not reinforce_config.is_random_agent_2:
        #     agent_2.enable_learning()
        # if reinforce_config.collecting_experience:
        #     torch.save(all_experiences, 'abp/examples/pysc2/tug_of_war/all_experiences_2.pt')
def pretty_print(i,data):
    """Print experience *i* from *data* as a readable before/after report.

    data[i][0] is the input state and data[i][1] the output (successor)
    state; indices follow the Tug-of-War state layout used by run_task.
    """
    before = data[i][0]
    after = data[i][1]
    # ---- input state ----
    print("---------------------------------------------- input --------------------------------------------------------------------")
    print(f"i:\t{i}\t\tfriendly nexus: {before[4]}\t\tenemey nexus: {before[9]}")
    print(f"\tmarine: {before[0]}\tvikings: {before[1]}\tcolossus: {before[2]}\tpylons: {before[3]}\tE marine: {before[5]}\tE vikings: {before[6]}\tE colossus: {before[7]}\tE pylons: {before[8]}")
    print('on feild:')
    print(f"\tmarine: {before[11]}\tvikings: {before[12]}\tcolossus: {before[13]}\tE marine: {before[14]}\tE vikings: {before[15]}\tE colossus: {before[16]}")
    print(f"mineral:{before[10]}")
    print("-------------------------------------------------------------------------------------------------------------------------")
    # ---- output state ----
    print("---------------------------------------------- output ------------------------------------------------------------------------")
    print(f"i:\t{i}\t\tfriendly nexus: {after[4]}\t\tenemey nexus: {after[9]}")
    print(f"\tmarine: {after[0]}\tvikings: {after[1]}\tcolossus: {after[2]}\tpylons: {after[3]}\tE marine: {after[5]}\tE vikings: {after[6]}\tE colossus: {after[7]}\tE pylons: {after[8]}")
    print('on feild:')
    print(f"\tmarine: {after[11]}\tvikings: {after[12]}\tcolossus: {after[13]}\tE marine: {after[14]}\tE vikings: {after[15]}\tE colossus: {after[16]}")
    print(f"mineral:{after[10]}")
    print(f"reward:{after[17]}")
    print("-------------------------------------------------------------------------------------------------------------------------")
| [
"tensorboardX.SummaryWriter",
"numpy.set_printoptions",
"copy.deepcopy",
"abp.utils.clear_summary_path",
"time.time",
"torch.save",
"sc2env.environments.tug_of_war_2p.TugOfWar",
"torch.cuda.is_available",
"absl.flags.FLAGS"
] | [((477, 509), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (496, 509), True, 'import numpy as np\n'), ((523, 548), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (546, 548), False, 'import torch\n'), ((1026, 1051), 'absl.flags.FLAGS', 'flags.FLAGS', (['sys.argv[:1]'], {}), '(sys.argv[:1])\n', (1037, 1051), False, 'from absl import flags\n'), ((1578, 1727), 'sc2env.environments.tug_of_war_2p.TugOfWar', 'TugOfWar', (['reward_types'], {'map_name': 'map_name', 'generate_xai_replay': 'evaluation_config.generate_xai_replay', 'xai_replay_dimension': 'replay_dimension'}), '(reward_types, map_name=map_name, generate_xai_replay=\n evaluation_config.generate_xai_replay, xai_replay_dimension=\n replay_dimension)\n', (1586, 1727), False, 'from sc2env.environments.tug_of_war_2p import TugOfWar\n'), ((2597, 2640), 'abp.utils.clear_summary_path', 'clear_summary_path', (['training_summaries_path'], {}), '(training_summaries_path)\n', (2615, 2640), False, 'from abp.utils import clear_summary_path\n'), ((2668, 2706), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['training_summaries_path'], {}), '(training_summaries_path)\n', (2681, 2706), False, 'from tensorboardX import SummaryWriter\n'), ((2781, 2820), 'abp.utils.clear_summary_path', 'clear_summary_path', (['test_summaries_path'], {}), '(test_summaries_path)\n', (2799, 2820), False, 'from abp.utils import clear_summary_path\n'), ((2847, 2881), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['test_summaries_path'], {}), '(test_summaries_path)\n', (2860, 2881), False, 'from tensorboardX import SummaryWriter\n'), ((12559, 12588), 'copy.deepcopy', 'deepcopy', (['actions_1[choice_1]'], {}), '(actions_1[choice_1])\n', (12567, 12588), False, 'from copy import deepcopy\n'), ((12625, 12654), 'copy.deepcopy', 'deepcopy', (['actions_2[choice_2]'], {}), '(actions_2[choice_2])\n', (12633, 12654), False, 'from copy import deepcopy\n'), ((10150, 10161), 
'time.time', 'time.time', ([], {}), '()\n', (10159, 10161), False, 'import time\n'), ((12250, 12286), 'copy.deepcopy', 'deepcopy', (['combine_states_1[choice_1]'], {}), '(combine_states_1[choice_1])\n', (12258, 12286), False, 'from copy import deepcopy\n'), ((12326, 12362), 'copy.deepcopy', 'deepcopy', (['combine_states_2[choice_2]'], {}), '(combine_states_2[choice_2])\n', (12334, 12362), False, 'from copy import deepcopy\n'), ((14154, 14232), 'torch.save', 'torch.save', (['all_experiences', '"""abp/examples/pysc2/tug_of_war/all_experience.pt"""'], {}), "(all_experiences, 'abp/examples/pysc2/tug_of_war/all_experience.pt')\n", (14164, 14232), False, 'import torch\n'), ((10257, 10268), 'time.time', 'time.time', ([], {}), '()\n', (10266, 10268), False, 'import time\n'), ((11950, 12028), 'torch.save', 'torch.save', (['all_experiences', '"""abp/examples/pysc2/tug_of_war/all_experience.pt"""'], {}), "(all_experiences, 'abp/examples/pysc2/tug_of_war/all_experience.pt')\n", (11960, 12028), False, 'import torch\n')] |
from .utils import score_all_prots
from copy import copy
import numpy as np
import pandas as pd
import pickle as pkl
import random
def obj_score(subst, mini=False):
    """
    Objective function to optimize a matrix.
    Input: substitution matrix for which to test objective score
    Output: objective score of that matrix

    The score is the sum of true-positive rates at false-positive rates
    0.0, 0.1, 0.2 and 0.3, so it ranges from 0.0 to 4.0.
    """
    base_dir = "/Users/student/Documents/BMI206/bmi203-3"
    gap_open = -6
    gap_extend = -5

    # Align and score all positive pairs (smaller "max" files when mini).
    print("\tScoring positives...")
    pos_path = base_dir + ("/output/Pos_max.txt" if mini == True else "/HW3_due_02_23/Pospairs.txt")
    pos_scores = score_all_prots(subst, pos_path, gap_open, gap_extend)

    # Align and score all negative pairs, best scores first.
    print("\tScoring negatives...")
    neg_path = base_dir + ("/output/Neg_max.txt" if mini == True else "/HW3_due_02_23/Negpairs.txt")
    neg_scores = score_all_prots(subst, neg_path, gap_open, gap_extend)
    neg_scores.sort(reverse=True)

    print("\tFinding cutoff...")
    total_tpr = 0.0
    for fpr in np.arange(0, 0.31, 0.1):
        # Number of negatives admitted at this false-positive rate.
        n_neg_kept = int(round(fpr * len(neg_scores)))
        if n_neg_kept > 0:
            cutoff = neg_scores[n_neg_kept - 1]
        else:
            # Accepting zero negatives: cutoff just above the best negative score.
            cutoff = neg_scores[0] + 1
        # Add the true-positive rate achieved at this cutoff.
        total_tpr += sum(s >= cutoff for s in pos_scores) / len(pos_scores)
    return total_tpr
def random_permut(start_mat, max_iter):
    """Greedy random hill-climb over a substitution matrix.

    Repeatedly perturbs one off-diagonal (row, col) entry of the matrix by a
    random integer step in [-5, 5] \\ {0}, keeps the matrix symmetric, and
    accepts the candidate only when its objective score (see obj_score)
    improves.  Stops early if a score of 4.0 (the maximum) is reached.

    Parameters
    ----------
    start_mat : pandas.DataFrame
        Square, symmetric substitution matrix whose last column is '*'.
    max_iter : int
        Maximum number of perturbations to try.

    Returns
    -------
    (pandas.DataFrame, float)
        The best matrix found and its objective score.
    """
    print("Calculating the objective score of the original BLOSUM matrix...")
    prev_best = obj_score(start_mat)#, mini=True)
    print("Original score: ", prev_best)
    best_mat = start_mat
    # Interesting function of symmetry: we try addition of one step and addition of two steps at the same time
    print("Calculating first permutation of the BLOSUM matrix...")
    for _ in range(max_iter):
        names = list(best_mat.columns.values)
        del names[-1] # we don't care about the *
        # Bug fix: random.sample(...) returned a one-element LIST, which broke
        # the scalar addition below; random.choice returns the scalar itself.
        step = random.choice([-5, -4, -3, -2, -1, 1, 2, 3, 4, 5])
        x = copy(best_mat) # copy of the best matrix seen so far
        i, j = random.sample(names, 2) # choose a random cell to alter step size (excluding * cells)
        print("Looking at row ", i, ", col ", j)
        print("Value at this index: ", x.loc[i, j])
        x.loc[i, j] = x.loc[i, j] + step # add the chosen step to this parameter
        # Bug fix: was `x.loc[i,j] = x.loc[i,j]` (a no-op), which broke symmetry.
        x.loc[j, i] = x.loc[i, j] # keep symmetric!
        score = obj_score(x)#, mini=True) # score this new matrix
        print("Score of new matrix: ", score)
        if score > prev_best:
            best_mat = x
            prev_best = score
            print("Accepting new matrix.")
        if score >= 4.0:
            print("Maximized function.")
            return (x, score)
    print("Reached max iteration of ", max_iter)
    # Bug fix: return the best accepted matrix, not the last (possibly rejected) candidate.
    return (best_mat, prev_best)
| [
"random.sample",
"numpy.arange",
"copy.copy"
] | [((1064, 1087), 'numpy.arange', 'np.arange', (['(0)', '(0.31)', '(0.1)'], {}), '(0, 0.31, 0.1)\n', (1073, 1087), True, 'import numpy as np\n'), ((2355, 2408), 'random.sample', 'random.sample', (['[-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]', '(1)'], {}), '([-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], 1)\n', (2368, 2408), False, 'import random\n'), ((2411, 2426), 'copy.copy', 'copy', (['start_mat'], {}), '(start_mat)\n', (2415, 2426), False, 'from copy import copy\n'), ((2513, 2536), 'random.sample', 'random.sample', (['names', '(2)'], {}), '(names, 2)\n', (2526, 2536), False, 'import random\n')] |
from sklearn import metrics
from sklearn.utils.multiclass import unique_labels
import numpy as np
def classification_report(y_true, y_pred, labels=None, target_names=None):
    """Build a text report showing the main classification metrics
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.
    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.
    labels : array, shape = [n_labels]
        Optional list of label indices to include in the report.
    target_names : list of strings
        Optional display names matching the labels (same order).
    Returns
    -------
    report : string
        Text summary of the precision, recall, F1 score for each class.
    Examples
    --------
    >>> from sklearn.metrics import classification_report
    >>> y_true = [0, 1, 2, 2, 0]
    >>> y_pred = [0, 0, 2, 2, 0]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report(y_true, y_pred, target_names=target_names))
                 precision    recall  f1-score   support
    <BLANKLINE>
        class 0       0.67      1.00      0.80         2
        class 1       0.00      0.00      0.00         1
        class 2       1.00      1.00      1.00         2
    <BLANKLINE>
    avg / total       0.67      0.80      0.72         5
    <BLANKLINE>
    """
    # Bug fix: the `labels` argument was documented but silently ignored
    # (always overwritten by unique_labels).  Honor it when supplied.
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
    last_line_heading = 'avg / total'
    if target_names is None:
        width = len(last_line_heading)
        target_names = ['{}'.format(l) for l in labels]
    else:
        width = max(len(cn) for cn in target_names)
        width = max(width, len(last_line_heading))
    headers = ["precision", "recall", "f1-score", "support"]
    # Row template: class-name column padded to `width`, then 4 metric columns.
    fmt = '%% %ds' % width  # first column: class name
    fmt += '  '
    fmt += ' '.join(['% 9s' for _ in headers])
    fmt += '\n'
    headers = [""] + headers
    report = fmt % tuple(headers)
    report += '\n'
    # Pass `labels` through so the per-class rows line up with the requested
    # label subset/order (part of the labels-argument fix above).
    p, r, f1, s = metrics.precision_recall_fscore_support(y_true, y_pred,
                                                           labels=labels)
    for i, label in enumerate(labels):
        values = [target_names[i]]
        for v in (p[i], r[i], f1[i]):
            values += ["%0.2f" % float(v)]
        values += ["%d" % int(s[i])]
        report += fmt % tuple(values)
    report += '\n'
    # compute averages (support-weighted, matching the per-class rows above)
    values = [last_line_heading]
    for v in (np.average(p, weights=s),
              np.average(r, weights=s),
              np.average(f1, weights=s)):
        values += ["%0.2f" % float(v)]
    values += ['%d' % np.sum(s)]
    report += fmt % tuple(values)
return report | [
"sklearn.utils.multiclass.unique_labels",
"numpy.sum",
"numpy.average",
"sklearn.metrics.precision_recall_fscore_support"
] | [((1421, 1450), 'sklearn.utils.multiclass.unique_labels', 'unique_labels', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1434, 1450), False, 'from sklearn.utils.multiclass import unique_labels\n'), ((2164, 2219), 'sklearn.metrics.precision_recall_fscore_support', 'metrics.precision_recall_fscore_support', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2203, 2219), False, 'from sklearn import metrics\n'), ((2542, 2566), 'numpy.average', 'np.average', (['p'], {'weights': 's'}), '(p, weights=s)\n', (2552, 2566), True, 'import numpy as np\n'), ((2582, 2606), 'numpy.average', 'np.average', (['r'], {'weights': 's'}), '(r, weights=s)\n', (2592, 2606), True, 'import numpy as np\n'), ((2622, 2647), 'numpy.average', 'np.average', (['f1'], {'weights': 's'}), '(f1, weights=s)\n', (2632, 2647), True, 'import numpy as np\n'), ((2711, 2720), 'numpy.sum', 'np.sum', (['s'], {}), '(s)\n', (2717, 2720), True, 'import numpy as np\n')] |
# coding: utf-8
"""
Automated Tool for Optimized Modelling (ATOM)
Author: Mavs
Description: Unit tests for ensembles.py
"""
import numpy as np
import pytest
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from atom.ensembles import (
StackingClassifier, StackingRegressor, VotingClassifier, VotingRegressor,
)
from atom.pipeline import Pipeline
from atom.utils import check_is_fitted
from .utils import X_bin, X_reg, y_bin, y_reg
@pytest.fixture
def classifiers():
"""Get a list of classifiers for the ensemble."""
return [
("lda", LinearDiscriminantAnalysis().fit(X_bin, y_bin)),
("placeholder1", "drop"),
("pl", Pipeline(
[("scaler", StandardScaler()), ("et", ExtraTreesClassifier())]
).fit(X_bin, y_bin)),
]
@pytest.fixture
def regressors():
"""Get a list of regressors for the ensemble."""
return [
("ols", LinearRegression()),
("placeholder1", "drop"),
("pl", Pipeline([("scaler", StandardScaler()), ("et", ExtraTreesRegressor())])),
]
# Stacking ========================================================= >>
def test_stacking_classifier(classifiers):
"""Assert that stacking classifiers work."""
stack = StackingClassifier(estimators=classifiers, cv=KFold())
assert not check_is_fitted(stack, False)
stack.fit(X_bin, y_bin)
assert check_is_fitted(stack, False)
assert len(stack.estimators_) == 2
assert stack.estimators_[0] is classifiers[0][1] # Fitted is same
assert stack.estimators_[1] is not classifiers[1][1] # Unfitted changes
def test_stacking_regressor(regressors):
"""Assert that stacking regressors."""
stack = StackingRegressor(estimators=regressors)
assert not check_is_fitted(stack, False)
stack.fit(X_reg, y_reg)
assert check_is_fitted(stack, False)
assert len(stack.estimators_) == 2
# Voting =========================================================== >>
def test_voting_initialized_fitted(classifiers):
"""Assert that the model can be fit at initialization."""
vote = VotingClassifier(estimators=classifiers)
assert check_is_fitted(vote, False)
def test_voting_multilabel(classifiers):
"""Assert that an error is raised for multilabel targets."""
vote = VotingClassifier(estimators=classifiers)
with pytest.raises(NotImplementedError, match=r".*Multilabel.*"):
vote.fit(X_bin, np.array([[0, 1], [1, 0]]))
def test_voting_invalid_type(classifiers):
"""Assert that an error is raised when voting type is invalid."""
vote = VotingClassifier(estimators=classifiers, voting="invalid")
with pytest.raises(ValueError, match=r".*must be 'soft'.*"):
vote.fit(X_bin, y_bin)
def test_voting_invalid_weights(classifiers):
"""Assert that an error is raised when weights have invalid length."""
vote = VotingClassifier(estimators=classifiers, weights=[0, 1])
with pytest.raises(ValueError, match=r".*estimators and weights.*"):
vote.fit(X_bin, y_bin)
def test_voting_mixed_fit_and_not(classifiers):
"""Assert that fitted and non-fitted models can be used both."""
estimators = classifiers.copy()
estimators.append(("not_fitted_lda", LinearDiscriminantAnalysis()))
vote = VotingClassifier(estimators=estimators)
assert not check_is_fitted(vote, False)
vote.fit(X_bin, y_bin)
assert check_is_fitted(vote, False)
assert len(vote.estimators_) == 3
assert vote.estimators_[0] is estimators[0][1] # Fitted is same
assert vote.estimators_[2] is not estimators[2][1] # Unfitted changes
@pytest.mark.parametrize("voting", ["soft", "hard"])
def test_voting_predict(classifiers, voting):
"""Assert that the predict method doesn't use the encoder."""
vote = VotingClassifier(estimators=classifiers, voting=voting)
assert isinstance(vote.predict(X_bin), np.ndarray)
def test_voting_regressor(regressors):
"""Assert that the regressor works."""
vote = VotingRegressor(estimators=regressors)
assert not check_is_fitted(vote, False)
vote.fit(X_reg, y_reg)
assert check_is_fitted(vote, False)
| [
"atom.utils.check_is_fitted",
"sklearn.preprocessing.StandardScaler",
"atom.ensembles.VotingRegressor",
"sklearn.model_selection.KFold",
"sklearn.linear_model.LinearRegression",
"sklearn.ensemble.ExtraTreesClassifier",
"pytest.raises",
"numpy.array",
"sklearn.discriminant_analysis.LinearDiscriminant... | [((3817, 3868), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""voting"""', "['soft', 'hard']"], {}), "('voting', ['soft', 'hard'])\n", (3840, 3868), False, 'import pytest\n'), ((1597, 1626), 'atom.utils.check_is_fitted', 'check_is_fitted', (['stack', '(False)'], {}), '(stack, False)\n', (1612, 1626), False, 'from atom.utils import check_is_fitted\n'), ((1912, 1952), 'atom.ensembles.StackingRegressor', 'StackingRegressor', ([], {'estimators': 'regressors'}), '(estimators=regressors)\n', (1929, 1952), False, 'from atom.ensembles import StackingClassifier, StackingRegressor, VotingClassifier, VotingRegressor\n'), ((2037, 2066), 'atom.utils.check_is_fitted', 'check_is_fitted', (['stack', '(False)'], {}), '(stack, False)\n', (2052, 2066), False, 'from atom.utils import check_is_fitted\n'), ((2303, 2343), 'atom.ensembles.VotingClassifier', 'VotingClassifier', ([], {'estimators': 'classifiers'}), '(estimators=classifiers)\n', (2319, 2343), False, 'from atom.ensembles import StackingClassifier, StackingRegressor, VotingClassifier, VotingRegressor\n'), ((2355, 2383), 'atom.utils.check_is_fitted', 'check_is_fitted', (['vote', '(False)'], {}), '(vote, False)\n', (2370, 2383), False, 'from atom.utils import check_is_fitted\n'), ((2503, 2543), 'atom.ensembles.VotingClassifier', 'VotingClassifier', ([], {'estimators': 'classifiers'}), '(estimators=classifiers)\n', (2519, 2543), False, 'from atom.ensembles import StackingClassifier, StackingRegressor, VotingClassifier, VotingRegressor\n'), ((2792, 2850), 'atom.ensembles.VotingClassifier', 'VotingClassifier', ([], {'estimators': 'classifiers', 'voting': '"""invalid"""'}), "(estimators=classifiers, voting='invalid')\n", (2808, 2850), False, 'from atom.ensembles import StackingClassifier, StackingRegressor, VotingClassifier, VotingRegressor\n'), ((3081, 3137), 'atom.ensembles.VotingClassifier', 'VotingClassifier', ([], {'estimators': 'classifiers', 'weights': '[0, 
1]'}), '(estimators=classifiers, weights=[0, 1])\n', (3097, 3137), False, 'from atom.ensembles import StackingClassifier, StackingRegressor, VotingClassifier, VotingRegressor\n'), ((3481, 3520), 'atom.ensembles.VotingClassifier', 'VotingClassifier', ([], {'estimators': 'estimators'}), '(estimators=estimators)\n', (3497, 3520), False, 'from atom.ensembles import StackingClassifier, StackingRegressor, VotingClassifier, VotingRegressor\n'), ((3603, 3631), 'atom.utils.check_is_fitted', 'check_is_fitted', (['vote', '(False)'], {}), '(vote, False)\n', (3618, 3631), False, 'from atom.utils import check_is_fitted\n'), ((3992, 4047), 'atom.ensembles.VotingClassifier', 'VotingClassifier', ([], {'estimators': 'classifiers', 'voting': 'voting'}), '(estimators=classifiers, voting=voting)\n', (4008, 4047), False, 'from atom.ensembles import StackingClassifier, StackingRegressor, VotingClassifier, VotingRegressor\n'), ((4198, 4236), 'atom.ensembles.VotingRegressor', 'VotingRegressor', ([], {'estimators': 'regressors'}), '(estimators=regressors)\n', (4213, 4236), False, 'from atom.ensembles import StackingClassifier, StackingRegressor, VotingClassifier, VotingRegressor\n'), ((4319, 4347), 'atom.utils.check_is_fitted', 'check_is_fitted', (['vote', '(False)'], {}), '(vote, False)\n', (4334, 4347), False, 'from atom.utils import check_is_fitted\n'), ((1528, 1557), 'atom.utils.check_is_fitted', 'check_is_fitted', (['stack', '(False)'], {}), '(stack, False)\n', (1543, 1557), False, 'from atom.utils import check_is_fitted\n'), ((1968, 1997), 'atom.utils.check_is_fitted', 'check_is_fitted', (['stack', '(False)'], {}), '(stack, False)\n', (1983, 1997), False, 'from atom.utils import check_is_fitted\n'), ((2553, 2611), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '""".*Multilabel.*"""'}), "(NotImplementedError, match='.*Multilabel.*')\n", (2566, 2611), False, 'import pytest\n'), ((2860, 2913), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*must 
be \'soft\'.*"""'}), '(ValueError, match=".*must be \'soft\'.*")\n', (2873, 2913), False, 'import pytest\n'), ((3147, 3208), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""".*estimators and weights.*"""'}), "(ValueError, match='.*estimators and weights.*')\n", (3160, 3208), False, 'import pytest\n'), ((3536, 3564), 'atom.utils.check_is_fitted', 'check_is_fitted', (['vote', '(False)'], {}), '(vote, False)\n', (3551, 3564), False, 'from atom.utils import check_is_fitted\n'), ((4252, 4280), 'atom.utils.check_is_fitted', 'check_is_fitted', (['vote', '(False)'], {}), '(vote, False)\n', (4267, 4280), False, 'from atom.utils import check_is_fitted\n'), ((1129, 1147), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1145, 1147), False, 'from sklearn.linear_model import LinearRegression\n'), ((1504, 1511), 'sklearn.model_selection.KFold', 'KFold', ([], {}), '()\n', (1509, 1511), False, 'from sklearn.model_selection import KFold\n'), ((2638, 2664), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (2646, 2664), True, 'import numpy as np\n'), ((3438, 3466), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (3464, 3466), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((792, 820), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (818, 820), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((1220, 1236), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1234, 1236), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1246, 1267), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (1265, 1267), False, 'from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor\n'), ((924, 940), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], 
{}), '()\n', (938, 940), False, 'from sklearn.preprocessing import StandardScaler\n'), ((950, 972), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {}), '()\n', (970, 972), False, 'from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor\n')] |
import rospy
import enum
import time
import numpy as np
from control_node import HiwinRobotInterface
from collision_avoidance.srv import collision_avoid, collision_avoidRequest
from hand_eye.srv import eye2base, eye2baseRequest
from hand_eye.srv import save_pcd, save_pcdRequest
pic_pos = \
[[11.5333, 27.6935, 14.078700000000001, 179.948, 10.215, -0.04],
[11.119, 11.5573, 14.078700000000001, -155.677, 9.338, 4.16],
[11.119, 45.8423, 15.5243, 162.071, 8.982, -2.503],
[20.0209, 29.7892, 13.6829, -179.401, 20.484, 0.484],
[-1.6163, 27.2584, 10.5365, 178.176, -5.075, -0.821],
[11.2913, 30.077499999999997, 3.8148000000000004, 176.897, 9.752, -0.733],
[11.2913, 48.3532, 0.1746, 147.166, 8.127, -5.457],
[11.2913, 14.063300000000002, -1.8908999999999998, -136.398, 7.255, 6.574],
[7.5134, 26.818099999999998, -2.06, 179.442, -22.966, -0.352],
[20.6853, 26.818099999999998, 0.048799999999999996, 179.502, 41.557, -0.951]]
class Arm_status(enum.IntEnum):
Idle = 1
Isbusy = 2
class State(enum.IntEnum):
move = 0
take_pic = 1
finish = 2
class EasyCATest:
def __init__(self):
self.arm_move = False
self.state = State.move
self.pos = np.array(pic_pos)
def hand_eye_client(self, req):
rospy.wait_for_service('/robot/eye_trans2base')
try:
ez_ca = rospy.ServiceProxy('/robot/eye_trans2base', eye2base)
res = ez_ca(req)
return res
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
def get_pcd_client(self, req):
rospy.wait_for_service('/get_pcd')
try:
ez_ca = rospy.ServiceProxy('/get_pcd', save_pcd)
res = ez_ca(req)
return res
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
def Mission_Trigger(self):
if self.arm_move == True and robot_ctr.get_robot_motion_state() == Arm_status.Isbusy:
self.arm_move = False
# if Arm_state_flag == Arm_status.Idle and Sent_data_flag == 1:
if robot_ctr.get_robot_motion_state() == Arm_status.Idle and self.arm_move == False:
if self.state == State.move:
print('ffffffffffffffffffffffffffffffffffffffffffffffff')
pos = self.pos[0]
# position = [pos[0], pos[1], pos[2], pos[3], pos[4], pos[5]]
print(pos)
pos[1] -= 3
robot_ctr.Set_ptp_speed(10)
robot_ctr.Step_AbsPTPCmd(pos)
self.pos = np.delete(self.pos, 0, 0)
self.state = State.take_pic
self.arm_move = True
elif self.state == State.take_pic:
time.sleep(1)
req = eye2baseRequest()
req.ini_pose = np.array(np.identity(4)).reshape(-1)
trans = self.hand_eye_client(req).tar_pose
req = save_pcdRequest()
req.curr_trans = np.array(trans)
req.name = 'mdfk'
if len(self.pos) > 0:
self.state = State.move
req.save_mix = False
else:
self.state = State.finish
req.save_mix = True
self.get_pcd_client(req)
if __name__ == "__main__":
rospy.init_node('get_pcd')
robot_ctr = HiwinRobotInterface(robot_ip="192.168.0.1", connection_level=1,name="manipulator")
robot_ctr.connect()
robot_ctr.Set_operation_mode(0)
# set tool & base coor
tool_coor = [0,0,0,0,0,0]
base_coor = [0,0,0,0,0,0]
robot_ctr.Set_base_number(5)
# base_result = robot_ctr.Define_base(1,base_coor)
robot_ctr.Set_tool_number(10)
# tool_result = robot_ctr.Define_tool(1,tool_coor)
robot_ctr.Set_operation_mode(1)
robot_ctr.Set_override_ratio(100)
poses = []
strtage = EasyCATest()
while strtage.state != State.finish and not rospy.is_shutdown():
strtage.Mission_Trigger()
time.sleep(0.1)
| [
"control_node.HiwinRobotInterface",
"rospy.ServiceProxy",
"hand_eye.srv.save_pcdRequest",
"hand_eye.srv.eye2baseRequest",
"time.sleep",
"numpy.identity",
"rospy.is_shutdown",
"numpy.array",
"rospy.init_node",
"rospy.wait_for_service",
"numpy.delete"
] | [((3369, 3395), 'rospy.init_node', 'rospy.init_node', (['"""get_pcd"""'], {}), "('get_pcd')\n", (3384, 3395), False, 'import rospy\n'), ((3412, 3500), 'control_node.HiwinRobotInterface', 'HiwinRobotInterface', ([], {'robot_ip': '"""192.168.0.1"""', 'connection_level': '(1)', 'name': '"""manipulator"""'}), "(robot_ip='192.168.0.1', connection_level=1, name=\n 'manipulator')\n", (3431, 3500), False, 'from control_node import HiwinRobotInterface\n'), ((1181, 1198), 'numpy.array', 'np.array', (['pic_pos'], {}), '(pic_pos)\n', (1189, 1198), True, 'import numpy as np\n'), ((1244, 1291), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/robot/eye_trans2base"""'], {}), "('/robot/eye_trans2base')\n", (1266, 1291), False, 'import rospy\n'), ((1566, 1600), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/get_pcd"""'], {}), "('/get_pcd')\n", (1588, 1600), False, 'import rospy\n'), ((4048, 4063), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4058, 4063), False, 'import time\n'), ((1325, 1378), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/robot/eye_trans2base"""', 'eye2base'], {}), "('/robot/eye_trans2base', eye2base)\n", (1343, 1378), False, 'import rospy\n'), ((1634, 1674), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/get_pcd"""', 'save_pcd'], {}), "('/get_pcd', save_pcd)\n", (1652, 1674), False, 'import rospy\n'), ((3985, 4004), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (4002, 4004), False, 'import rospy\n'), ((2559, 2584), 'numpy.delete', 'np.delete', (['self.pos', '(0)', '(0)'], {}), '(self.pos, 0, 0)\n', (2568, 2584), True, 'import numpy as np\n'), ((2730, 2743), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2740, 2743), False, 'import time\n'), ((2766, 2783), 'hand_eye.srv.eye2baseRequest', 'eye2baseRequest', ([], {}), '()\n', (2781, 2783), False, 'from hand_eye.srv import eye2base, eye2baseRequest\n'), ((2933, 2950), 'hand_eye.srv.save_pcdRequest', 'save_pcdRequest', ([], {}), '()\n', (2948, 2950), 
False, 'from hand_eye.srv import save_pcd, save_pcdRequest\n'), ((2984, 2999), 'numpy.array', 'np.array', (['trans'], {}), '(trans)\n', (2992, 2999), True, 'import numpy as np\n'), ((2824, 2838), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (2835, 2838), True, 'import numpy as np\n')] |
import numpy as np
def full_jacob_pb(jac_t, jac_r):
return np.vstack(
(jac_t[0], jac_t[1], jac_t[2], jac_r[0], jac_r[1], jac_r[2])) | [
"numpy.vstack"
] | [((64, 135), 'numpy.vstack', 'np.vstack', (['(jac_t[0], jac_t[1], jac_t[2], jac_r[0], jac_r[1], jac_r[2])'], {}), '((jac_t[0], jac_t[1], jac_t[2], jac_r[0], jac_r[1], jac_r[2]))\n', (73, 135), True, 'import numpy as np\n')] |
import os
from collections import Counter
import numpy as np
from monty.serialization import loadfn
from pymatgen import Composition
from pymatgen.core.periodic_table import get_el_sp
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../data")
OXIDES_PATH = os.path.join(DATA_DIR, "binary_oxides_entries_dict.json")
BINARY_OXDIES_ENTRIES = loadfn(OXIDES_PATH)
STD_FORMULA = {'garnet': Composition("C3A2D3O12"),
"perovskite": Composition("A2B2O6")}
SITES = {'garnet': ['c', 'a', 'd'],
'perovskite': ['a', 'b']} # use list to preserve order
SITE_INFO = {'garnet': {'c': {"num_atoms": 3, "max_ordering": 20, "cn": "VIII"},
'a': {"num_atoms": 2, "max_ordering": 7, "cn": "VI"},
'd': {"num_atoms": 3, "max_ordering": 18, "cn": "IV"}},
'perovskite': {'a': {"num_atoms": 2, "max_ordering": 10, 'cn': "XII"},
'b': {"num_atoms": 2, "max_ordering": 10, 'cn': "VI"}}}
def binary_encode(config, tot_configs):
"""
Args:
config(int): the index of the configuration
tot_configs(int): total number of configurations
"""
get_bin = lambda x: format(x, 'b')
vb = get_bin(config)
max_digit = len([i for i in (bin(tot_configs))[2:] if i.isdigit()])
letter = [int(char) for char in vb]
letter = [0] * (max_digit - len(letter)) + letter
return letter
def _raw_input(input_spe, cn_specific, site_info):
"""
inputs w/o configuration encoded
"""
if cn_specific:
descriptors = [(get_el_sp(spe).get_shannon_radius(cn=site_info[site]['cn']),
get_el_sp(spe).X)
for spe, site in input_spe]
else:
descriptors = [(get_el_sp(spe).ionic_radius, get_el_sp(spe).X) \
for spe, site in input_spe]
return list(sum(descriptors, ()))
def get_descriptor(structure_type, species, cn_specific=True,
unmix_expansion=None, config=None):
"""
Prepare the inputs for model prediction.
i.e. extract the
[rC', XC', rC'', XC'',rA, XA, rD, XD, b1, b2, b3, b4, b5]
inputs array for given species.
Args:
structure_type (str): 'garnet' or 'perovskite'
species (dict): species in dictionary.
cn_specific(bool): True if use cn specific ionic radius
unmix_expansion(list): list the order of sites that reorgonize unmix data
eg. for an unmix garnet Ca3Al2Ga3O12, to obtain the corresponding
descriptors for extended c-mix/a-mix/d-mix model
the unmix_expansion can be specified as
['c', 'c', 'a', 'd'],
['c', 'a', 'a', 'd'],
['c', 'a', 'd', 'd'],
respectively
Returns:
inputs (list): numerical inputs of the input structure
"""
site_info = SITE_INFO[structure_type]
sites = SITES[structure_type]
input_spe = [(spe, site) for site in sites
for spe in sorted(species[site],
key=lambda x: species[site][x])]
mix_site = [site for site in sites if len(species[site]) == 2]
if not mix_site:
if not unmix_expansion:
return _raw_input(input_spe, cn_specific, site_info)
else:
sites = unmix_expansion
mix_site = Counter(unmix_expansion).most_common(1)[0][0]
input_spe = [(spe, site) for site in sites
for spe in sorted(species[site],
key=lambda x: species[site][x])]
max_ordering = site_info[mix_site]['max_ordering']
descriptors = _raw_input(input_spe, cn_specific, site_info)
descriptors_config = [descriptors + binary_encode(config, max_ordering)
for config in range(0, max_ordering)]
return descriptors_config
else:
mix_site = mix_site[0]
if len(set(species[mix_site].values())) > 1:
descriptors = _raw_input(input_spe, cn_specific, site_info)
max_ordering = site_info[mix_site]['max_ordering']
if config != None:
descriptors_config = descriptors + binary_encode(config, max_ordering)
else:
descriptors_config = [descriptors + binary_encode(config, max_ordering)
for config in range(0, max_ordering)]
return descriptors_config
else:
mix_site = mix_site[0]
max_ordering = site_info[mix_site]['max_ordering']
input_spe = [(spe, site) for site in sites
for spe in sorted(species[site],
key=lambda x: x.__str__())]
descriptors = _raw_input(input_spe, cn_specific, site_info)
input_spe_r = [(spe, site) for site in sites
for spe in sorted(species[site],
key=lambda x: x.__str__(),
reverse=True)]
descriptors_r = _raw_input(input_spe_r, cn_specific, site_info)
if config != None:
descriptors_config = [descriptors + binary_encode(config, max_ordering)]
descriptors_config_r = [descriptors_r + binary_encode(config, max_ordering)]
else:
descriptors_config = [descriptors + binary_encode(config, max_ordering)
for config in range(0, max_ordering)]
descriptors_config_r = [descriptors_r + binary_encode(config, max_ordering)
for config in range(0, max_ordering)]
return descriptors_config + descriptors_config_r
def get_form_e(descriptors, model, scaler, return_full=False):
"""
Get formation energy from the given inputs.
Args:
descriptors (dict): input descriptors dict.
model (keras.model): keras model object.
scaler (keras.StandardScaler): keras StandardScaler object
return_full (bool): if True return the full list of energies
instead of the lowest
Returns:
predicted_ef (float): the predicted formation Energy.
"""
if len(np.array(descriptors).shape) == 1:
descriptors = np.array(descriptors).reshape(1, -1)
inputs_ext_scaled = scaler.transform(descriptors)
if return_full:
return model.predict(inputs_ext_scaled)
else:
form_e = min(model.predict(inputs_ext_scaled))[0]
return form_e
def get_tote(structure_type, form_e, species, debug=False):
spe_dict = Counter({})
for site in SITE_INFO[structure_type]:
spe_dict += Counter({spe.__str__(): round(SITE_INFO[structure_type][site]['num_atoms'] \
* species[site][spe]) \
for spe in sorted(species[site], key=lambda x: species[site][x])})
composition = Composition(spe_dict)
tote = form_e
for el, amt in composition.items():
if debug:
print(el)
if el.symbol == 'O':
continue
if BINARY_OXDIES_ENTRIES.get(el.__str__()):
stable_ox_entry = BINARY_OXDIES_ENTRIES[el.__str__()]
else:
raise ValueError("No binary oxide entry for %s" % el.__str__())
min_e = stable_ox_entry.uncorrected_energy
amt_ox = stable_ox_entry.composition[el.name]
tote += (amt / amt_ox) * min_e
return tote
| [
"os.path.abspath",
"pymatgen.core.periodic_table.get_el_sp",
"monty.serialization.loadfn",
"collections.Counter",
"numpy.array",
"pymatgen.Composition",
"os.path.join"
] | [((280, 337), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""binary_oxides_entries_dict.json"""'], {}), "(DATA_DIR, 'binary_oxides_entries_dict.json')\n", (292, 337), False, 'import os\n'), ((362, 381), 'monty.serialization.loadfn', 'loadfn', (['OXIDES_PATH'], {}), '(OXIDES_PATH)\n', (368, 381), False, 'from monty.serialization import loadfn\n'), ((407, 431), 'pymatgen.Composition', 'Composition', (['"""C3A2D3O12"""'], {}), "('C3A2D3O12')\n", (418, 431), False, 'from pymatgen import Composition\n'), ((462, 483), 'pymatgen.Composition', 'Composition', (['"""A2B2O6"""'], {}), "('A2B2O6')\n", (473, 483), False, 'from pymatgen import Composition\n'), ((6697, 6708), 'collections.Counter', 'Counter', (['{}'], {}), '({})\n', (6704, 6708), False, 'from collections import Counter\n'), ((7037, 7058), 'pymatgen.Composition', 'Composition', (['spe_dict'], {}), '(spe_dict)\n', (7048, 7058), False, 'from pymatgen import Composition\n'), ((227, 252), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (242, 252), False, 'import os\n'), ((6313, 6334), 'numpy.array', 'np.array', (['descriptors'], {}), '(descriptors)\n', (6321, 6334), True, 'import numpy as np\n'), ((6370, 6391), 'numpy.array', 'np.array', (['descriptors'], {}), '(descriptors)\n', (6378, 6391), True, 'import numpy as np\n'), ((1652, 1666), 'pymatgen.core.periodic_table.get_el_sp', 'get_el_sp', (['spe'], {}), '(spe)\n', (1661, 1666), False, 'from pymatgen.core.periodic_table import get_el_sp\n'), ((1755, 1769), 'pymatgen.core.periodic_table.get_el_sp', 'get_el_sp', (['spe'], {}), '(spe)\n', (1764, 1769), False, 'from pymatgen.core.periodic_table import get_el_sp\n'), ((1784, 1798), 'pymatgen.core.periodic_table.get_el_sp', 'get_el_sp', (['spe'], {}), '(spe)\n', (1793, 1798), False, 'from pymatgen.core.periodic_table import get_el_sp\n'), ((1567, 1581), 'pymatgen.core.periodic_table.get_el_sp', 'get_el_sp', (['spe'], {}), '(spe)\n', (1576, 1581), False, 'from pymatgen.core.periodic_table 
import get_el_sp\n'), ((3347, 3371), 'collections.Counter', 'Counter', (['unmix_expansion'], {}), '(unmix_expansion)\n', (3354, 3371), False, 'from collections import Counter\n')] |
import numpy as np
import sklearn.metrics
def calc_auc(error_array, cutoff=0.25):
error_array = error_array.squeeze()
error_array = np.sort(error_array)
num_values = error_array.shape[0]
plot_points = np.zeros((num_values, 2))
midfraction = 1.
for i in range(num_values):
fraction = (i + 1) * 1.0 / num_values
value = error_array[i]
plot_points[i, 1] = fraction
plot_points[i, 0] = value
if i > 0:
lastvalue = error_array[i - 1]
if lastvalue < cutoff < value:
midfraction = (lastvalue * plot_points[i - 1, 1] +
value * fraction) / (value + lastvalue)
if plot_points[-1, 0] < cutoff:
plot_points = np.vstack([plot_points, np.array([cutoff, 1])])
else:
plot_points = np.vstack([plot_points, np.array([cutoff, midfraction])])
sorting = np.argsort(plot_points[:, 0])
plot_points = plot_points[sorting, :]
auc = sklearn.metrics.auc(plot_points[plot_points[:, 0] <= cutoff, 0],
plot_points[plot_points[:, 0] <= cutoff, 1])
auc = auc / cutoff
return auc, plot_points
| [
"numpy.argsort",
"numpy.sort",
"numpy.zeros",
"numpy.array"
] | [((143, 163), 'numpy.sort', 'np.sort', (['error_array'], {}), '(error_array)\n', (150, 163), True, 'import numpy as np\n'), ((221, 246), 'numpy.zeros', 'np.zeros', (['(num_values, 2)'], {}), '((num_values, 2))\n', (229, 246), True, 'import numpy as np\n'), ((904, 933), 'numpy.argsort', 'np.argsort', (['plot_points[:, 0]'], {}), '(plot_points[:, 0])\n', (914, 933), True, 'import numpy as np\n'), ((775, 796), 'numpy.array', 'np.array', (['[cutoff, 1]'], {}), '([cutoff, 1])\n', (783, 796), True, 'import numpy as np\n'), ((855, 886), 'numpy.array', 'np.array', (['[cutoff, midfraction]'], {}), '([cutoff, midfraction])\n', (863, 886), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import unittest
from numpy.testing import assert_array_almost_equal
from mighty.utils.common import set_seed
from sparse.nn.model import Softshrink, LISTA, MatchingPursuit
from sparse.nn.solver import BasisPursuitADMM
class TestSoftshrink(unittest.TestCase):
def test_softshink(self):
set_seed(16)
lambd = 0.1
softshrink = Softshrink(n_features=1)
softshrink.lambd.data[:] = lambd
softshrink_gt = nn.Softshrink(lambd=lambd)
tensor = torch.randn(10, 20)
assert_array_almost_equal(softshrink(tensor).detach(),
softshrink_gt(tensor))
class TestLISTA(unittest.TestCase):
def test_lista_forward_best(self):
set_seed(16)
solver = BasisPursuitADMM()
in_features = 10
out_features = 40
lista = LISTA(in_features=in_features, out_features=out_features,
solver=solver)
mp = MatchingPursuit(in_features=in_features,
out_features=out_features, solver=solver)
tensor = torch.randn(5, in_features)
with torch.no_grad():
mp.normalize_weight()
lista.weight_input.data = mp.weight.data.clone()
mp_output = mp(tensor)
lista_output_best = lista.forward_best(tensor)
assert_array_almost_equal(lista_output_best.latent, mp_output.latent)
assert_array_almost_equal(lista_output_best.reconstructed,
mp_output.reconstructed)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"sparse.nn.solver.BasisPursuitADMM",
"torch.nn.Softshrink",
"torch.randn",
"mighty.utils.common.set_seed",
"sparse.nn.model.Softshrink",
"sparse.nn.model.MatchingPursuit",
"numpy.testing.assert_array_almost_equal",
"torch.no_grad",
"sparse.nn.model.LISTA"
] | [((1585, 1600), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1598, 1600), False, 'import unittest\n'), ((335, 347), 'mighty.utils.common.set_seed', 'set_seed', (['(16)'], {}), '(16)\n', (343, 347), False, 'from mighty.utils.common import set_seed\n'), ((389, 413), 'sparse.nn.model.Softshrink', 'Softshrink', ([], {'n_features': '(1)'}), '(n_features=1)\n', (399, 413), False, 'from sparse.nn.model import Softshrink, LISTA, MatchingPursuit\n'), ((479, 505), 'torch.nn.Softshrink', 'nn.Softshrink', ([], {'lambd': 'lambd'}), '(lambd=lambd)\n', (492, 505), True, 'import torch.nn as nn\n'), ((523, 542), 'torch.randn', 'torch.randn', (['(10)', '(20)'], {}), '(10, 20)\n', (534, 542), False, 'import torch\n'), ((748, 760), 'mighty.utils.common.set_seed', 'set_seed', (['(16)'], {}), '(16)\n', (756, 760), False, 'from mighty.utils.common import set_seed\n'), ((778, 796), 'sparse.nn.solver.BasisPursuitADMM', 'BasisPursuitADMM', ([], {}), '()\n', (794, 796), False, 'from sparse.nn.solver import BasisPursuitADMM\n'), ((864, 936), 'sparse.nn.model.LISTA', 'LISTA', ([], {'in_features': 'in_features', 'out_features': 'out_features', 'solver': 'solver'}), '(in_features=in_features, out_features=out_features, solver=solver)\n', (869, 936), False, 'from sparse.nn.model import Softshrink, LISTA, MatchingPursuit\n'), ((972, 1059), 'sparse.nn.model.MatchingPursuit', 'MatchingPursuit', ([], {'in_features': 'in_features', 'out_features': 'out_features', 'solver': 'solver'}), '(in_features=in_features, out_features=out_features, solver=\n solver)\n', (987, 1059), False, 'from sparse.nn.model import Softshrink, LISTA, MatchingPursuit\n'), ((1101, 1128), 'torch.randn', 'torch.randn', (['(5)', 'in_features'], {}), '(5, in_features)\n', (1112, 1128), False, 'import torch\n'), ((1356, 1425), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['lista_output_best.latent', 'mp_output.latent'], {}), '(lista_output_best.latent, mp_output.latent)\n', (1381, 1425), 
False, 'from numpy.testing import assert_array_almost_equal\n'), ((1434, 1522), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['lista_output_best.reconstructed', 'mp_output.reconstructed'], {}), '(lista_output_best.reconstructed, mp_output.\n reconstructed)\n', (1459, 1522), False, 'from numpy.testing import assert_array_almost_equal\n'), ((1142, 1157), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1155, 1157), False, 'import torch\n')] |
import numpy as np
import sklearn
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.base import RegressorMixin, BaseEstimator
from time import time
import scipy
from .hamiltonians import orbs_base, block_to_feat_index
# avoid polluting output with CV failures
import warnings
warnings.filterwarnings('ignore', category=scipy.linalg.LinAlgWarning)
warnings.filterwarnings('ignore', category=sklearn.exceptions.FitFailedWarning)
warnings.filterwarnings('ignore', category=UserWarning)
class SASplitter:
    """CV splitter aware of the (2L+1) "M blocks" of symmetry-adapted targets.

    Symmetry-adapted regression on y^M_L(A_i) is fed to conventional
    regressors by unrolling the (2L+1) angular channels into the sample
    axis.  Train/test splits must then never cut through an M block;
    this splitter only cuts at block boundaries.
    """

    def __init__(self, L, cv=2):
        self.L = L
        self.cv = cv
        self.n_splits = cv

    def split(self, X, y, groups=None):
        """Yield (train_idx, test_idx) pairs that respect M-block boundaries."""
        block = 2 * self.L + 1
        nsamples = X.shape[0]
        if nsamples % block != 0:
            raise ValueError("Size of training data is inconsistent with the L value")
        nstruct = nsamples // block
        # each fold holds a whole number of (2L+1)-sized blocks
        batch = block * (nstruct // self.n_splits)
        order = np.arange(len(X))
        np.random.shuffle(order)
        for fold in range(self.n_splits):
            lo, hi = fold * batch, (fold + 1) * batch
            test_idx = order[lo:hi]
            train_idx = np.concatenate([order[:lo], order[hi:]])
            yield train_idx, test_idx

    def get_n_splits(self, X, y, groups=None):
        """Number of folds (sklearn splitter interface)."""
        return self.n_splits
class SARidge(Ridge):
    """ Symmetry-adapted ridge regression class.

    Flattens the (2L+1) angular channels of covariant targets/features into
    the sample axis and delegates to sklearn's linear Ridge; cross-validation
    uses SASplitter so folds never cut through an M block.
    """
    def __init__(self, L, alpha=1, alphas=None, cv=2, solver='auto',
                 fit_intercept=False, scoring='neg_root_mean_squared_error'):
        self.L = L
        # L>0 components have zero mean by symmetry, so never fit an intercept
        if L>0:
            fit_intercept = False
        self.cv = SASplitter(L, cv)
        self.alphas = alphas
        self.cv_stats = None
        self.scoring = scoring
        self.solver = solver
        super(SARidge, self).__init__(alpha=alpha, fit_intercept=fit_intercept, solver=solver)

    def fit(self, Xm, Ym, X0=None):
        """Fit on covariant features Xm [i, q, m] and targets Ym [i, m].

        If `alphas` was given, the regularizer is first chosen by grid
        search (stats stored in `cv_stats`).  X0 is accepted for interface
        compatibility with the kernel models and ignored.  Returns self.
        """
        # this expects properties in the form [i, m] and features in the form [i, q, m]
        # in order to train a SA-GPR model the m indices have to be moved and merged with the i
        Xm_flat = np.moveaxis(Xm, 2, 1).reshape((-1, Xm.shape[1]))
        Ym_flat = Ym.flatten()
        if self.alphas is not None:
            # determines alpha by grid search over the M-block-aware splitter
            rcv = Ridge(fit_intercept=self.fit_intercept)
            gscv = GridSearchCV(rcv, dict(alpha=self.alphas), cv=self.cv, scoring=self.scoring)
            gscv.fit(Xm_flat, Ym_flat)
            self.cv_stats = gscv.cv_results_
            self.alpha = gscv.best_params_["alpha"]
        super(SARidge, self).fit(Xm_flat, Ym_flat)
        # sklearn estimator contract: fit must return self (was missing)
        return self

    def predict(self, Xm, X0=None):
        """Predict covariant targets, returned with shape [i, 2L+1]."""
        Y = super(SARidge, self).predict(np.moveaxis(Xm, 2, 1).reshape((-1, Xm.shape[1])))
        return Y.reshape((-1, 2*self.L+1))
class SAKernelRidge(KernelRidge):
    """ Symmetry-adapted kernel ridge regression class.

    Builds a covariant (lambda-SOAP-style) kernel from the features Xm and
    modulates it with powers of the invariant kernel K0 (exponents `zeta`,
    weights `zeta_w`), then delegates to sklearn's KernelRidge with
    kernel="precomputed".  CV uses SASplitter so folds never cut an M block.
    """
    def __init__(self, L, zeta=[1], zeta_w=None, cv=2, alpha=1, alphas=None,
                 fit_intercept=False, scoring='neg_root_mean_squared_error', solver=None):
        # NOTE(review): zeta=[1] is a mutable default argument; it is never
        # mutated below, but a tuple default would be safer.
        self.L = L
        # L>0 components have zero mean by symmetry
        if L>0:
            fit_intercept = False
        self.cv = SASplitter(L, cv)
        # allow a scalar zeta; normalize to a sequence
        if not hasattr(zeta,'__len__'):
            zeta = [zeta]
        if zeta_w is None:
            zeta_w = np.ones(len(zeta))
        self.zeta = zeta
        self.zeta_w = zeta_w
        self.alphas = alphas
        self.scoring = scoring
        self.fit_intercept = fit_intercept
        super(SAKernelRidge, self).__init__(kernel="precomputed", alpha=alpha)

    def fit(self, Xm, Ym, X0):
        # this expects properties in the form [i, m] and features in the form [i, q, m]
        # in order to train a SA-GPR model the m indices have to be moved and merged with the i
        # it also gets *invariant* features to build non-linear sa-gpr kernels

        # computes lambda-soap kernel
        X0_flat = X0.reshape(X0.shape[:2])
        Xm_flat = np.moveaxis(Xm, -1,1).reshape((-1,Xm.shape[1]))
        # invariant kernel between structures
        K0 = X0_flat@X0_flat.T #np.einsum("iq,jq->ij", X0[...,0], X0[...,0])
        # covariant kernel, indexed (i, m, j, n)
        KLMM = (Xm_flat@Xm_flat.T).reshape((Xm.shape[0],Xm.shape[-1],Xm.shape[0],Xm.shape[-1])) #np.einsum("iqm,jqn->imjn", Xm, Xm)
        # normalization scales: kernel trace per structure
        self.KLscale = np.trace(KLMM.reshape( ((2*self.L+1)*len(Xm),((2*self.L+1)*len(Xm))) ))/len(Xm)
        self.K0scale = np.trace(K0)/len(X0)
        # guard against identically-zero feature blocks
        if self.K0scale == 0.0 :
            self.K0scale = 1
        if self.KLscale == 0.0 :
            self.KLscale = 1
        # keep the (flattened) training features to build kernels in predict()
        self.rXm = Xm_flat
        self.rX0 = X0_flat
        Kpoly = KLMM*0.0
        # non-linear kernel: covariant kernel times powers of the invariant one
        for z, zw in zip(self.zeta, self.zeta_w):
            Kpoly += np.einsum("imjn,ij->imjn",KLMM/self.KLscale, zw*(K0/self.K0scale)**(z-1))
        Kpoly_flat = Kpoly.reshape( ((2*self.L+1)*len(Xm),((2*self.L+1)*len(Xm))) )
        Ym_flat = Ym.flatten()
        # intercept handled manually (KernelRidge has no intercept)
        self._Y_mean = 0
        if self.fit_intercept:
            self._Y_mean = Ym_flat.mean()
            Ym_flat = Ym_flat - self._Y_mean
        if self.alphas is not None:
            # determines alpha by grid search
            krcv = KernelRidge(kernel="precomputed")
            gscv = GridSearchCV(krcv, dict(alpha=self.alphas), cv=self.cv, scoring=self.scoring)
            gscv.fit(Kpoly_flat, Ym_flat)
            self.cv_stats = gscv.cv_results_
            self.alpha = gscv.best_params_["alpha"]
        super(SAKernelRidge, self).fit( Kpoly_flat, Ym_flat)

    def predict(self, Xm, X0):
        # same kernel construction as fit(), but against the stored training features
        X0_flat = X0.reshape(X0.shape[:2])
        Xm_flat = np.moveaxis(Xm, -1,1).reshape((-1,Xm.shape[1]))
        K0 = X0_flat@self.rX0.T #np.einsum("iq,jq->ij", X0[...,0], X0[...,0])
        KLMM = (Xm_flat@self.rXm.T).reshape((Xm.shape[0],Xm.shape[-1],self.rX0.shape[0],Xm.shape[-1])) #np.einsum("iqm,jqn->imjn", X
#        K0 = np.einsum("iq,jq->ij", X0[...,0], self.rX0[...,0])
#        KLMM = np.einsum("iqm,jqn->imjn", Xm, self.rXm)
        Kpoly = KLMM*0.0
        for z, zw in zip(self.zeta, self.zeta_w):
            Kpoly += np.einsum("imjn,ij->imjn",KLMM/self.KLscale, zw*(K0/self.K0scale)**(z-1))
        # add back the manually-handled intercept, reshape to [i, 2L+1]
        Y = self._Y_mean + super(SAKernelRidge, self).predict(
                 Kpoly.reshape(((2*self.L+1)*len(Xm),-1)))
        return Y.reshape((-1, 2*self.L+1))
class SparseGPRSolver:
    """
    Solver for the sparse (projected-process) GPR problem

        b = (KNM.T @ KNM + regularizer * KMM)^-1 @ KNM.T @ y

    where KMM is the kernel between the M active (inducing) points and KNM
    the kernel between the N training points and the active set.  The inverse
    is stabilized by a numerical jitter; with ``relative_jitter`` the jitter
    is expressed as a fraction of the largest eigenvalue of KMM.

    Solvers:
    - "RKHS":       eigendecomposition of KMM, normal equations in the RKHS
                    basis; supports incremental accumulation (partial_fit).
    - "RKHS-QR":    same basis, solved via QR (more stable).
    - "RKHS-RIDGE": same basis, delegated to sklearn's SVD-based Ridge.
    - "QR":         QR on the augmented [KNM; chol(reg*KMM + jitter*I)].
    - "Normal":     direct normal equations; supports partial_fit.
    """

    def __init__(
        self, KMM, regularizer=1, jitter=0, rkhs_vectors=None, solver="RKHS", relative_jitter=True
    ):
        self.solver = solver
        self.KMM = KMM
        self.relative_jitter = relative_jitter

        self.rkhs_vectors = rkhs_vectors
        self._nM = len(KMM)
        if self.solver == "RKHS" or self.solver == "RKHS-QR" or self.solver == "RKHS-RIDGE":
            start = time()
            if self.rkhs_vectors is None:
                vk, Uk = scipy.linalg.eigh(KMM)
                # Sort eigenpairs by decreasing eigenvalue.  scipy.linalg.eigh
                # returns eigenvalues ascending and eigenvectors as *columns*,
                # so the vectors must be reversed along axis 1.  BUGFIX: the
                # previous `Uk[::-1]` flipped rows, pairing each eigenvector
                # with the wrong eigenvalue and breaking the RKHS projection.
                self.rkhs_vectors = (vk[::-1], Uk[:, ::-1])
            self._vk, self._Uk = self.rkhs_vectors
        elif self.solver == "QR" or self.solver == "Normal":
            # gets maximum eigenvalue of KMM to scale the numerical jitter
            self._KMM_maxeva = scipy.sparse.linalg.eigsh(
                KMM, k=1, return_eigenvectors=False
            )[0]
        else:
            raise ValueError(
                "Solver ",
                solver,
                " not supported. Possible values are [RKHS, RKHS-QR, RKHS-RIDGE, QR, Normal].",
            )

        if relative_jitter:
            if self.solver == "RKHS" or self.solver == "RKHS-QR" or self.solver == "RKHS-RIDGE":
                # largest eigenvalue (eigenvalues are sorted descending)
                self._jitter_scale = self._vk[0]
            elif self.solver == "QR" or self.solver == "Normal":
                self._jitter_scale = self._KMM_maxeva
        else:
            self._jitter_scale = 1.0
        self.set_regularizers(regularizer, jitter)

    def set_regularizers(self, regularizer=1.0, jitter=0.0):
        """Update regularizer/jitter and refresh derived quantities."""
        self.regularizer = regularizer
        self.jitter = jitter
        if self.solver == "RKHS" or self.solver == "RKHS-QR" or self.solver == "RKHS-RIDGE":
            # keep only eigendirections above the jitter threshold
            self._nM = len(np.where(self._vk > self.jitter * self._jitter_scale)[0])
            # projector onto the RKHS basis: U diag(1/sqrt(v))
            self._PKPhi = self._Uk[:, : self._nM] * 1 / np.sqrt(self._vk[: self._nM])
        elif self.solver == "QR":
            self._VMM = scipy.linalg.cholesky(
                self.regularizer * self.KMM
                + np.eye(self._nM) * self._jitter_scale * self.jitter
            )
        # accumulators for partial_fit
        self._Cov = np.zeros((self._nM, self._nM))
        self._KY = None

    def partial_fit(self, KNM, Y, accumulate_only=False):
        """Accumulate normal equations for a batch of data; optionally solve.

        Call with an empty Y (or accumulate_only=False) to trigger the solve.
        Only the "RKHS" and "Normal" solvers support incremental fitting.
        """
        if len(Y) > 0:
            # only accumulate if we are passing data
            if len(Y.shape) == 1:
                Y = Y[:, np.newaxis]
            if self.solver == "RKHS":
                Phi = KNM @ self._PKPhi
            elif self.solver == "Normal":
                Phi = KNM
            else:
                raise ValueError(
                    "Partial fit can only be realized with solver = [RKHS, Normal]"
                )
            if self._KY is None:
                self._KY = np.zeros((self._nM, Y.shape[1]))
            self._Cov += Phi.T @ Phi
            self._KY += Phi.T @ Y

        # do actual fit if called with empty array or if asked
        if len(Y) == 0 or (not accumulate_only):
            if self.solver == "RKHS":
                self._weights = self._PKPhi @ scipy.linalg.solve(
                    self._Cov + np.eye(self._nM) * self.regularizer,
                    self._KY,
                    assume_a="pos",
                )
            elif self.solver == "Normal":
                self._weights = scipy.linalg.solve(
                    self._Cov
                    + self.regularizer * self.KMM
                    + np.eye(self.KMM.shape[0]) * self.jitter * self._jitter_scale,
                    self._KY,
                    assume_a="pos",
                )

    def fit(self, KNM, Y):
        """Solve the sparse GPR problem for the given cross-kernel and targets."""
        if len(Y.shape) == 1:
            Y = Y[:, np.newaxis]
        if self.solver == "RKHS":
            Phi = KNM @ self._PKPhi
            rc = scipy.linalg.solve(
                Phi.T @ Phi + np.eye(self._nM) * self.regularizer,
                Phi.T @ Y,
                assume_a="pos",
            )
            self._weights = self._PKPhi @ rc
        elif self.solver == "RKHS-QR":
            # ridge problem solved as an augmented least-squares system
            A = np.vstack(
                [KNM @ self._PKPhi, np.sqrt(self.regularizer) * np.eye(self._nM)]
            )
            Q, R = np.linalg.qr(A)
            self._weights = self._PKPhi @ scipy.linalg.solve_triangular(
                R, Q.T @ np.vstack([Y, np.zeros((self._nM, Y.shape[1]))])
            )
        elif self.solver == "RKHS-RIDGE":
            Phi = KNM @ self._PKPhi
            if Phi.shape[1] == 0:
                # jitter truncated away every direction; weights are zero
                self._weights = self._PKPhi @ np.zeros(shape=(0, 1))
            else:
                ridge = Ridge(alpha=self.regularizer, fit_intercept=False, solver='svd')
                ridge.fit(Phi, Y)
                self._weights = self._PKPhi @ ridge.coef_.T
        elif self.solver == "QR":
            A = np.vstack([KNM, self._VMM])
            Q, R = np.linalg.qr(A)
            self._weights = scipy.linalg.solve_triangular(
                R, Q.T @ np.vstack([Y, np.zeros((KNM.shape[1], Y.shape[1]))])
            )
        elif self.solver == "Normal":
            self._weights = scipy.linalg.solve(
                KNM.T @ KNM
                + self.regularizer * self.KMM
                + np.eye(self._nM) * self.jitter * self._jitter_scale,
                KNM.T @ Y,
                assume_a="pos",
            )

    def predict(self, KTM):
        """Predict targets given the cross-kernel between test and active points."""
        return KTM @ self._weights
class SparseKernelRidge(BaseEstimator, SparseGPRSolver):
    # BaseEstimator mixin adds get_params/set_params so the sparse GPR solver
    # can be plugged into sklearn model selection (e.g. GridSearchCV)
    pass
class SASparseKernelRidge(SparseKernelRidge):
    """ Symmetry-adapted sparse kernel ridge regression class.

    Same lambda-SOAP-style kernel construction as SAKernelRidge, but kernels
    are computed against a fixed active set (`active`, `active0`) and the
    problem is solved with the sparse-GPR machinery in SparseGPRSolver.
    The active-set kernel KMM is precomputed once at construction time.
    """
    def __init__(self, L, active, active0, zeta=[1], zeta_w=None, cv=2, alpha=1, alphas=None, solver="RKHS",
                 fit_intercept=False, jitter=1e-15, scoring='neg_root_mean_squared_error'):
        self.L = L
        # L>0 components have zero mean by symmetry
        if L>0:
            fit_intercept = False
        self.cv = SASplitter(L, cv)
        # allow a scalar zeta; normalize to a sequence
        if not hasattr(zeta,'__len__'):
            zeta = [zeta]
        if zeta_w is None:
            zeta_w = np.ones(len(zeta))
        self.zeta = zeta
        self.zeta_w = zeta_w
        self.alpha=alpha
        self.alphas = alphas
        self.scoring = scoring
        self.fit_intercept = fit_intercept
        self.solver = solver

        # flatten active-set features: invariant [j, q], covariant [j*m, q]
        self.nactive = len(active)
        self.active0 = active0.reshape(active0.shape[:2])
        self.active = np.moveaxis(active, -1,1).reshape((-1,active.shape[1]))
        print("Sparse GPR, for nactive= ", self.nactive)
        start = time()
        # active-set kernels: invariant and covariant (j, m, j', n)
        K0 = self.active0@self.active0.T
        KLMM = (self.active@self.active.T).reshape((self.nactive,2*self.L+1,
                                                    self.nactive,2*self.L+1))
        #K0 = np.einsum("iq,jq->ij", self.active0[...,0], self.active0[...,0])
        #KLMM = np.einsum("iqm,jqn->imjn", self.active, self.active)
        # normalization scales: kernel trace per active point
        self.KLscale = np.trace(KLMM.reshape( ((2*self.L+1)*self.nactive,(2*self.L+1)*self.nactive) ))/self.nactive
        self.K0scale = np.trace(K0)/self.nactive
        if self.K0scale == 0.0 : # handles gently 0-valued features
            self.K0scale = 1
        if self.KLscale == 0.0 :
            self.KLscale = 1
        Kpoly = KLMM*0.0
        # non-linear kernel: covariant kernel times powers of the invariant one
        for z, zw in zip(self.zeta, self.zeta_w):
            Kpoly += np.einsum("imjn,ij->imjn",KLMM/self.KLscale, zw*(K0/self.K0scale)**(z-1))
        print("KMM compute ", time()-start)
        start = time()
        self.KLMM_flat = Kpoly.reshape( ((2*self.L+1)*self.nactive,((2*self.L+1)*self.nactive)) )
        super(SASparseKernelRidge, self).__init__(KMM = self.KLMM_flat,
                regularizer=alpha, jitter=jitter, solver=self.solver)
        print("KMM init ", time()-start)

    def fit(self, Xm, Ym, X0):
        # this expects properties in the form [i, m] and features in the form [i, q, m]
        # in order to train a SA-GPR model the m indices have to be moved and merged with the i
        # it also gets *invariant* features to build non-linear sa-gpr kernels
        print("Fitting, ", Xm.shape)
        # computes lambda-soap kernel between training points and active set
        start = time()
        X0_flat = X0.reshape(X0.shape[:2])
        Xm_flat = np.moveaxis(Xm, -1,1).reshape((-1,Xm.shape[1]))
        K0NM = X0_flat@self.active0.T
        KLNM = (Xm_flat@self.active.T).reshape((Xm.shape[0],Xm.shape[-1],self.nactive,Xm.shape[-1]))
        #K0NM = np.einsum("iq,jq->ij", X0[...,0], self.active0[...,0])
        #KLNM = np.einsum("iqm,jqn->imjn", Xm, self.active)
        Kpoly = KLNM*0.0
        for z, zw in zip(self.zeta, self.zeta_w):
            Kpoly += np.einsum("imjn,ij->imjn",KLNM/self.KLscale, zw*(K0NM/self.K0scale)**(z-1))
        KLNM_flat = Kpoly.reshape((KLNM.shape[0]*KLNM.shape[1],KLNM.shape[2]*KLNM.shape[3]))
        Ym_flat = Ym.flatten()
        # intercept handled manually (the sparse solver has none)
        self._Y_mean = 0
        if self.fit_intercept:
            self._Y_mean = Ym_flat.mean()
            Ym_flat = Ym_flat - self._Y_mean
        print("KNM compute", time()-start)
        start = time()
        if self.alphas is not None:
            # determines alpha by grid search
            krcv = SparseKernelRidge(KMM = self.KLMM_flat, jitter=self.jitter)#, rkhs_vectors=self.rkhs_vectors, )
            gscv = GridSearchCV(krcv, dict(regularizer=self.alphas), cv=self.cv, scoring=self.scoring)
            gscv.fit(KLNM_flat, Ym_flat)
            self.cv_stats = gscv.cv_results_
            self.regularizer = gscv.best_params_["regularizer"]
            print("CV ", time()-start)
        self.alpha = self.regularizer # offer common interface - should really rename regularizer upstream
        start = time()
        super(SASparseKernelRidge, self).fit(KLNM_flat, Ym_flat)
        print("KNM fit", time()-start)

    def predict(self, Xm, X0):
        # same kernel construction as fit(), against the stored active set
        X0_flat = X0.reshape(X0.shape[:2])
        Xm_flat = np.moveaxis(Xm, -1,1).reshape((-1,Xm.shape[1]))
        K0NM = X0_flat@self.active0.T
        KLNM = (Xm_flat@self.active.T).reshape((Xm.shape[0],Xm.shape[-1],self.nactive,Xm.shape[-1]))
        #K0NM = np.einsum("iq,jq->ij", X0[...,0], self.active0[...,0])
        #KLNM = np.einsum("iqm,jqn->imjn", Xm, self.active)
        Kpoly = KLNM*0.0
        for z, zw in zip(self.zeta, self.zeta_w):
            Kpoly += np.einsum("imjn,ij->imjn",KLNM/self.KLscale, zw*(K0NM/self.K0scale)**(z-1))
        # add back the manually-handled intercept, reshape to [i, 2L+1]
        Y = self._Y_mean + super(SASparseKernelRidge, self).predict(Kpoly.reshape((KLNM.shape[0]*KLNM.shape[1],KLNM.shape[2]*KLNM.shape[3])))
        return Y.reshape((-1, 2*self.L+1))
class FockRegression:
    """ Collects all the models that are needed to fit and predict a Fock matrix in coupled-momentum blocks form.

    One symmetry-adapted model is trained per (block type k, orbital pair,
    L) channel; the model class (linear / kernel / sparse-kernel ridge) is
    inferred from the keyword arguments given at construction.
    """
    def __init__(self, orbs, *args, **kwargs):
        # these are the arguments to Ridge - we just store them here because ATM
        # we don't know what we'll be learning
        self._orbs = orbs
        _, self._eldict = orbs_base(orbs)
        self._args = args
        self._kwargs = kwargs
        # guess what we want to do by the arguments
        if "active" in self._kwargs:
            self._model_template= SASparseKernelRidge
        elif "zeta" in self._kwargs:
            self._model_template= SAKernelRidge
        else:
            self._model_template = SARidge
        if "fit_intercept" in self._kwargs:
            self.fit_intercept = self._kwargs["fit_intercept"]
        else:
            # "auto": fit an intercept only for L=0 diagonal blocks (see fit())
            self.fit_intercept = "auto"
        if "jitter" in self._kwargs:
            self.jitter = self._kwargs["jitter"]
        if "active" in self._kwargs:
            # active-set features (covariant and invariant) for sparse models
            self.active = self._kwargs["active"]
            self.active0 = self._kwargs["active0"]

    def fit(self, feats, fock_bc, slices=None, progress=None):
        """Fit one model per (k, orbital, L) channel of the block-coupled Fock matrix.

        feats: symmetry-adapted feature blocks, feats[k][fblock]
        fock_bc: block-coupled Fock targets, fock_bc[k][orb][L]
        slices: optional per-block training slices
        progress: optional iterator wrapper (e.g. tqdm) for the orbital loop
        """
        self._models = {}
        self.cv_stats_ = {}
        for k in fock_bc.keys():
            self._models[k] = {}
            pkeys = fock_bc[k].keys()
            if progress is not None:
                pkeys = progress(fock_bc[k].keys())
            for orb in pkeys:
                try: # fancier info if this is tqdm
                    pkeys.set_description("Fitting % 5s: % 20s" % (k, str(orb)))
                except:
                    # plain iterators have no set_description; fall back to print
                    print("Fitting % 5s: % 20s" % (k, str(orb)))
                    pass
                self._models[k][orb] = {}
                el = self._eldict[(orb[0], orb[1])]
                elb = self._eldict[(orb[2], orb[3])]
                pil1l2 = (1-2*(orb[1]%2))*(1-2*(orb[3]%2)) # parity pi(l1) * pi(l2)
                if slices is None:
                    sl = slice(None)
                else:
                    sl = slices[k][orb]
                for L in fock_bc[k][orb]:
                    # override fit_intercept depending on parameters
                    self._kwargs["fit_intercept"] = self.fit_intercept
                    if self.fit_intercept == "auto":
                        self._kwargs["fit_intercept"] = (L==0 and k == "diag")
                    # block parity: pi(l1)*pi(l2)*pi(L)
                    pi = pil1l2*(1-2*(L%2))
                    # feature blocks are keyed by one or two element labels
                    if (el,L,pi) in feats[k]:
                        fblock = (el, L, pi)
                    else:
                        fblock = (el, elb, L, pi)
                    if "active" in self._kwargs:
                        self._kwargs["active"] = self.active[k][fblock]
                        self._kwargs["active0"] = self.active0[k][fblock[:-2]+(0, 1)]
                    tgt = fock_bc[k][orb][L][sl]
                    print(k, orb, fblock, L, np.linalg.norm(tgt))
                    # reset the jitter for each block before the retry loop below
                    if "jitter" in self._kwargs:
                        self._kwargs["jitter"] = self.jitter
                    f_solved = False
                    while not f_solved:
                        # keep trying to increase the jitter, to deal with really tricky blocks
                        try:
                            self._models[k][orb][L] = self._model_template(L, *self._args, **self._kwargs)
                            # X0 has even parity regardless of the parity of the block
                            self._models[k][orb][L].fit(feats[k][fblock][sl], tgt, X0=feats[k][fblock[:-2]+(0, 1)][sl])
                            f_solved = True
                        except np.linalg.LinAlgError:
                            # handles with error in solve due to small jitter
                            print("######################################################\nFockRegression: solve failure for ",k, str(fblock) , L,", retrying with larger jitter\n")
                            self._kwargs["jitter"] *= 10
                    self.cv_stats_[(k, orb,L)] = self._models[k][orb][L].cv_stats

    def predict(self, feats, progress=None):
        """Predict the block-coupled Fock matrix for the given features."""
        fock_bc = {}
        for k in self._models.keys():
            fock_bc[k] = {}
            pkeys = self._models[k].keys()
            if progress is not None:
                pkeys = progress(pkeys)
            for orb in pkeys:
                try: # fancier info if this is tqdm
                    pkeys.set_description("Predicting % 5s: % 20s" % (k, str(orb)))
                except:
                    pass
                fock_bc[k][orb] = {}
                el = self._eldict[(orb[0], orb[1])]
                elb = self._eldict[(orb[2], orb[3])]
                pil1l2 = (1-2*(orb[1]%2))*(1-2*(orb[3]%2)) # parity pi(l1) * pi(l2)
                #if progress is not None:
                #    print("Orbital: ", el, elb, orb)
                for L in self._models[k][orb]:
                    pi = pil1l2*(1-2*(L%2))
                    # mirror the fblock key logic used in fit(); empty feature
                    # blocks are skipped
                    if (el,L,pi) in feats[k]:
                        if len(feats[k][(el,L,pi)]) > 0:
                            fock_bc[k][orb][L] = self._models[k][orb][L].predict(feats[k][(el,L,pi)], X0=feats[k][(el,0,1)])
                    else:
                        if len(feats[k][(el,elb,L,pi)]) > 0:
                            fock_bc[k][orb][L] = self._models[k][orb][L].predict(feats[k][(el,elb,L,pi)], X0=feats[k][(el,elb,0,1)])
        return fock_bc
def active_set_selection(feats, blocks, orbs, selector, normalize=True, slices=None):
    """ Compute active set points for the blocks given.

    Parameters
    ----------
    feats: dict of feature blocks, indexed as feats[tblock][fblock]
    blocks: dict mapping tblock -> {kblock: [l values]}
    orbs: orbital definitions (forwarded to block_to_feat_index)
    selector: sample selector exposing n_to_select, fit(), selected_idx_
        and n_selected_ (an integer count)
    normalize: scale each invariant block by its mean feature norm first
    slices: optional per-block training slices; None uses all samples

    Returns
    -------
    (active_idx, active_feats0, active_feats): selected indices, invariant
    feature blocks and covariant feature blocks of the active points.
    """
    n_to_select = selector.n_to_select # makes sure the selector has the "right" interface
    # determines active set
    active_feats0 = {}
    active_feats = {}
    active_idx = {}
    for tblock in blocks.keys():
        active_idx[tblock] = {}
        active_feats[tblock] = {}
        active_feats0[tblock] = {}
        for kblock in blocks[tblock]:
            # gets the feature block corresponding to the invariant features (l=0, sigma=1)
            fblock0 = block_to_feat_index(tblock, kblock, 0, orbs)[:-1]+(1,)
            if slices is None: # if we must only consider a training slice...
                islice = slice(None)
            else:
                islice = slices[tblock][kblock]
            if fblock0 in active_idx[tblock]: # reuse indices when available
                sel_idx = active_idx[tblock][fblock0]
            else:
                xblock0 = feats[tblock][fblock0][islice]
                if len(xblock0) == 0: # skips zero blocks
                    print("zero block", tblock, kblock, fblock0)
                    continue
                xblock0 = xblock0.reshape(len(xblock0),-1)
                if normalize:
                    # scale by the mean centered-feature norm so thresholds
                    # are comparable across blocks
                    mean_sz = np.sqrt(np.mean(((xblock0-xblock0.mean(axis=0))**2).sum(axis=1)))
                    if mean_sz > 0:
                        xblock0 = xblock0/mean_sz
                # cannot select more points than samples available
                selector.n_to_select = min(xblock0.shape[0]-1, n_to_select)
                try:
                    selector.fit(xblock0)
                except np.linalg.LinAlgError:
                    # selection can go singular; keep the points selected so far.
                    # BUGFIX: n_selected_ is an int (used below as a slice bound);
                    # the old message called len() on it, which would raise.
                    print(f"Error during selection. Stopping at {selector.n_selected_}/{selector.n_to_select} active points.")
                except Exception:
                    # BUGFIX: this clause was `except error:` / `raise error`,
                    # referencing an undefined name (NameError when triggered)
                    print(f"Uncaught error for {fblock0}, selected {getattr(selector, 'n_selected_', 'n/a')}")
                    raise
                print(f"Selected {selector.n_selected_} for {kblock} [{fblock0}]")
                sel_idx = selector.selected_idx_[:selector.n_selected_]
                selector.n_to_select = n_to_select
                active_idx[tblock][fblock0] = sel_idx
                active_feats0[tblock][fblock0] = feats[tblock][fblock0][islice][sel_idx]
            for l in blocks[tblock][kblock]: # these will only generate slices so there's no harm in being redundant
                fblock = block_to_feat_index(tblock, kblock, l, orbs)
                active_feats[tblock][fblock] = feats[tblock][fblock][islice][sel_idx]
    return active_idx, active_feats0, active_feats
| [
"numpy.trace",
"numpy.moveaxis",
"numpy.random.shuffle",
"warnings.filterwarnings",
"sklearn.linear_model.Ridge",
"sklearn.kernel_ridge.KernelRidge",
"numpy.linalg.qr",
"numpy.zeros",
"numpy.einsum",
"scipy.sparse.linalg.eigsh",
"time.time",
"numpy.vstack",
"numpy.where",
"numpy.linalg.nor... | [((382, 452), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'scipy.linalg.LinAlgWarning'}), "('ignore', category=scipy.linalg.LinAlgWarning)\n", (405, 452), False, 'import warnings\n'), ((453, 532), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'sklearn.exceptions.FitFailedWarning'}), "('ignore', category=sklearn.exceptions.FitFailedWarning)\n", (476, 532), False, 'import warnings\n'), ((533, 588), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (556, 588), False, 'import warnings\n'), ((1464, 1486), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1481, 1486), True, 'import numpy as np\n'), ((9170, 9200), 'numpy.zeros', 'np.zeros', (['(self._nM, self._nM)'], {}), '((self._nM, self._nM))\n', (9178, 9200), True, 'import numpy as np\n'), ((13496, 13502), 'time.time', 'time', ([], {}), '()\n', (13500, 13502), False, 'from time import time\n'), ((14408, 14414), 'time.time', 'time', ([], {}), '()\n', (14412, 14414), False, 'from time import time\n'), ((15117, 15123), 'time.time', 'time', ([], {}), '()\n', (15121, 15123), False, 'from time import time\n'), ((15999, 16005), 'time.time', 'time', ([], {}), '()\n', (16003, 16005), False, 'from time import time\n'), ((16614, 16620), 'time.time', 'time', ([], {}), '()\n', (16618, 16620), False, 'from time import time\n'), ((1594, 1652), 'numpy.concatenate', 'np.concatenate', (['[idx[:n * nbatch], idx[(n + 1) * nbatch:]]'], {}), '([idx[:n * nbatch], idx[(n + 1) * nbatch:]])\n', (1608, 1652), True, 'import numpy as np\n'), ((2769, 2808), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'fit_intercept': 'self.fit_intercept'}), '(fit_intercept=self.fit_intercept)\n', (2774, 2808), False, 'from sklearn.linear_model import Ridge, RidgeCV\n'), ((4843, 4855), 'numpy.trace', 'np.trace', (['K0'], {}), '(K0)\n', (4851, 4855), 
True, 'import numpy as np\n'), ((5152, 5240), 'numpy.einsum', 'np.einsum', (['"""imjn,ij->imjn"""', '(KLMM / self.KLscale)', '(zw * (K0 / self.K0scale) ** (z - 1))'], {}), "('imjn,ij->imjn', KLMM / self.KLscale, zw * (K0 / self.K0scale) **\n (z - 1))\n", (5161, 5240), True, 'import numpy as np\n'), ((5583, 5616), 'sklearn.kernel_ridge.KernelRidge', 'KernelRidge', ([], {'kernel': '"""precomputed"""'}), "(kernel='precomputed')\n", (5594, 5616), False, 'from sklearn.kernel_ridge import KernelRidge\n'), ((6487, 6575), 'numpy.einsum', 'np.einsum', (['"""imjn,ij->imjn"""', '(KLMM / self.KLscale)', '(zw * (K0 / self.K0scale) ** (z - 1))'], {}), "('imjn,ij->imjn', KLMM / self.KLscale, zw * (K0 / self.K0scale) **\n (z - 1))\n", (6496, 6575), True, 'import numpy as np\n'), ((7479, 7485), 'time.time', 'time', ([], {}), '()\n', (7483, 7485), False, 'from time import time\n'), ((13978, 13990), 'numpy.trace', 'np.trace', (['K0'], {}), '(K0)\n', (13986, 13990), True, 'import numpy as np\n'), ((14273, 14361), 'numpy.einsum', 'np.einsum', (['"""imjn,ij->imjn"""', '(KLMM / self.KLscale)', '(zw * (K0 / self.K0scale) ** (z - 1))'], {}), "('imjn,ij->imjn', KLMM / self.KLscale, zw * (K0 / self.K0scale) **\n (z - 1))\n", (14282, 14361), True, 'import numpy as np\n'), ((15600, 15690), 'numpy.einsum', 'np.einsum', (['"""imjn,ij->imjn"""', '(KLNM / self.KLscale)', '(zw * (K0NM / self.K0scale) ** (z - 1))'], {}), "('imjn,ij->imjn', KLNM / self.KLscale, zw * (K0NM / self.K0scale) **\n (z - 1))\n", (15609, 15690), True, 'import numpy as np\n'), ((17235, 17325), 'numpy.einsum', 'np.einsum', (['"""imjn,ij->imjn"""', '(KLNM / self.KLscale)', '(zw * (K0NM / self.K0scale) ** (z - 1))'], {}), "('imjn,ij->imjn', KLNM / self.KLscale, zw * (K0NM / self.K0scale) **\n (z - 1))\n", (17244, 17325), True, 'import numpy as np\n'), ((2589, 2610), 'numpy.moveaxis', 'np.moveaxis', (['Xm', '(2)', '(1)'], {}), '(Xm, 2, 1)\n', (2600, 2610), True, 'import numpy as np\n'), ((4450, 4472), 'numpy.moveaxis', 
'np.moveaxis', (['Xm', '(-1)', '(1)'], {}), '(Xm, -1, 1)\n', (4461, 4472), True, 'import numpy as np\n'), ((6008, 6030), 'numpy.moveaxis', 'np.moveaxis', (['Xm', '(-1)', '(1)'], {}), '(Xm, -1, 1)\n', (6019, 6030), True, 'import numpy as np\n'), ((7553, 7575), 'scipy.linalg.eigh', 'scipy.linalg.eigh', (['KMM'], {}), '(KMM)\n', (7570, 7575), False, 'import scipy\n'), ((8911, 8939), 'numpy.sqrt', 'np.sqrt', (['self._vk[:self._nM]'], {}), '(self._vk[:self._nM])\n', (8918, 8939), True, 'import numpy as np\n'), ((9792, 9824), 'numpy.zeros', 'np.zeros', (['(self._nM, Y.shape[1])'], {}), '((self._nM, Y.shape[1]))\n', (9800, 9824), True, 'import numpy as np\n'), ((11174, 11189), 'numpy.linalg.qr', 'np.linalg.qr', (['A'], {}), '(A)\n', (11186, 11189), True, 'import numpy as np\n'), ((13358, 13384), 'numpy.moveaxis', 'np.moveaxis', (['active', '(-1)', '(1)'], {}), '(active, -1, 1)\n', (13369, 13384), True, 'import numpy as np\n'), ((14378, 14384), 'time.time', 'time', ([], {}), '()\n', (14382, 14384), False, 'from time import time\n'), ((14716, 14722), 'time.time', 'time', ([], {}), '()\n', (14720, 14722), False, 'from time import time\n'), ((15185, 15207), 'numpy.moveaxis', 'np.moveaxis', (['Xm', '(-1)', '(1)'], {}), '(Xm, -1, 1)\n', (15196, 15207), True, 'import numpy as np\n'), ((15968, 15974), 'time.time', 'time', ([], {}), '()\n', (15972, 15974), False, 'from time import time\n'), ((16477, 16483), 'time.time', 'time', ([], {}), '()\n', (16481, 16483), False, 'from time import time\n'), ((16711, 16717), 'time.time', 'time', ([], {}), '()\n', (16715, 16717), False, 'from time import time\n'), ((16820, 16842), 'numpy.moveaxis', 'np.moveaxis', (['Xm', '(-1)', '(1)'], {}), '(Xm, -1, 1)\n', (16831, 16842), True, 'import numpy as np\n'), ((3171, 3192), 'numpy.moveaxis', 'np.moveaxis', (['Xm', '(2)', '(1)'], {}), '(Xm, 2, 1)\n', (3182, 3192), True, 'import numpy as np\n'), ((7851, 7913), 'scipy.sparse.linalg.eigsh', 'scipy.sparse.linalg.eigsh', (['KMM'], {'k': '(1)', 
'return_eigenvectors': '(False)'}), '(KMM, k=1, return_eigenvectors=False)\n', (7876, 7913), False, 'import scipy\n'), ((8797, 8850), 'numpy.where', 'np.where', (['(self._vk > self.jitter * self._jitter_scale)'], {}), '(self._vk > self.jitter * self._jitter_scale)\n', (8805, 8850), True, 'import numpy as np\n'), ((10838, 10854), 'numpy.eye', 'np.eye', (['self._nM'], {}), '(self._nM)\n', (10844, 10854), True, 'import numpy as np\n'), ((11571, 11635), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'self.regularizer', 'fit_intercept': '(False)', 'solver': '"""svd"""'}), "(alpha=self.regularizer, fit_intercept=False, solver='svd')\n", (11576, 11635), False, 'from sklearn.linear_model import Ridge, RidgeCV\n'), ((11783, 11810), 'numpy.vstack', 'np.vstack', (['[KNM, self._VMM]'], {}), '([KNM, self._VMM])\n', (11792, 11810), True, 'import numpy as np\n'), ((11830, 11845), 'numpy.linalg.qr', 'np.linalg.qr', (['A'], {}), '(A)\n', (11842, 11845), True, 'import numpy as np\n'), ((20401, 20420), 'numpy.linalg.norm', 'np.linalg.norm', (['tgt'], {}), '(tgt)\n', (20415, 20420), True, 'import numpy as np\n'), ((11095, 11120), 'numpy.sqrt', 'np.sqrt', (['self.regularizer'], {}), '(self.regularizer)\n', (11102, 11120), True, 'import numpy as np\n'), ((11123, 11139), 'numpy.eye', 'np.eye', (['self._nM'], {}), '(self._nM)\n', (11129, 11139), True, 'import numpy as np\n'), ((11507, 11529), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0, 1)'}), '(shape=(0, 1))\n', (11515, 11529), True, 'import numpy as np\n'), ((9084, 9100), 'numpy.eye', 'np.eye', (['self._nM'], {}), '(self._nM)\n', (9090, 9100), True, 'import numpy as np\n'), ((10146, 10162), 'numpy.eye', 'np.eye', (['self._nM'], {}), '(self._nM)\n', (10152, 10162), True, 'import numpy as np\n'), ((10463, 10488), 'numpy.eye', 'np.eye', (['self.KMM.shape[0]'], {}), '(self.KMM.shape[0])\n', (10469, 10488), True, 'import numpy as np\n'), ((11302, 11334), 'numpy.zeros', 'np.zeros', (['(self._nM, Y.shape[1])'], {}), '((self._nM, 
Y.shape[1]))\n', (11310, 11334), True, 'import numpy as np\n'), ((11944, 11980), 'numpy.zeros', 'np.zeros', (['(KNM.shape[1], Y.shape[1])'], {}), '((KNM.shape[1], Y.shape[1]))\n', (11952, 11980), True, 'import numpy as np\n'), ((12175, 12191), 'numpy.eye', 'np.eye', (['self._nM'], {}), '(self._nM)\n', (12181, 12191), True, 'import numpy as np\n')] |
from riglib import bmi, plexon, source
from riglib.bmi import extractor
import numpy as np
from riglib.bmi import clda
from riglib.bmi import train
from riglib.bmi import state_space_models
import datetime
import matplotlib.pyplot as plt
class State(object):
    """Minimal state container for compatibility with other BMI decoding
    implementations: it only stores the state mean."""

    def __init__(self, mean, *args, **kwargs):
        # extra positional/keyword arguments are accepted and ignored on purpose
        self.mean = mean
class RatFilter(object):
    '''Moving-average filter used in 1D or 2D LFP control.

    The decoded value is the uniformly weighted moving average of the
    difference in summed firing rates between two unit ensembles:
        x_t = mean over the last n_steps of (sum FR(e1) - sum FR(e2))
    which is then mapped to a frequency through a calibration function.

    Attributes
    ----------
    A : np.array of shape (n_steps,)
        Uniform averaging weights (1/n_steps each).
    Y : np.array of shape (n_steps, n_units)
        Ring buffer of the most recent binned observations.
    X : float
        Current moving-average firing-rate difference (cursor value).
    F : float
        Current frequency, FR_to_freq_fn(X).
    '''
    def __init__(self, task_params):
        # indices of the units in the two control ensembles
        self.e1_inds = task_params['e1_inds']
        self.e2_inds = task_params['e2_inds']
        # calibration mapping from firing-rate difference to frequency
        self.FR_to_freq_fn = task_params['FR_to_freq_fn']
        # decoder targets and midpoint from calibration
        self.t1 = task_params['t1']
        self.t2 = task_params['t2']
        self.mid = task_params['mid']
        self.dec_params = task_params

        #Cursor data (X)
        self.X = 0.
        #Freq data(F)
        self.F = 0.
        self.baseline = False

    def get_mean(self):
        """Return the current state mean as a flat numpy array."""
        return np.array(self.state.mean).ravel()

    def init_from_task(self, n_units, **kwargs):
        """Allocate the averaging weights and observation buffer.

        Raises
        ------
        ValueError
            If 'nsteps' is not supplied as a keyword argument.
        """
        if 'nsteps' not in kwargs:
            # was a bare `raise Exception` with no message; ValueError is a
            # backward-compatible subclass and explains what is missing
            raise ValueError("init_from_task requires an 'nsteps' keyword argument")
        self.n_steps = kwargs['nsteps']
        self.A = np.ones(( self.n_steps, ))/float(self.n_steps)
        #Neural data (Y): ring buffer of past observations
        self.Y = np.zeros(( self.n_steps, n_units))
        self.n_units = n_units

    def _init_state(self, init_state=None,**kwargs):
        if init_state is None:
            init_state = 0
        self.state = State(init_state)

    def __call__(self, obs, **kwargs):
        self.state = self._mov_avg(obs, **kwargs)

    def _mov_avg(self, obs,**kwargs):
        ''' Function to compute moving average with old mean and new observation'''
        # shift the buffer left and append the newest observation
        self.Y[:-1, :] = self.Y[1:, :]
        self.Y[-1, :] = np.squeeze(obs)
        # per-step ensemble difference, then uniform average over the window
        d_fr = np.sum(self.Y[:, self.e1_inds], axis=1) - np.sum(self.Y[:, self.e2_inds], axis=1)
        mean_FR = np.dot(d_fr, self.A)
        self.X = mean_FR
        self.F = self.FR_to_freq_fn(self.X)
        return State(self.X)

    def FR_to_freq(self, mean_FR):
        """Map a mean firing-rate difference to a frequency."""
        return self.FR_to_freq_fn(mean_FR)

    def _pickle_init(self):
        pass
class IsmoreSleepFilter(RatFilter):
    # Moving-average filter variant for the ISMORE sleep task: maps the
    # (optionally saturated) e1-e2 firing-rate difference to a clipped
    # "alpha" control value instead of a frequency.
    def __init__(self, task_params):
        # indices of the units in the two control ensembles
        self.e1_inds = task_params['e1_inds']
        self.e2_inds = task_params['e2_inds']
        # calibration mapping from firing-rate difference to alpha
        self.FR_to_alpha_fn = task_params['FR_to_alpha_fn']
        self.dec_params = task_params
        self.mid = task_params['mid']
        # saturation levels for the ensemble rates (None disables saturation)
        self.e1_max = task_params['e1_perc']
        self.e2_max = task_params['e2_perc']
        # (min, max) clipping bounds for alpha
        self.freq_lim = task_params['freq_lim']

        #Cursor data (X)
        self.FR = 0.
        #Freq data(F)
        self.alpha = 0.
        self.model_attrs = []
        self.baseline = False

    def init_from_task(self, **kwargs):
        #Define n_steps (defaults to 1, i.e. no averaging window)
        self.n_steps = kwargs.pop('nsteps', 1)
        self.A = np.ones(( self.n_steps, ))/float(self.n_steps)
        #Neural data (Y)
        self.Y = np.zeros(( self.n_steps, len(self.e1_inds)+len(self.e2_inds)))
        self.n_units = len(self.e1_inds)+len(self.e2_inds)

    def _mov_avg(self, obs,**kwargs):
        ''' Function to compute moving average with old mean and new observation'''
        self.Y[:-1, :] = self.Y[1:, :]
        self.Y[-1, :] = np.squeeze(obs)
        # NOTE(review): np.min([scalar, array]) only forms a well-shaped pair
        # when n_steps == 1 (the default above); confirm n_steps is never >1
        # when e1_max/e2_max saturation is enabled.
        if self.e1_max is not None:
            e1_tmp = np.min([self.e1_max, np.sum(self.Y[:, self.e1_inds], axis=1)])
        else:
            e1_tmp = np.sum(self.Y[:, self.e1_inds], axis=1)
        if self.e2_max is not None:
            e2_tmp = np.min([self.e2_max, np.sum(self.Y[:, self.e2_inds], axis=1)])
        else:
            e2_tmp = np.sum(self.Y[:, self.e2_inds], axis=1)
        d_fr = e1_tmp - e2_tmp
        mean_FR = np.dot(d_fr, self.A)
        self.FR = mean_FR
        self.alpha = self.FR_to_alpha_fn(self.FR)

        # Max alpha is -1 or 1 : clip to the configured limits
        if self.alpha > self.freq_lim[1]:
            self.alpha = self.freq_lim[1]
        elif self.alpha < self.freq_lim[0]:
            self.alpha = self.freq_lim[0]
        # flag whether the rate difference is below the calibration midpoint
        self.baseline = self.FR < self.mid
        return State(self.alpha)
from riglib.bmi.bmi import Decoder
class RatDecoder(Decoder):
    """Decoder wrapping a moving-average filter.

    Expected positional arguments:
    (filter, units, ssm, extractor_cls, extractor_kwargs).
    """

    def __init__(self, *args, **kwargs):
        # the base Decoder only takes (filter, units, ssm)
        super(RatDecoder, self).__init__(args[0], args[1], args[2])
        self.extractor_cls = args[3]
        self.extractor_kwargs = args[4]
        # total number of units across both control ensembles
        self.n_features = len(self.filt.e1_inds) + len(self.filt.e2_inds)

    def __getitem__(self, key):
        # dict-style read access maps onto attributes
        return getattr(self, key)

    def __setitem__(self, key, value):
        # dict-style write access maps onto attributes
        setattr(self, key, value)

    def predict(self, neural_obs, **kwargs):
        # delegate decoding to the filter; state lives on the filter itself
        self.filt(neural_obs, **kwargs)

    def init_from_task(self, **kwargs):
        # nothing task-specific to initialize for this decoder
        pass
class IsmoreSleepDecoder(RatDecoder):
    # RatDecoder variant for the ISMORE sleep task; only fixes the bin length.
    def __init__(self, *args, **kwargs):
        # decoding bin length; presumably seconds (10 Hz) -- confirm against Decoder
        self.binlen = 0.1
        super(IsmoreSleepDecoder, self).__init__(*args, **kwargs)
########## Functions to make decoder ###########
def create_decoder(ssm, task_params):
    """Assemble a RatDecoder around a moving-average RatFilter.

    ssm: state-space model passed through to the decoder
    task_params: dict of filter/decoder parameters (must contain 'units'
        and 'extractor_cls' among the RatFilter parameters)
    """
    mov_avg_filter = RatFilter(task_params)
    return RatDecoder(mov_avg_filter, task_params['units'], ssm,
                      task_params['extractor_cls'], dict())
########### Called from trainbmi.py to make decoder from Baseline #####
import re
# Parses cell names like "107a": a 1-3 digit channel number, optional
# whitespace, then a single unit character.
cellname = re.compile(r'(\d{1,3})\s*(\w{1})')
def calc_decoder_from_baseline_file(neural_features, neural_features_unbinned, units, nsteps, prob_t1,
    prob_t2, timeout, timeout_pause, freq_lim, e1_inds, e2_inds, sim_fcn='rat', **kwargs):
    """Calibrate a decoder (rat or IsMore 'sleep' variant) from baseline neural data.

    Parameters
    ----------
    neural_features : array, shape (T, n_units)
        Binned features of the baseline recording.
    neural_features_unbinned : array
        Unbinned features; only used to replay a simulated IsMore session.
    units : iterable of (channel, unit) pairs
        Unit labels matching the columns of neural_features.
    nsteps : int
        Width (in bins) of the moving-average window for the E1-E2 cursor.
    prob_t1, prob_t2 : float
        Cumulative-probability levels used to place the T1 (high) / T2 (low)
        thresholds on the fitted baseline distribution.
    timeout, timeout_pause : float
        Trial timeout and post-timeout pause, forwarded to the simulation.
    freq_lim : (low, high)
        Output limits of the cursor-to-frequency/alpha map.
    e1_inds, e2_inds : array of int or None
        Column indices of the E1/E2 ensembles; if either is None the user is
        prompted to type cell names (e.g. '12a 14b') on stdin.
    sim_fcn : {'rat', 'ismore'}
        Which calibration/simulation path to run.
    **kwargs
        'saturate_perc' (optional percentile clip); for the 'ismore' path,
        'skip_sim', 'targets_matrix', 'session_length' are also read.

    Returns
    -------
    'rat'    : (e1_inds, e2_inds, FR_to_freq_fn, units, t1, t2, mid)
    'ismore' : (decoder, n_rewards)
    """
    # Prompt for E1/E2 cell names when indices were not supplied.
    if np.logical_or(e1_inds is None, e2_inds is None):
        e1_string = input('Enter e1 cells: ')
        e2_string = input('Enter e2 cells: ')
        # 'cellname' regex splits entries like '12a' into (channel, unit letter);
        # ord(u) - 96 maps 'a' -> 1, 'b' -> 2, ...
        e1 = np.array([ (int(c), ord(u) - 96) for c, u in cellname.findall(e1_string)])
        e2 = np.array([ (int(c), ord(u) - 96) for c, u in cellname.findall(e2_string)])
        e1_inds = np.array([i for i, u in enumerate(units) if np.logical_and(u[0] in e1[:,0], u[1] in e1[:,1])])
        e2_inds = np.array([i for i, u in enumerate(units) if np.logical_and(u[0] in e2[:,0], u[1] in e2[:,1])])
    T = neural_features.shape[0]
    # With 'saturate_perc', keep E1 and E2 in separate columns so each can be
    # clipped at its own percentile before the E1-E2 difference is taken.
    if 'saturate_perc' in kwargs:
        baseline_data = np.zeros((T - nsteps, 2))
    else:
        baseline_data = np.zeros((T - nsteps))
    # Moving-average (nsteps-wide) ensemble sums over the baseline recording.
    for ib in range(T-nsteps):
        if 'saturate_perc' in kwargs:
            baseline_data[ib, 0] = np.mean(np.sum(neural_features[ib:ib+nsteps,
                e1_inds], axis=1))
            baseline_data[ib, 1] = np.mean(np.sum(neural_features[ib:ib+nsteps,
                e2_inds], axis=1))
        else:
            baseline_data[ib] = np.mean(np.sum(neural_features[ib:ib+nsteps,
                e1_inds], axis=1))-np.mean(np.sum(neural_features[ib:ib+nsteps,
                e2_inds], axis=1))
    if 'saturate_perc' in kwargs:
        sat_perc = kwargs.pop('saturate_perc')
        # ignore the first second of data
        e1_perc = np.percentile(baseline_data[20:, 0], sat_perc)
        e2_perc = np.percentile(baseline_data[20:, 1], sat_perc)
        # Clip each ensemble at its percentile, then form the E1-E2 cursor.
        baseline_data[:, 0][baseline_data[:, 0] > e1_perc] = e1_perc
        baseline_data[:, 1][baseline_data[:, 1] > e2_perc] = e2_perc
        baseline_data = baseline_data[:, 0] - baseline_data[:, 1]
    else:
        e1_perc = None
        e2_perc = None
    # Fit a Gaussian mixture to the baseline cursor distribution.
    x, pdf, pdf_individual = generate_gmm(baseline_data)
    if sim_fcn == 'rat':
        t2, mid, t1, num_t1, num_t2, num_miss, FR_to_freq_fn = sim_data(x, pdf, pdf_individual, prob_t1, prob_t2,
            baseline_data, timeout, timeout_pause, freq_lim, sim_bmi_fcn='rat')
        return e1_inds, e2_inds, FR_to_freq_fn, units, t1, t2, mid
    elif sim_fcn == 'ismore':
        # Get fcn: thresholds from cumulative probability; baseline = pdf mode.
        t1 = prob_under_pdf(x, pdf, prob_t1)
        t2 = prob_under_pdf(x, pdf, prob_t2)
        idx_mid = np.argmax(pdf)
        mid = x[idx_mid]
        FR_to_alpha_fn = map_to_freq(t2, mid, t1, freq_lim[0], freq_lim[1])
        # Plot FR to alpha fcn, clipped to freq_lim over [t2, t1]
        x_axis = np.linspace(t2, t1, 100)
        y_axis = []
        for xi in x_axis:
            yi = FR_to_alpha_fn(xi)
            yi = np.max([freq_lim[0], yi])
            yi = np.min([freq_lim[1], yi])
            y_axis.append(yi)
        # Also show the raw (unclipped) map on a wider range
        x_axis2 = np.arange(-10, 10)
        y_axis2 = []
        for xi in x_axis2:
            y_axis2.append(FR_to_alpha_fn(xi))
        import matplotlib.pyplot as plt
        f, ax = plt.subplots()
        ax.plot(x_axis, y_axis)
        ax.plot(x_axis2, y_axis2, 'r.')
        ax.plot([-5, 5], [0, 0], 'r--')
        kwargs2 = dict(replay_neural_features = neural_features, e1_inds=e1_inds,
            e2_inds = e2_inds, FR_to_alpha_fn=FR_to_alpha_fn, mid = mid, e1_perc=e1_perc,
            e2_perc=e2_perc, freq_lim=freq_lim)
        filt = IsmoreSleepFilter(kwargs2)
        decoder = IsmoreSleepDecoder(filt, units, state_space_models.StateSpaceEndptPos1D(),
            extractor.BinnedSpikeCountsExtractor, {})
        # NOTE(review): kwargs['skip_sim'] raises KeyError when absent -- the
        # 'ismore' path requires callers to always pass it.
        if kwargs['skip_sim']:
            nrewards = []
            import pdb
            pdb.set_trace()
        else:
            if type(kwargs['targets_matrix']) is str:
                import pickle
                kwargs['targets_matrix'] = pickle.load(open(kwargs['targets_matrix']))
            pname = ismore_sim_bmi(neural_features_unbinned, decoder, targets_matrix=kwargs['targets_matrix'],
                session_length=kwargs['session_length'])
            # Analyze data:
            import tables
            import matplotlib.pyplot as plt
            hdf = tables.openFile(pname[:-4]+'.hdf')
            # Plot x/y trajectory:
            f, ax = plt.subplots()
            ax.plot(hdf.root.task[2:]['plant_pos'][:, 0], hdf.root.task[2:]['plant_pos'][:, 1])
            # Mark one instance of each target (index 1 in red, index 0 in green)
            ix = np.nonzero(hdf.root.task[:]['target_index'] == 1)[0]
            ax.plot(hdf.root.task[ix[0]]['target_pos'][0], hdf.root.task[ix[0]]['target_pos'][1], 'r.',
                markersize=20)
            ix = np.nonzero(hdf.root.task[:]['target_index'] == 0)[0] + 5
            ax.plot(hdf.root.task[ix[0]]['target_pos'][0], hdf.root.task[ix[0]]['target_pos'][1], 'g.',
                markersize=20)
            nrewards = np.nonzero(hdf.root.task_msgs[:]['msg']=='reward')[0]
        return decoder, len(nrewards)
###### From Rat BMI #######
###### From Rat BMI #######
###### From Rat BMI #######
###### From Rat BMI #######
from sklearn import metrics
from sklearn.mixture import GMM
import numpy as np
import matplotlib.pyplot as plt
def generate_gmm(data, ax=None):
    """Fit 1-10 component Gaussian mixtures to 1-D data and keep the best.

    Model selection is by minimum AIC. The selected mixture's density is
    evaluated over the data range and plotted (histogram + total pdf +
    per-component pdfs) on ``ax`` (a new figure if ``ax`` is None).

    Returns (x, pdf, pdf_individual).
    """
    samples = data.reshape(data.shape[0], 1)
    # Fit a candidate mixture for every component count 1..10
    component_counts = np.arange(1, 11)
    candidates = [GMM(k).fit(samples) for k in component_counts]
    # Select the model with the lowest AIC
    aic_scores = [model.aic(samples) for model in candidates]
    best_model = candidates[np.argmin(aic_scores)]
    # Evaluate the selected density over (slightly beyond) the data range
    x = np.linspace(data.min() - 1, data.max() + 1, data.size)
    logprob, responsibilities = best_model.score_samples(x.reshape(x.size, 1))
    pdf = np.exp(logprob)
    # Per-component densities: responsibility-weighted total density
    pdf_individual = responsibilities * pdf[:, np.newaxis]
    if ax is None:
        fig, ax = plt.subplots()
    ax.hist(samples, 50, normed=True, histtype='stepfilled', alpha=0.4)
    ax.plot(x, pdf, '-k')
    ax.plot(x, pdf_individual, '--k')
    ax.text(0.04, 0.96, "Best-fit Mixture",
            ha='left', va='top', transform=ax.transAxes)
    ax.set_xlabel('$x$')
    ax.set_ylabel('$p(x)$')
    return x, pdf, pdf_individual
##this function takes in an array of x-values and an array
##of y-values that correspond to a probability density function
##and determines the x-value at which the area under the PDF is approximately
##equal to some value passed in the arguments.
def prob_under_pdf(x_pdf, y_pdf, prob):
    """Return the x-value at which the cumulative (trapezoidal) area under
    the PDF first reaches ``prob``.

    ``x_pdf``/``y_pdf`` are the sampled PDF (x ascending). The original
    implementation recomputed ``sklearn.metrics.auc`` (a plain trapezoidal
    rule) over a growing slice on every iteration -- O(n^2) -- and looped
    forever when ``prob`` exceeded the total area. This version computes the
    cumulative trapezoid once and reproduces the original's exact return
    index: if ``k`` is the smallest slice end with area(x[0:k]) >= prob, the
    original returned ``x_pdf[k + 1]`` (one past the crossing slice), so we
    preserve that.

    Raises
    ------
    ValueError : if prob exceeds the total area (original looped forever).
    IndexError : if the crossing occurs too close to the end of the array
        (same as the original's out-of-range lookup).
    """
    if prob <= 0:
        # Mirrors the original: the while-loop never executes when prob <= 0,
        # so the initial index (2) is used directly.
        return x_pdf[2]
    x = np.asarray(x_pdf, dtype=float)
    y = np.asarray(y_pdf, dtype=float)
    # cum_area[j] == trapezoidal area under x[0 : j + 2]
    cum_area = np.cumsum(0.5 * (y[1:] + y[:-1]) * np.diff(x))
    if prob > cum_area[-1]:
        raise ValueError('prob exceeds the total area under the PDF')
    # First j with cum_area[j] >= prob  <=>  smallest slice end k = j + 2
    j = int(np.searchsorted(cum_area, prob))
    return x_pdf[j + 3]
##function to map ensemble values to frequency values
def map_to_freq(t2, mid, t1, min_freq, max_freq):
    """Quadratic map from cursor value (E1-E2) to feedback frequency.

    Fits a degree-2 polynomial through the three anchor points
    (t2, min_freq), (mid, (min+max)/2) and (t1, max_freq) and returns it
    as a callable ``np.poly1d``.
    """
    cursor_anchors = np.array([t2, mid, t1])
    midpoint_freq = (float(max_freq) + float(min_freq)) / 2.
    freq_anchors = np.array([min_freq, midpoint_freq, max_freq])
    coefficients = np.polyfit(cursor_anchors, freq_anchors, 2)
    return np.poly1d(coefficients)
def sim_data(x, pdf, pdf_individual, prob_t1, prob_t2, data,
    timeout, timeout_pause, freq_lim, sim_bmi_fcn='rat'):
    """Place T1/T2/baseline on the fitted pdf, plot the fit, simulate a BMI
    run over the baseline data, and report hit counts.

    Parameters
    ----------
    x, pdf, pdf_individual :
        Outputs of generate_gmm (sample grid, total density, per-component
        densities).
    prob_t1, prob_t2 : float
        Cumulative-probability levels used to place the high (T1) and low
        (T2) thresholds via prob_under_pdf.
    data : array
        Baseline cursor (E1-E2) trace, one sample per bin.
    timeout, timeout_pause : float
        Trial timeout / post-timeout pause, forwarded to the simulation.
    freq_lim : (min_freq, max_freq)
        Range of the cursor-to-frequency map.
    sim_bmi_fcn : {'rat', 'ismore'}
        Which simulator to run over the baseline data.
        NOTE(review): the 'ismore' branch calls ismore_sim_bmi with a
        rat-style 7-argument signature that does not match its definition
        in this module -- confirm which variant is intended.

    Returns
    -------
    (t2, mid, t1, num_t1, num_t2, num_miss, p) where p is the quadratic
    cursor-to-frequency polynomial from map_to_freq.
    """
    # Thresholds where the cumulative area under the pdf reaches the
    # requested probabilities; baseline (mid) is the pdf mode.
    t1 = prob_under_pdf(x, pdf, prob_t1)
    t2 = prob_under_pdf(x, pdf, prob_t2)
    idx_mid = np.argmax(pdf)
    mid = x[idx_mid]
    fig, ax1 = plt.subplots()
    # Small jitter (10% of the std) spreads repeated values in the histogram
    ax1.hist(data+np.random.normal(0, 0.1*data.std(), data.size), 50,
        normed = True, histtype = 'stepfilled', alpha = 0.4)
    ax1.plot(x, pdf, '-k')
    ax1.plot(x, pdf_individual, '--k')
    ax1.text(0.04, 0.96, "Best-fit Mixture",
        ha='left', va='top', transform=ax1.transAxes)
    ax1.set_xlabel('Cursor Value (E1-E2)')
    ax1.set_ylabel('$p(x)$')
    ##find the points where t1 and t2 lie on the gaussian
    idx_t2 = np.where(x>t2)[0][0]
    x_t2 = t2
    y_t2 = pdf[idx_t2]
    idx_t1 = np.where(x>t1)[0][0]
    x_t1 = t1
    y_t1 = pdf[idx_t1]
    y_mid = pdf[idx_mid]
    # Mark the three calibration points on the density plot
    ax1.plot(x_t1, y_t1, 'o', color = 'g')
    ax1.plot(x_t2, y_t2, 'o', color = 'g')
    ax1.plot(mid, y_mid, 'o', color = 'g')
    ax1.set_title("Firing rate histogram and gaussian fit")
    ax1.annotate('T1: ('+str(round(x_t1, 3))+')', xy=(x_t1, y_t1), xytext=(40,20),
        textcoords='offset points', ha='center', va='bottom',
        bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.3),
        arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.5',
        color='red'))
    ax1.annotate('T2: ('+str(round(x_t2, 3))+')', xy=(x_t2, y_t2), xytext=(-40,20),
        textcoords='offset points', ha='center', va='bottom',
        bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.3),
        arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.5',
        color='red'))
    ax1.annotate('Base: ('+str(round(mid, 3))+')', xy=(mid, y_mid), xytext=(-100,-20),
        textcoords='offset points', ha='center', va='bottom',
        bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.3),
        arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.5',
        color='red'))
    ##get the control function (quadratic cursor -> frequency map)
    p = map_to_freq(t2, mid, t1, freq_lim[0], freq_lim[1])
    ##run a simulation
    if sim_bmi_fcn=='rat':
        num_t1, num_t2, num_miss = sim_bmi(data, t1, t2, mid, timeout, timeout_pause, p)
    elif sim_bmi_fcn == 'ismore':
        num_t1, num_t2, num_miss = ismore_sim_bmi(data, t1, t2, mid, timeout, timeout_pause, p)
    print("Simulation results:\nNumber of T1: " + str(num_t1) + "\nNumber of T2: " + str(num_t2) + "\nNumber of Misses: " + str(num_miss))
    print("Calculated T2 value is " + str(round(t2, 5)))
    print("Calculated mid value is " + str(round(mid, 5)))
    print("Calculated T1 value is " + str(round(t1, 5)))
    ##plot the control function
    plot_cursor_func(t2, mid, t1, freq_lim[0], freq_lim[1])
    #plt.show()
    return t2, mid, t1, num_t1, num_t2, num_miss, p
def sim_bmi(baseline_data, t1, t2, midpoint, timeout, timeout_pause, p):
    """Simulate target hits of a rat-style BMI over a baseline cursor trace.

    The cursor trace is scanned at a 100 ms sample interval. A sample
    >= ``t1`` (or <= ``t2``) counts as a target hit, followed by a 4 s
    reward pause and a wait for the cursor to return across ``midpoint``.
    If ``timeout`` seconds elapse with no hit, a miss is scored and the
    trace skips forward by ``timeout_pause`` seconds.

    Parameters
    ----------
    baseline_data : array, shape (n_samples,)
        Cursor (E1-E2) values, one per 100 ms bin.
    t1, t2 : float
        High and low target thresholds.
    midpoint : float
        Baseline level the cursor must re-cross after a hit.
    timeout, timeout_pause : float
        Timeout / post-timeout pause, in seconds.
    p :
        Unused; kept for interface compatibility with callers.

    Returns
    -------
    (num_t1, num_t2, num_miss) : hit and miss counts.
    """
    data = baseline_data
    samp_int = 100.  # ms per sample
    ##get the timeout duration in samples
    timeout_samps = int((timeout*1000.0)/samp_int)
    timeout_pause_samps = int((timeout_pause*1000.0)/samp_int)
    num_t1 = 0
    num_t2 = 0
    num_miss = 0
    ##run through the data and simulate BMI
    i = 0
    clock = 0
    while i < (data.shape[0]-1):
        cursor = data[i]
        if cursor >= t1:
            ##high-target hit: score it, skip the 4 s reward period
            num_t1 += 1
            i += int(4000/samp_int)
            ##wait for the cursor to fall back to baseline
            while cursor >= midpoint and i < (data.shape[0]-1):
                i += 1
                cursor = data[i]
            clock = 0
        elif cursor <= t2:
            ##low-target hit: score it, skip the 4 s reward period
            num_t2 += 1
            i += int(4000/samp_int)
            ##BUG FIX: the original tested `cursor >= midpoint` here (copied
            ##from the t1 branch), so it never waited and could re-score T2
            ##hits back-to-back; after a low hit we wait for the cursor to
            ##RISE back to baseline.
            while cursor <= midpoint and i < (data.shape[0]-1):
                i += 1
                cursor = data[i]
            clock = 0
        elif clock >= timeout_samps:
            ##timeout: score a miss and skip the pause period
            i += timeout_pause_samps
            num_miss += 1
            clock = 0
        else:
            ##no event: advance one sample
            i += 1
            clock += 1
    return num_t1, num_t2, num_miss
def ismore_sim_bmi(baseline_data, decoder, targets_matrix=None, session_length=0.):
    """Replay recorded neural features through a simulated IsMore BMI task.

    Builds a SimBMIControlReplayFile task (with HDF saving enabled), runs it
    synchronously, then saves the decoder/HDF via save_dec_enc.

    Parameters
    ----------
    baseline_data : array
        Neural features replayed to the simulated task.
    decoder :
        Decoder driving the simulated plant (e.g. an IsmoreSleepDecoder).
    targets_matrix : optional
        Target definitions; forwarded to the task only when provided.
    session_length : float
        Session length forwarded to the task.

    Returns
    -------
    str : path of the pickle file written by save_dec_enc (the matching
        .hdf is copied alongside it).
    """
    import ismore.invasive.bmi_ismoretasks as bmi_ismoretasks
    from riglib import experiment
    from features.hdf_features import SaveHDF
    from ismore.brainamp_features import SimBrainAmpData
    import datetime
    import numpy as np
    import matplotlib.pyplot as plt
    import multiprocessing as mp
    from features.blackrock_features import BlackrockBMI
    from ismore.exo_3D_visualization import Exo3DVisualizationInvasive
    # 100 targets from the sleep generator
    targets = bmi_ismoretasks.SimBMIControlReplayFile.sleep_gen(length=100)
    plant_type = 'IsMore'
    kwargs=dict(session_length=session_length, replay_neural_features=baseline_data, decoder=decoder)
    if targets_matrix is not None:
        kwargs['targets_matrix']=targets_matrix
    # Compose the task class with HDF saving (3D visualization left disabled)
    Task = experiment.make(bmi_ismoretasks.SimBMIControlReplayFile, [SaveHDF])#, Exo3DVisualizationInvasive])
    task = Task(targets, plant_type=plant_type, **kwargs)
    task.run_sync()
    pnm = save_dec_enc(task)
    return pnm
def plot_cursor_func(t2, mid, t1, min_freq, max_freq):
    """Plot the quadratic cursor-to-frequency map with its three anchor
    points (T2 -> min_freq, baseline -> mid frequency, T1 -> max_freq)
    annotated."""
    _, axis = plt.subplots()
    cursor_vals = np.linspace(t2-1, t1+1, 1000)
    func = map_to_freq(t2, mid, t1, min_freq, max_freq)
    mid_freq = np.floor((max_freq-min_freq)/2)
    # Anchor points of the map, then the fitted curve itself
    axis.plot(t2, min_freq, 'o', color = 'r')
    axis.plot(mid, mid_freq, 'o', color = 'r')
    axis.plot(t1, max_freq, 'o', color = 'r')
    axis.plot(cursor_vals, func(cursor_vals), '-', color = 'g')
    # Annotate all three anchors with the same callout style
    callouts = [
        ('T1: ('+str(round(t1, 3))+')', (t1, max_freq)),
        ('T2: ('+str(round(t2, 3))+')', (t2, min_freq)),
        ('Base: ('+str(round(mid, 3))+')', (mid, mid_freq)),
    ]
    for label, point in callouts:
        axis.annotate(label, xy=point, xytext=(-20, 20),
                      textcoords='offset points', ha='center', va='bottom',
                      bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.3),
                      arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.5',
                                      color='red'))
    axis.set_ylabel("Feedback frequency")
    axis.set_xlabel("Cursor value (E1-E2)")
    axis.set_title("Cursor-frequency map", fontsize = 18)
def save_dec_enc(task, pref='sleep_sim_'):
    '''
    Summary: method to save encoder / decoder and hdf file information from task in sim_data folder
    Input param: task: task, output from arm_assist_main, or generally task object
    Input param: pref: prefix to saved file names (defaults to 'sleep_sim_')
    Output param: pkl file name used to save encoder/decoder
    '''
    #enc = task.encoder
    task.decoder.save()
    #enc.corresp_dec = task.decoder
    #Save task info
    import pickle
    # NOTE(review): relies on a module-level `import datetime`; not imported here.
    ct = datetime.datetime.now()
    #pnm = '/Users/preeyakhanna/ismore/ismore_tests/sim_data/'+pref + ct.strftime("%Y%m%d_%H_%M_%S") + '.pkl'
    # Primary (rig machine) path; falls back to the laptop path on failure.
    pnm = '/home/tecnalia/code/ismore/ismore_tests/sim_data/'+pref + ct.strftime("%m%d%y_%H%M") + '.pkl'
    pnm2 = '/Users/preeyakhanna/code/ismore/ismore_tests/sim_data/'+pref + ct.strftime("%m%d%y_%H%M") + '.pkl'
    try:
        pickle.dump(dict(), open(pnm,'wb'))
    except:
        # Directory not present on this machine -- use the alternate path.
        pickle.dump(dict(), open(pnm2, 'wb'))
        pnm = pnm2
    #Save HDF file next to the pickle
    new_hdf = pnm[:-4]+'.hdf'
    import shutil
    # Open/close the temp HDF file; presumably to ensure it exists/flushes -- verify
    f = open(task.h5file.name)
    f.close()
    #Wait
    import time
    time.sleep(1.)
    #Wait after HDF cleaned up
    task.cleanup_hdf()
    import time
    time.sleep(1.)
    #Copy temp file to actual desired location
    shutil.copy(task.h5file.name, new_hdf)
    f = open(new_hdf)
    f.close()
    #Return filename
    return pnm
"numpy.sum",
"numpy.polyfit",
"numpy.argmax",
"numpy.floor",
"numpy.ones",
"numpy.argmin",
"numpy.arange",
"numpy.exp",
"shutil.copy",
"sklearn.mixture.GMM",
"riglib.bmi.state_space_models.StateSpaceEndptPos1D",
"numpy.max",
"numpy.linspace",
"datetime.datetime.now",
"matplotlib.pyplot.s... | [((5607, 5643), 're.compile', 're.compile', (['"""(\\\\d{1,3})\\\\s*(\\\\w{1})"""'], {}), "('(\\\\d{1,3})\\\\s*(\\\\w{1})')\n", (5617, 5643), False, 'import re\n'), ((5875, 5922), 'numpy.logical_or', 'np.logical_or', (['(e1_inds is None)', '(e2_inds is None)'], {}), '(e1_inds is None, e2_inds is None)\n', (5888, 5922), True, 'import numpy as np\n'), ((10951, 10967), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (10960, 10967), True, 'import numpy as np\n'), ((11370, 11385), 'numpy.exp', 'np.exp', (['logprob'], {}), '(logprob)\n', (11376, 11385), True, 'import numpy as np\n'), ((12423, 12446), 'numpy.array', 'np.array', (['[t2, mid, t1]'], {}), '([t2, mid, t1])\n', (12431, 12446), True, 'import numpy as np\n'), ((12541, 12572), 'numpy.polyfit', 'np.polyfit', (['fr_pts', 'freq_pts', '(2)'], {}), '(fr_pts, freq_pts, 2)\n', (12551, 12572), True, 'import numpy as np\n'), ((12581, 12593), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (12590, 12593), True, 'import numpy as np\n'), ((12824, 12838), 'numpy.argmax', 'np.argmax', (['pdf'], {}), '(pdf)\n', (12833, 12838), True, 'import numpy as np\n'), ((12875, 12889), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12887, 12889), True, 'import matplotlib.pyplot as plt\n'), ((17785, 17846), 'ismore.invasive.bmi_ismoretasks.SimBMIControlReplayFile.sleep_gen', 'bmi_ismoretasks.SimBMIControlReplayFile.sleep_gen', ([], {'length': '(100)'}), '(length=100)\n', (17834, 17846), True, 'import ismore.invasive.bmi_ismoretasks as bmi_ismoretasks\n'), ((18079, 18146), 'riglib.experiment.make', 'experiment.make', (['bmi_ismoretasks.SimBMIControlReplayFile', '[SaveHDF]'], {}), '(bmi_ismoretasks.SimBMIControlReplayFile, [SaveHDF])\n', (18094, 18146), False, 'from riglib import experiment\n'), ((18369, 18383), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (18381, 18383), True, 'import matplotlib.pyplot as plt\n'), ((18392, 18425), 'numpy.linspace', 
'np.linspace', (['(t2 - 1)', '(t1 + 1)', '(1000)'], {}), '(t2 - 1, t1 + 1, 1000)\n', (18403, 18425), True, 'import numpy as np\n'), ((20429, 20452), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20450, 20452), False, 'import datetime\n'), ((21059, 21074), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (21069, 21074), False, 'import time\n'), ((21149, 21164), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (21159, 21164), False, 'import time\n'), ((21216, 21254), 'shutil.copy', 'shutil.copy', (['task.h5file.name', 'new_hdf'], {}), '(task.h5file.name, new_hdf)\n', (21227, 21254), False, 'import shutil\n'), ((2086, 2101), 'numpy.squeeze', 'np.squeeze', (['obs'], {}), '(obs)\n', (2096, 2101), True, 'import numpy as np\n'), ((2218, 2238), 'numpy.dot', 'np.dot', (['d_fr', 'self.A'], {}), '(d_fr, self.A)\n', (2224, 2238), True, 'import numpy as np\n'), ((3581, 3596), 'numpy.squeeze', 'np.squeeze', (['obs'], {}), '(obs)\n', (3591, 3596), True, 'import numpy as np\n'), ((4039, 4059), 'numpy.dot', 'np.dot', (['d_fr', 'self.A'], {}), '(d_fr, self.A)\n', (4045, 4059), True, 'import numpy as np\n'), ((6520, 6545), 'numpy.zeros', 'np.zeros', (['(T - nsteps, 2)'], {}), '((T - nsteps, 2))\n', (6528, 6545), True, 'import numpy as np\n'), ((6580, 6600), 'numpy.zeros', 'np.zeros', (['(T - nsteps)'], {}), '(T - nsteps)\n', (6588, 6600), True, 'import numpy as np\n'), ((7256, 7302), 'numpy.percentile', 'np.percentile', (['baseline_data[20:, 0]', 'sat_perc'], {}), '(baseline_data[20:, 0], sat_perc)\n', (7269, 7302), True, 'import numpy as np\n'), ((7321, 7367), 'numpy.percentile', 'np.percentile', (['baseline_data[20:, 1]', 'sat_perc'], {}), '(baseline_data[20:, 1], sat_perc)\n', (7334, 7367), True, 'import numpy as np\n'), ((11188, 11202), 'numpy.argmin', 'np.argmin', (['AIC'], {}), '(AIC)\n', (11197, 11202), True, 'import numpy as np\n'), ((11502, 11516), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11514, 11516), True, 
'import matplotlib.pyplot as plt\n'), ((12242, 12271), 'sklearn.metrics.auc', 'metrics.auc', (['x_range', 'y_range'], {}), '(x_range, y_range)\n', (12253, 12271), False, 'from sklearn import metrics\n'), ((18571, 18606), 'numpy.floor', 'np.floor', (['((max_freq - min_freq) / 2)'], {}), '((max_freq - min_freq) / 2)\n', (18579, 18606), True, 'import numpy as np\n'), ((1543, 1576), 'numpy.zeros', 'np.zeros', (['(self.n_steps, n_units)'], {}), '((self.n_steps, n_units))\n', (1551, 1576), True, 'import numpy as np\n'), ((2118, 2157), 'numpy.sum', 'np.sum', (['self.Y[:, self.e1_inds]'], {'axis': '(1)'}), '(self.Y[:, self.e1_inds], axis=1)\n', (2124, 2157), True, 'import numpy as np\n'), ((2160, 2199), 'numpy.sum', 'np.sum', (['self.Y[:, self.e2_inds]'], {'axis': '(1)'}), '(self.Y[:, self.e2_inds], axis=1)\n', (2166, 2199), True, 'import numpy as np\n'), ((3182, 3206), 'numpy.ones', 'np.ones', (['(self.n_steps,)'], {}), '((self.n_steps,))\n', (3189, 3206), True, 'import numpy as np\n'), ((3753, 3792), 'numpy.sum', 'np.sum', (['self.Y[:, self.e1_inds]'], {'axis': '(1)'}), '(self.Y[:, self.e1_inds], axis=1)\n', (3759, 3792), True, 'import numpy as np\n'), ((3949, 3988), 'numpy.sum', 'np.sum', (['self.Y[:, self.e2_inds]'], {'axis': '(1)'}), '(self.Y[:, self.e2_inds], axis=1)\n', (3955, 3988), True, 'import numpy as np\n'), ((8134, 8148), 'numpy.argmax', 'np.argmax', (['pdf'], {}), '(pdf)\n', (8143, 8148), True, 'import numpy as np\n'), ((8307, 8331), 'numpy.linspace', 'np.linspace', (['t2', 't1', '(100)'], {}), '(t2, t1, 100)\n', (8318, 8331), True, 'import numpy as np\n'), ((8549, 8567), 'numpy.arange', 'np.arange', (['(-10)', '(10)'], {}), '(-10, 10)\n', (8558, 8567), True, 'import numpy as np\n'), ((8720, 8734), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8732, 8734), True, 'import matplotlib.pyplot as plt\n'), ((13330, 13346), 'numpy.where', 'np.where', (['(x > t2)'], {}), '(x > t2)\n', (13338, 13346), True, 'import numpy as np\n'), ((13401, 13417), 
'numpy.where', 'np.where', (['(x > t1)'], {}), '(x > t1)\n', (13409, 13417), True, 'import numpy as np\n'), ((1241, 1266), 'numpy.array', 'np.array', (['self.state.mean'], {}), '(self.state.mean)\n', (1249, 1266), True, 'import numpy as np\n'), ((1445, 1469), 'numpy.ones', 'np.ones', (['(self.n_steps,)'], {}), '((self.n_steps,))\n', (1452, 1469), True, 'import numpy as np\n'), ((6715, 6771), 'numpy.sum', 'np.sum', (['neural_features[ib:ib + nsteps, e1_inds]'], {'axis': '(1)'}), '(neural_features[ib:ib + nsteps, e1_inds], axis=1)\n', (6721, 6771), True, 'import numpy as np\n'), ((6831, 6887), 'numpy.sum', 'np.sum', (['neural_features[ib:ib + nsteps, e2_inds]'], {'axis': '(1)'}), '(neural_features[ib:ib + nsteps, e2_inds], axis=1)\n', (6837, 6887), True, 'import numpy as np\n'), ((8431, 8456), 'numpy.max', 'np.max', (['[freq_lim[0], yi]'], {}), '([freq_lim[0], yi])\n', (8437, 8456), True, 'import numpy as np\n'), ((8474, 8499), 'numpy.min', 'np.min', (['[freq_lim[1], yi]'], {}), '([freq_lim[1], yi])\n', (8480, 8499), True, 'import numpy as np\n'), ((9162, 9203), 'riglib.bmi.state_space_models.StateSpaceEndptPos1D', 'state_space_models.StateSpaceEndptPos1D', ([], {}), '()\n', (9201, 9203), False, 'from riglib.bmi import state_space_models\n'), ((9351, 9366), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (9364, 9366), False, 'import pdb\n'), ((9839, 9875), 'tables.openFile', 'tables.openFile', (["(pname[:-4] + '.hdf')"], {}), "(pname[:-4] + '.hdf')\n", (9854, 9875), False, 'import tables\n'), ((9931, 9945), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9943, 9945), True, 'import matplotlib.pyplot as plt\n'), ((11058, 11067), 'sklearn.mixture.GMM', 'GMM', (['N[i]'], {}), '(N[i])\n', (11061, 11067), False, 'from sklearn.mixture import GMM\n'), ((19459, 19494), 'numpy.floor', 'np.floor', (['((max_freq - min_freq) / 2)'], {}), '((max_freq - min_freq) / 2)\n', (19467, 19494), True, 'import numpy as np\n'), ((3676, 3715), 'numpy.sum', 'np.sum', 
(['self.Y[:, self.e1_inds]'], {'axis': '(1)'}), '(self.Y[:, self.e1_inds], axis=1)\n', (3682, 3715), True, 'import numpy as np\n'), ((3872, 3911), 'numpy.sum', 'np.sum', (['self.Y[:, self.e2_inds]'], {'axis': '(1)'}), '(self.Y[:, self.e2_inds], axis=1)\n', (3878, 3911), True, 'import numpy as np\n'), ((6264, 6314), 'numpy.logical_and', 'np.logical_and', (['(u[0] in e1[:, 0])', '(u[1] in e1[:, 1])'], {}), '(u[0] in e1[:, 0], u[1] in e1[:, 1])\n', (6278, 6314), True, 'import numpy as np\n'), ((6377, 6427), 'numpy.logical_and', 'np.logical_and', (['(u[0] in e2[:, 0])', '(u[1] in e2[:, 1])'], {}), '(u[0] in e2[:, 0], u[1] in e2[:, 1])\n', (6391, 6427), True, 'import numpy as np\n'), ((6959, 7015), 'numpy.sum', 'np.sum', (['neural_features[ib:ib + nsteps, e1_inds]'], {'axis': '(1)'}), '(neural_features[ib:ib + nsteps, e1_inds], axis=1)\n', (6965, 7015), True, 'import numpy as np\n'), ((7040, 7096), 'numpy.sum', 'np.sum', (['neural_features[ib:ib + nsteps, e2_inds]'], {'axis': '(1)'}), '(neural_features[ib:ib + nsteps, e2_inds], axis=1)\n', (7046, 7096), True, 'import numpy as np\n'), ((10060, 10109), 'numpy.nonzero', 'np.nonzero', (["(hdf.root.task[:]['target_index'] == 1)"], {}), "(hdf.root.task[:]['target_index'] == 1)\n", (10070, 10109), True, 'import numpy as np\n'), ((10490, 10542), 'numpy.nonzero', 'np.nonzero', (["(hdf.root.task_msgs[:]['msg'] == 'reward')"], {}), "(hdf.root.task_msgs[:]['msg'] == 'reward')\n", (10500, 10542), True, 'import numpy as np\n'), ((10274, 10323), 'numpy.nonzero', 'np.nonzero', (["(hdf.root.task[:]['target_index'] == 0)"], {}), "(hdf.root.task[:]['target_index'] == 0)\n", (10284, 10323), True, 'import numpy as np\n')] |
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname('.'), os.path.pardir)))
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.patches import Ellipse
from matplotlib.patches import Arrow
from matplotlib.patches import Wedge
class FrameTracker(object):
    """
    Displays one frame of a video and steps through frames with the
    keyboard: up/down move +-1 frame, right/left move +-50, wrapping at
    both ends. Class used by the frame_scroll method.

    Parameters
    ------------------------
    ax : object containing elements of a figure
        Used to set window title and axis labels
    video : array_like
        series of video frames
    """
    def __init__(self, ax, video):
        self.ax = ax
        self.ax.set_title('Use keyboard to navigate images')
        self.video = video
        self.slices = len(self.video)
        self.ind = 0
        self.im = ax.imshow(self.video[self.ind])
        self.ax.set_xlabel('Frame %s' % self.ind)

    def on_key(self, event):
        # Step size per key; modulo wraps the index at both ends.
        steps = {'up': 1, 'down': -1, 'right': 50, 'left': -50}
        if event.key in steps:
            self.ind = (self.ind + steps[event.key]) % self.slices
        # Redraw even for unrecognized keys (matches the original behavior).
        self.update()

    def update(self):
        self.im.set_data(self.video[self.ind])
        self.ax.set_xlabel('Frame %s' % self.ind)
        self.im.axes.figure.canvas.draw()
class EyelidTracker(FrameTracker):
    """
    Frame scroller that, when available, shows the eyelid-removed version
    of the current frame instead of the raw frame. Class used by the
    eyelid_scroll method.

    Parameters
    ------------------------
    ax : object containing elements of a figure
        Used to set window title and axis labels
    video : array_like
        series of video frames
    eyelid_list : dictionary
        dictionary keyed by frame index whose values are frames with the
        eyelid removed. Does not need to include all video frames.
    """
    def __init__(self, ax, video, eyelid_list):
        FrameTracker.__init__(self, ax, video)
        self.eyelid_list = eyelid_list

    def update(self):
        # Default to the raw frame; substitute the eyelid-removed frame
        # when one exists for this index.
        frame = self.video[self.ind]
        if self.ind in self.eyelid_list:
            self.eyelid_at_ind = self.eyelid_list[self.ind]
            if self.eyelid_at_ind is not None:
                frame = self.eyelid_at_ind
        self.im.set_data(frame)
        self.ax.set_xlabel('Frame %s' % self.ind)
        self.im.axes.figure.canvas.draw()
class PupilTracker(FrameTracker):
    """
    Object that displays a video frame with the located pupil overlayed. Class used by pupil_scroll method.

    Parameters
    ------------------------
    ax : object containing elements of a figure
        Used to set window title and axis labels
    video : array_like
        series of video frames
    pupil_list : dictionary
        dictionary of pupils where the key is the frame index and the value is the pupil.
        Does not need to include all video frames.
    """
    def __init__(self, ax, video, pupil_list):
        FrameTracker.__init__(self, ax, video)
        self.pupil_list = pupil_list
    def update(self):
        # Remove the previous frame's overlay before drawing a new one.
        # AttributeError: patches not created yet (first update);
        # ValueError: patch already detached from the axes.
        try:
            self.pupil_patch.remove()
            self.center_patch.remove()
        except AttributeError:
            pass
        except ValueError:
            pass
        if self.ind in self.pupil_list:
            self.pupil_at_ind = self.pupil_list[self.ind]
            if self.pupil_at_ind:
                #self.pupil_circle = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),self.pupil_at_ind.radius,fill=False,ec=[1,0,0])
                # Pupil outline drawn as an ellipse; small filled dot marks its center.
                self.pupil_circle = Ellipse((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row), self.pupil_at_ind.width, self.pupil_at_ind.height,fill=False,ec=[1,0,0])
                self.pupil_center = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),int(0.1*self.pupil_at_ind.radius),fill=True,ec=[1,0,0], fc=[1,0,0])
                self.pupil_patch = self.ax.add_patch(self.pupil_circle)
                self.center_patch = self.ax.add_patch(self.pupil_center)
            else:
                print('ERROR: No pupil at frame index %d' % (self.ind))
        self.im.set_data(self.video[self.ind])
        self.ax.set_xlabel('Frame %s' % self.ind)
        self.im.axes.figure.canvas.draw()
class TorsionTracker(FrameTracker):
    '''
    Torsion tracking object. Window updates x y axis to visualize torsion. Class used by torsion_scroll method.

    Parameters:
    ------------------------
    ax : object containing elements of a figure
        Used to set window title and axis labels
    video : array_like
        video to scroll through
    pupil_list : dictionary
        dictionary of pupils where the key is the frame index and the value is the pupil.
        Does not need to include all video frames.
    offset_first_frame : dictionary
        dictionary of rotation angles. key is the frame index and the value is the rotation.
        Does not need to include all video frames
    '''
    def __init__(self, ax, video, pupil_list, offset_first_frame):
        FrameTracker.__init__(self, ax, video)
        self.pupil_list = pupil_list
        self.torsion_list = offset_first_frame
    def update(self):
        # Remove the previous frame's overlay patches before redrawing.
        # AttributeError: patches not created yet; ValueError: already detached.
        try:
            self.pupil_patch.remove()
            self.center_patch.remove()
            self.x_patch.remove()
            self.y_patch.remove()
        except AttributeError:
            pass
        except ValueError:
            pass
        if self.ind in self.pupil_list:
            self.pupil_at_ind = self.pupil_list[self.ind]
            if self.pupil_at_ind:
                # Pupil outline and a small filled dot at its center
                self.pupil_circle = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),self.pupil_at_ind.radius,fill=False,ec=[1,0,0])
                self.pupil_center = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),int(0.1*self.pupil_at_ind.radius),fill=True,ec=[1,0,0],fc=[1,0,0])
                self.pupil_patch = self.ax.add_patch(self.pupil_circle)
                self.center_patch = self.ax.add_patch(self.pupil_center)
            else:
                print('ERROR: No pupil at frame index %d' % (self.ind))
        if self.ind in self.torsion_list:
            self.angle = self.torsion_list[self.ind]
            # NOTE(review): assumes `video` exposes a .height attribute (custom
            # video object, not a plain array) -- confirm against the caller.
            radius = self.video.height/2
            # NOTE(review): self.pupil_at_ind may be stale (from a previous
            # frame) or unset on the first frame if pupil_list lacks this index.
            if self.pupil_at_ind:
                # Two arrows show the rotated x/y axes at the pupil center; the
                # y-component is negated, presumably because image row
                # coordinates increase downward -- confirm.
                self.x_axis = Arrow(self.pupil_at_ind.center_col, self.pupil_at_ind.center_row,radius*np.cos(np.pi*(self.angle)/180),-radius*np.sin(np.pi*(self.angle)/180), width = 5, ec=[1,0,0], fc=[1,0,0], fill=True)
                self.y_axis = Arrow(self.pupil_at_ind.center_col, self.pupil_at_ind.center_row,radius*np.cos(np.pi*(self.angle+90)/180),-radius*np.sin(np.pi*(self.angle+90)/180), width = 5, ec=[1,0,0], fc=[1,0,0], fill=True)
                self.x_patch = self.ax.add_patch(self.x_axis)
                self.y_patch = self.ax.add_patch(self.y_axis)
            else:
                print('ERROR: No pupil at frame index %d' % (self.ind))
        self.im.set_data(self.video[self.ind])
        self.ax.set_xlabel('Frame %s' % self.ind)
        self.im.axes.figure.canvas.draw()
class WindowTracker(FrameTracker):
    '''
    Window tracking object. Window updates window location while frames are scrolling. Class used by window_scroll method.

    Parameters:
    ------------------------
    ax : object containing elements of a figure
        Used to set window title and axis labels
    video : array_like
        video to scroll through
    pupil_list : dictionary
        dictionary of pupils where the key is the frame index and the value is the pupil.
        Does not need to include all video frames.
    offset_first_frame : dictionary
        dictionary of rotation angles. key is the frame index and the value is the rotation.
        Does not need to include all video frames
    theta_window : tuple of angles
        theta[0] is the lower bound of the window
        theta[1] is the upper bound of the window
    WINDOW_RADIUS: integer
        Pixel width of the window radius
    '''
    def __init__(self, ax, video, pupil_list, offset_first_frame,theta_window,WINDOW_RADIUS):
        FrameTracker.__init__(self, ax, video)
        self.pupil_list = pupil_list
        self.offset_first_frame = offset_first_frame
        self.theta_window = theta_window
        self.WINDOW_RADIUS = WINDOW_RADIUS
    def update(self):
        # Remove the previous frame's overlay patches before redrawing.
        # AttributeError: patches not created yet; ValueError: already detached.
        try:
            self.pupil_patch.remove()
            self.center_patch.remove()
            self.window_patch.remove()
        except AttributeError:
            pass
        except ValueError:
            pass
        if self.ind in self.pupil_list:
            self.pupil_at_ind = self.pupil_list[self.ind]
            if self.pupil_at_ind:
                # Pupil outline and a small filled dot at its center
                self.pupil_circle = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),self.pupil_at_ind.radius,fill=False,ec=[1,0,0])
                self.pupil_center = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),int(0.1*self.pupil_at_ind.radius),fill=True,ec=[1,0,0],fc=[1,0,0])
                self.pupil_patch = self.ax.add_patch(self.pupil_circle)
                self.center_patch = self.ax.add_patch(self.pupil_center)
            else:
                print('ERROR: No pupil at frame index %d' % (self.ind))
        if self.ind in self.offset_first_frame:
            self.angle = self.offset_first_frame[self.ind]
            radius = self.video.height/2
            # NOTE(review): self.pupil_at_ind may be stale or unset if this
            # frame has no entry in pupil_list -- confirm upstream guarantees.
            # Wedge args: (center, outer radius, theta1, theta2, width); the
            # window angles are offset by the current torsion and negated,
            # presumably to account for image coordinates -- confirm.
            self.window = Wedge((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),self.pupil_at_ind.radius+self.WINDOW_RADIUS,-(self.theta_window[1]+self.angle),-(self.theta_window[0]+self.angle),self.WINDOW_RADIUS,fill=False,ec=[1,0,0])
            self.window_patch = self.ax.add_patch(self.window)
        self.im.set_data(self.video[self.ind])
        self.ax.set_xlabel('Frame %s' % self.ind)
        self.im.axes.figure.canvas.draw()
        #plt.savefig('frame_%d.png' % (self.ind), bbox_inches='tight')
def frame_scroll(video):
    '''
    Open an interactive window for stepping through ``video`` frames with
    the keyboard (see FrameTracker for the key bindings).

    Parameters:
    ------------------------
    video : array_like
        video to scroll through
    '''
    figure, axis = plt.subplots(1, 1)
    scroller = FrameTracker(axis, video)
    figure.canvas.mpl_connect('key_press_event', scroller.on_key)
    plt.show()
def eyelid_scroll(video, eyelid_list):
    '''
    Scroll through ``video`` with the eyelid-removed frame shown wherever
    one is available.

    Parameters:
    ------------------------
    video : array_like
        video to scroll through
    eyelid_list : dictionary
        dictionary keyed by frame index whose values are the eyelid-removed
        frames. Does not need to include all video frames.
    '''
    figure, axis = plt.subplots(1, 1)
    scroller = EyelidTracker(axis, video, eyelid_list)
    figure.canvas.mpl_connect('key_press_event', scroller.on_key)
    plt.show()
def pupil_scroll(video,pupil_list):
    '''
    Frame scroll with the detected pupil drawn on top of each frame.
    Parameters:
    ------------------------
    video : array_like
        video to scroll through
    pupil_list : dictionary
        dictionary of pupils where the key is the frame index and the value is the pupil.
        Does not need to include all video frames.
    '''
    figure, axes = plt.subplots(1, 1)
    scroller = PupilTracker(axes, video, pupil_list)
    figure.canvas.mpl_connect('key_press_event', scroller.on_key)
    plt.show()
def torsion_scroll(video, pupil_list, offset_first_frame):
    '''
    Frame scroll that visualizes torsion with a rotating 2D axis overlay.
    Parameters:
    ------------------------
    video : array_like
        video to scroll through
    pupil_list : dictionary
        dictionary of pupils where the key is the frame index and the value is the pupil.
        Does not need to include all video frames.
    offset_first_frame : dictionary
        dictionary of rotation angles. key is the frame index and the value is the rotation.
        Does not need to include all video frames
    '''
    figure, axes = plt.subplots(1, 1)
    scroller = TorsionTracker(axes, video, pupil_list, offset_first_frame)
    figure.canvas.mpl_connect('key_press_event', scroller.on_key)
    plt.show()
def window_scroll(video,pupil_list,offset_first_frame,theta_window,WINDOW_RADIUS):
    '''
    Frame scroll that draws the angular measurement window on each frame.
    Parameters:
    ------------------------
    video : array_like
        video to scroll through
    pupil_list : dictionary
        dictionary of pupils where the key is the frame index and the value is the pupil.
        Does not need to include all video frames.
    offset_first_frame : dictionary
        dictionary of rotation angles. key is the frame index and the value is the rotation.
        Does not need to include all video frames
    theta_window : tuple of angles
        theta[0] is the lower bound of the window
        theta[1] is the upper bound of the window
    WINDOW_RADIUS: integer
        Pixel width of the window radius
    '''
    figure, axes = plt.subplots(1, 1)
    scroller = WindowTracker(axes, video, pupil_list, offset_first_frame,
                            theta_window, WINDOW_RADIUS)
    figure.canvas.mpl_connect('key_press_event', scroller.on_key)
    plt.show()
| [
"matplotlib.pyplot.show",
"os.path.dirname",
"matplotlib.patches.Wedge",
"matplotlib.patches.Circle",
"numpy.sin",
"numpy.cos",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.subplots"
] | [((10742, 10760), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (10754, 10760), True, 'import matplotlib.pyplot as plt\n'), ((10865, 10875), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10873, 10875), True, 'import matplotlib.pyplot as plt\n'), ((11330, 11348), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (11342, 11348), True, 'import matplotlib.pyplot as plt\n'), ((11467, 11477), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11475, 11477), True, 'import matplotlib.pyplot as plt\n'), ((11928, 11946), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (11940, 11946), True, 'import matplotlib.pyplot as plt\n'), ((12063, 12073), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12071, 12073), True, 'import matplotlib.pyplot as plt\n'), ((12794, 12812), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (12806, 12812), True, 'import matplotlib.pyplot as plt\n'), ((12950, 12960), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12958, 12960), True, 'import matplotlib.pyplot as plt\n'), ((13967, 13985), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (13979, 13985), True, 'import matplotlib.pyplot as plt\n'), ((14150, 14160), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14158, 14160), True, 'import matplotlib.pyplot as plt\n'), ((60, 80), 'os.path.dirname', 'os.path.dirname', (['"""."""'], {}), "('.')\n", (75, 80), False, 'import os\n'), ((9993, 10244), 'matplotlib.patches.Wedge', 'Wedge', (['(self.pupil_at_ind.center_col, self.pupil_at_ind.center_row)', '(self.pupil_at_ind.radius + self.WINDOW_RADIUS)', '(-(self.theta_window[1] + self.angle))', '(-(self.theta_window[0] + self.angle))', 'self.WINDOW_RADIUS'], {'fill': '(False)', 'ec': '[1, 0, 0]'}), '((self.pupil_at_ind.center_col, self.pupil_at_ind.center_row), self.\n pupil_at_ind.radius + self.WINDOW_RADIUS, 
-(self.theta_window[1] + self\n .angle), -(self.theta_window[0] + self.angle), self.WINDOW_RADIUS, fill\n =False, ec=[1, 0, 0])\n', (9998, 10244), False, 'from matplotlib.patches import Wedge\n'), ((3746, 3897), 'matplotlib.patches.Ellipse', 'Ellipse', (['(self.pupil_at_ind.center_col, self.pupil_at_ind.center_row)', 'self.pupil_at_ind.width', 'self.pupil_at_ind.height'], {'fill': '(False)', 'ec': '[1, 0, 0]'}), '((self.pupil_at_ind.center_col, self.pupil_at_ind.center_row), self.\n pupil_at_ind.width, self.pupil_at_ind.height, fill=False, ec=[1, 0, 0])\n', (3753, 3897), False, 'from matplotlib.patches import Ellipse\n'), ((5936, 6061), 'matplotlib.patches.Circle', 'Circle', (['(self.pupil_at_ind.center_col, self.pupil_at_ind.center_row)', 'self.pupil_at_ind.radius'], {'fill': '(False)', 'ec': '[1, 0, 0]'}), '((self.pupil_at_ind.center_col, self.pupil_at_ind.center_row), self.\n pupil_at_ind.radius, fill=False, ec=[1, 0, 0])\n', (5942, 6061), False, 'from matplotlib.patches import Circle\n'), ((9298, 9423), 'matplotlib.patches.Circle', 'Circle', (['(self.pupil_at_ind.center_col, self.pupil_at_ind.center_row)', 'self.pupil_at_ind.radius'], {'fill': '(False)', 'ec': '[1, 0, 0]'}), '((self.pupil_at_ind.center_col, self.pupil_at_ind.center_row), self.\n pupil_at_ind.radius, fill=False, ec=[1, 0, 0])\n', (9304, 9423), False, 'from matplotlib.patches import Circle\n'), ((6729, 6761), 'numpy.cos', 'np.cos', (['(np.pi * self.angle / 180)'], {}), '(np.pi * self.angle / 180)\n', (6735, 6761), True, 'import numpy as np\n'), ((6768, 6800), 'numpy.sin', 'np.sin', (['(np.pi * self.angle / 180)'], {}), '(np.pi * self.angle / 180)\n', (6774, 6800), True, 'import numpy as np\n'), ((6948, 6987), 'numpy.cos', 'np.cos', (['(np.pi * (self.angle + 90) / 180)'], {}), '(np.pi * (self.angle + 90) / 180)\n', (6954, 6987), True, 'import numpy as np\n'), ((6990, 7029), 'numpy.sin', 'np.sin', (['(np.pi * (self.angle + 90) / 180)'], {}), '(np.pi * (self.angle + 90) / 180)\n', (6996, 7029), 
True, 'import numpy as np\n')] |
import glob
from torchvision import utils
from torch.utils.data import DataLoader, Dataset
from shutil import copyfile
from tqdm import tqdm
import os
import cv2
from skimage import io
import multiprocessing
import traceback
import numpy as np
from skimage import io
import torch
import webp
SKIP_ITEM = 0  # sentinel the dataset returns for items that should be skipped
SIZE_BLOCK = 128  # padded image sides are rounded up to a multiple of this
MIN_SIZE = 128  # crops with either side smaller than this are discarded
class RawImageDataset(Dataset):
    """Dataset over ``data/raw/*.jpg``.

    Each item is ``(image, result_file_name, input_width, input_height)``
    where ``image`` is a float tensor of shape (3, H, W), padded with ones so
    both sides are multiples of SIZE_BLOCK, or ``SKIP_ITEM`` when the output
    file already exists or the source image cannot be read.
    """

    def __init__(self):
        # Fixed: the original had a duplicated assignment
        # (``file_names = file_names = glob.glob(...)``).
        file_names = glob.glob('data/raw/**.jpg', recursive=True)
        # Strip directory and the ".jpg" suffix to get the bare hash names.
        self.hashes = [f.split('/')[-1][:-4] for f in file_names]
        self.skip_existing_files = True

    def __len__(self):
        return len(self.hashes)

    def __getitem__(self, index):
        # Renamed from ``hash`` to avoid shadowing the builtin.
        file_hash = self.hashes[index]
        image_file_name = 'data/raw/{:s}.jpg'.format(file_hash)
        result_file_name = 'data/images_alpha/{:s}.webp'.format(file_hash)
        if self.skip_existing_files and os.path.exists(result_file_name):
            return SKIP_ITEM
        try:
            image = io.imread(image_file_name)
            # HWC uint8 -> CHW float in [0, 1].
            image = image.transpose((2, 0, 1)).astype(np.float32) / 255
            input_width = image.shape[2]
            input_height = image.shape[1]
            # Round both sides up to the next multiple of SIZE_BLOCK and
            # pad the extra area with ones (white).
            width = SIZE_BLOCK * (input_width // SIZE_BLOCK + 1)
            height = SIZE_BLOCK * (input_height // SIZE_BLOCK + 1)
            result = np.ones((3, height, width), dtype=np.float32)
            result[:, :image.shape[1], :image.shape[2]] = image
            image = torch.from_numpy(result)
        except Exception:
            # Narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            print("Could not open {:s}.".format(image_file_name))
            return SKIP_ITEM
        return image, result_file_name, input_width, input_height
def remove_smaller_components(mask):
    """Zero out every connected component of *mask* except the largest one.

    Operates in place and returns ``None``; does nothing when the mask has
    fewer than one foreground component.
    """
    stats_result = cv2.connectedComponentsWithStats(mask.astype(np.uint8), connectivity=4)
    labels, stats = stats_result[1], stats_result[2]
    if stats.shape[0] < 2:
        return
    # Column 4 of ``stats`` is the component area; row 0 is the background.
    biggest = 1 + np.argmax(stats[1:, 4])
    mask[labels != biggest] = 0
def save_image(image, mask, file_name):
    """Crop *image* to the foreground selected by *mask*, center it on a
    square canvas and write it as an RGBA webp (mask becomes alpha).

    Assumes ``image`` (1, 3, H, W) and ``mask`` (1, H, W) are torch tensors
    with a leading batch dimension of 1 -- TODO confirm against the caller.
    Returns silently without writing when the mask is empty or the crop is
    smaller than MIN_SIZE on either side.
    """
    image = image.squeeze(0).numpy()
    mask = mask.squeeze(0).numpy()
    # Contrast-stretch the mask around 0.5: (m - 0.5) / 0.35 + 0.5, then
    # clip, so mid-range responses are pushed toward 0 or 1.
    mask -= 0.5
    mask /= 0.35
    mask += 0.5
    mask = np.clip(mask, 0, 1)
    mask_binary = mask > 0.001
    remove_smaller_components(mask_binary)
    mask *= mask_binary # keep only the largest connected component
    # Bounding box of the remaining foreground pixels.
    coords = np.stack(mask_binary.nonzero())
    if coords.size == 0:
        return
    top_left = np.min(coords, axis=1)
    bottom_right = np.max(coords, axis=1)
    # NOTE(review): the slice end is exclusive, so the bottom/right-most
    # foreground row and column are dropped -- confirm this is intended.
    mask = mask[top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
    image = image[:, top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
    if image.shape[1] < MIN_SIZE or image.shape[2] < MIN_SIZE:
        return
    # Center the crop on a white square canvas with a transparent border
    # (channel 3 is alpha and starts at 0 everywhere).
    new_size = int(max(image.shape[1], image.shape[2]))
    result = np.ones((4, new_size, new_size))
    result[3, :, :] = 0
    y, x = (new_size - image.shape[1]) // 2, (new_size - image.shape[2]) // 2
    result[:3, y:y+image.shape[1], x:x+image.shape[2]] = image
    result[3, y:y+image.shape[1], x:x+image.shape[2]] = mask
    webp.imwrite(file_name, (result.transpose((1, 2, 0)) * 255).astype(np.uint8).copy(order='C'), quality=95)
if __name__ == '__main__':
    import torch
    from classifier import Classifier
    from torch.utils.data import DataLoader, Dataset
    # Run the trained classifier over every raw image and hand each result
    # to a process pool that crops and saves it.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    CLASSIFIER_FILENAME = 'trained_models/classifier.to'
    classifier = Classifier()
    classifier.cuda()
    classifier.load_state_dict(torch.load(CLASSIFIER_FILENAME))
    classifier.eval()
    dataset = RawImageDataset()
    # batch_size=1: items have different (padded) sizes and cannot be stacked.
    data_loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=8)
    worker_count = os.cpu_count()
    print("Using {:d} processes.".format(worker_count))
    # 'spawn' start method -- presumably so workers do not inherit CUDA
    # state from the parent; verify if this is changed.
    context = multiprocessing.get_context('spawn')
    pool = context.Pool(worker_count)
    progress = tqdm(total=len(dataset))
    def on_complete(*_):
        # Pool callback: tick the progress bar once per saved image.
        progress.update()
    for item in data_loader:
        # NOTE(review): SKIP_ITEM (0) is what the dataset returns for skipped
        # items; comparing the collated batch with ``==`` presumably works
        # because batch_size is 1 -- confirm for multi-element batches.
        if item == SKIP_ITEM:
            progress.update()
            continue
        image, result_file_name, width, height = item
        width, height = width[0].item(), height[0].item()
        try:
            with torch.no_grad():
                mask = classifier(image.to(device)).squeeze(0).squeeze(0).cpu()
            # Strip the padding added by the dataset before handing off.
            image = image[0, :, :height, :width]
            mask = mask[:height, :width]
            pool.apply_async(save_image, args=(image, mask, result_file_name[0]), callback=on_complete)
        except Exception as exception:
            if isinstance(exception, KeyboardInterrupt):
                raise exception
            print(("Error while handling {:s}".format(result_file_name[0])))
            traceback.print_exc()
    pool.close()
    pool.join()
"traceback.print_exc",
"torch.utils.data.DataLoader",
"numpy.argmax",
"torch.load",
"os.path.exists",
"numpy.ones",
"classifier.Classifier",
"numpy.clip",
"os.cpu_count",
"multiprocessing.get_context",
"numpy.min",
"numpy.max",
"torch.cuda.is_available",
"glob.glob",
"torch.no_grad",
"... | [((2092, 2111), 'numpy.clip', 'np.clip', (['mask', '(0)', '(1)'], {}), '(mask, 0, 1)\n', (2099, 2111), True, 'import numpy as np\n'), ((2344, 2366), 'numpy.min', 'np.min', (['coords'], {'axis': '(1)'}), '(coords, axis=1)\n', (2350, 2366), True, 'import numpy as np\n'), ((2386, 2408), 'numpy.max', 'np.max', (['coords'], {'axis': '(1)'}), '(coords, axis=1)\n', (2392, 2408), True, 'import numpy as np\n'), ((2724, 2756), 'numpy.ones', 'np.ones', (['(4, new_size, new_size)'], {}), '((4, new_size, new_size))\n', (2731, 2756), True, 'import numpy as np\n'), ((3384, 3396), 'classifier.Classifier', 'Classifier', ([], {}), '()\n', (3394, 3396), False, 'from classifier import Classifier\n'), ((3556, 3618), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(True)', 'num_workers': '(8)'}), '(dataset, batch_size=1, shuffle=True, num_workers=8)\n', (3566, 3618), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((3639, 3653), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (3651, 3653), False, 'import os\n'), ((3724, 3760), 'multiprocessing.get_context', 'multiprocessing.get_context', (['"""spawn"""'], {}), "('spawn')\n", (3751, 3760), False, 'import multiprocessing\n'), ((431, 475), 'glob.glob', 'glob.glob', (['"""data/raw/**.jpg"""'], {'recursive': '(True)'}), "('data/raw/**.jpg', recursive=True)\n", (440, 475), False, 'import glob\n'), ((1856, 1879), 'numpy.argmax', 'np.argmax', (['stats[1:, 4]'], {}), '(stats[1:, 4])\n', (1865, 1879), True, 'import numpy as np\n'), ((3450, 3481), 'torch.load', 'torch.load', (['CLASSIFIER_FILENAME'], {}), '(CLASSIFIER_FILENAME)\n', (3460, 3481), False, 'import torch\n'), ((885, 917), 'os.path.exists', 'os.path.exists', (['result_file_name'], {}), '(result_file_name)\n', (899, 917), False, 'import os\n'), ((982, 1008), 'skimage.io.imread', 'io.imread', (['image_file_name'], {}), '(image_file_name)\n', (991, 1008), False, 'from skimage import io\n'), ((1320, 1365), 'numpy.ones', 
'np.ones', (['(3, height, width)'], {'dtype': 'np.float32'}), '((3, height, width), dtype=np.float32)\n', (1327, 1365), True, 'import numpy as np\n'), ((1451, 1475), 'torch.from_numpy', 'torch.from_numpy', (['result'], {}), '(result)\n', (1467, 1475), False, 'import torch\n'), ((3271, 3296), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3294, 3296), False, 'import torch\n'), ((4147, 4162), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4160, 4162), False, 'import torch\n'), ((4655, 4676), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4674, 4676), False, 'import traceback\n')] |
from __future__ import print_function, division, absolute_import
import numpy as np
from ...common.timeseries_output_comp import TimeseriesOutputCompBase
class RungeKuttaTimeseriesOutputComp(TimeseriesOutputCompBase):
    def setup(self):
        """
        Declare one input/output pair per requested timeseries variable.

        Inputs hold segment-end values (two nodes per segment); outputs
        mirror them one-to-one, so the partials are an identity.
        """
        gd = self.options['grid_data']
        num_nodes = 2 * gd.num_segments
        for name, kwargs in self._timeseries_outputs:
            shape = kwargs['shape']
            full_shape = (num_nodes,) + shape
            meta = {key: kwargs[key] for key in ('units', 'desc')}
            input_name = 'segend_values:{0}'.format(name)
            self.add_input(input_name, shape=full_shape, **meta)
            output_kwargs = dict(meta)
            output_kwargs['shape'] = full_shape
            self.add_output(name, **output_kwargs)
            self._vars.append((input_name, name, shape))
            # Identity Jacobian: output k depends only on input k.
            idx = np.arange(np.prod(full_shape))
            self.declare_partials(
                of=name,
                wrt=input_name,
                rows=idx,
                cols=idx,
                val=1.0)

    def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
        """Copy each segment-end input straight through to its output."""
        for input_name, output_name, _ in self._vars:
            outputs[output_name] = inputs[input_name]
| [
"numpy.arange",
"numpy.prod"
] | [((1172, 1193), 'numpy.prod', 'np.prod', (['segend_shape'], {}), '(segend_shape)\n', (1179, 1193), True, 'import numpy as np\n'), ((1212, 1234), 'numpy.arange', 'np.arange', (['segend_size'], {}), '(segend_size)\n', (1221, 1234), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import timeit
from skimage import io
import numpy as np
import os
import os.path
import random
# Crop a 750^3 volume into a regular grid of sub-volumes, saving each as a tif.
# NOTE: ``t_start`` is deliberately distinct from the grid origin ``start``
# below; the original code reused the name, so the elapsed-time report at the
# end of the script was wrong.
t_start = timeit.default_timer()
im = io.imread('specimen1_data/data3_halfbinned_750vol.tif')
grid_type = 'cover_all' #'not_cover_all'
NumOfImg = 3 # grid points per axis (one more than the number of cubes in 1D)
z_thickness = 150 # depth of every sub-volume in slices
if grid_type == 'cover_all':
    SizeOfVol = 750
    start = 0
else:
    # grids that cover all vol = 750, grids not cover all = max vol that can go
    included_vol = input('key in size of vol covered by grid')
    random_num = random.randint(0, 750 - int(included_vol))
    start = random_num # random origin when the grid does not cover the full volume
    SizeOfVol = start + int(included_vol)
    if SizeOfVol > 750:
        print('vol error')
        raise SystemExit
xloop = np.linspace(start, SizeOfVol, NumOfImg, dtype=int)
yloop = xloop
# ``num`` must be an integer: use floor division instead of the original
# float expression (750/z_thickness)+1, which newer numpy versions reject.
zloop = np.linspace(0, 750, 750 // z_thickness + 1, dtype=int)
w = xloop[1] - xloop[0] # width
h = w # height
d = z_thickness # depth
save_path = "z_fixed_sampling/sample_123761/z_150/crop_img_" + str(w)
try:
    os.mkdir(save_path)
except OSError:
    print("Creation of the directory %s failed" % save_path)
else:
    print("Successfully created the directory %s " % save_path)
tup = ()
for xx in range(0, len(xloop) - 1):
    for yy in range(0, len(yloop) - 1):
        for zz in range(0, len(zloop) - 1):
            x = xloop[xx]
            y = yloop[yy]
            z = zloop[zz]
            # Stack ``d`` consecutive slices of the (h x w) window into one cube.
            for i in range(z, z + d):
                imcrop = im[i][y:y + h, x:x + w]
                np_image = np.array(imcrop)
                tup = tup + (np_image,) # add the image to the sequence
            final_image = np.stack(tup)
            fname = str(x) + '_' + str(x + w - 1) + '_' +\
                    str(y) + '_' + str(y + h - 1) + '_' + \
                    str(z) + '_' + str(i) + '.tif'
            print(fname)
            path = os.path.join(save_path, fname)
            io.imsave(path, final_image)
            tup = ()
stop = timeit.default_timer()
print('Time: ', stop - t_start)
| [
"numpy.stack",
"os.mkdir",
"skimage.io.imsave",
"timeit.default_timer",
"numpy.array",
"numpy.linspace",
"os.path.join",
"skimage.io.imread"
] | [((136, 158), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (156, 158), False, 'import timeit\n'), ((167, 222), 'skimage.io.imread', 'io.imread', (['"""specimen1_data/data3_halfbinned_750vol.tif"""'], {}), "('specimen1_data/data3_halfbinned_750vol.tif')\n", (176, 222), False, 'from skimage import io\n'), ((837, 887), 'numpy.linspace', 'np.linspace', (['start', 'SizeOfVol', 'NumOfImg'], {'dtype': 'int'}), '(start, SizeOfVol, NumOfImg, dtype=int)\n', (848, 887), True, 'import numpy as np\n'), ((930, 983), 'numpy.linspace', 'np.linspace', (['(0)', '(750)', '(750 / z_thickness + 1)'], {'dtype': 'int'}), '(0, 750, 750 / z_thickness + 1, dtype=int)\n', (941, 983), True, 'import numpy as np\n'), ((2127, 2149), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2147, 2149), False, 'import timeit\n'), ((1138, 1157), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (1146, 1157), False, 'import os\n'), ((2008, 2038), 'os.path.join', 'os.path.join', (['save_path', 'fname'], {}), '(save_path, fname)\n', (2020, 2038), False, 'import os\n'), ((2054, 2082), 'skimage.io.imsave', 'io.imsave', (['path', 'final_image'], {}), '(path, final_image)\n', (2063, 2082), False, 'from skimage import io\n'), ((1641, 1657), 'numpy.array', 'np.array', (['imcrop'], {}), '(imcrop)\n', (1649, 1657), True, 'import numpy as np\n'), ((1779, 1792), 'numpy.stack', 'np.stack', (['tup'], {}), '(tup)\n', (1787, 1792), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from ..prediction import StatePrediction, StateMeasurementPrediction
from ..detection import Detection
from ..track import Track
from ..hypothesis import (
SingleHypothesis,
SingleDistanceHypothesis,
SingleProbabilityHypothesis,
JointHypothesis,
ProbabilityJointHypothesis,
DistanceJointHypothesis)
# Shared fixtures used by every test below: a simple 2D state prediction,
# its measurement-space counterpart, a matching detection and a unit distance.
prediction = StatePrediction(np.array([[1], [0]]))
measurement_prediction = StateMeasurementPrediction(np.array([[1], [0]]))
detection = Detection(np.array([[1], [0]]))
distance = float(1)
def test_single_hypothesis():
    """Single Measurement Hypothesis type test"""
    # With a measurement the hypothesis evaluates truthy.
    hyp = SingleHypothesis(prediction, detection)
    assert hyp.prediction is prediction
    assert hyp.measurement is detection
    assert hyp.measurement_prediction is None
    assert hyp

    # A supplied measurement prediction is stored as-is.
    hyp = SingleHypothesis(prediction, detection,
                           measurement_prediction)
    assert hyp.prediction is prediction
    assert hyp.measurement is detection
    assert hyp.measurement_prediction is measurement_prediction
    assert hyp

    # A missing measurement makes the hypothesis falsy.
    hyp = SingleHypothesis(prediction, None)
    assert hyp.prediction is prediction
    assert hyp.measurement is None
    assert hyp.measurement_prediction is None
    assert not hyp
def test_single_distance_hypothesis():
    """Single Measurement Distance Hypothesis type test"""
    hyp = SingleDistanceHypothesis(
        prediction, detection, distance, measurement_prediction)
    assert hyp.prediction is prediction
    assert hyp.measurement is detection
    assert hyp.distance is distance
    assert hyp.measurement_prediction is measurement_prediction
    # Weight is the reciprocal of the distance ...
    assert hyp.weight == 1 / distance
    # ... and a zero distance maps to infinite weight.
    hyp.distance = 0
    assert hyp.weight == float('inf')
def test_single_distance_hypothesis_comparison():
    """Single Measurement Distance Hypothesis comparison test"""
    near = SingleDistanceHypothesis(
        prediction, detection, distance, measurement_prediction)
    far = SingleDistanceHypothesis(
        prediction, detection, distance + 1, measurement_prediction)
    # A smaller distance ranks higher.
    assert near > far
    assert far < near
    assert near <= near
    assert near >= near
    assert near == near
def test_single_probability_hypothesis_comparison():
    """Single Measurement Probability Hypothesis comparison test"""
    likely = SingleProbabilityHypothesis(
        prediction, detection, 0.9, measurement_prediction)
    unlikely = SingleProbabilityHypothesis(
        prediction, detection, 0.1, measurement_prediction)
    # A higher probability ranks higher.
    assert likely > unlikely
    assert unlikely < likely
    assert likely <= likely
    assert likely >= likely
    assert likely == likely
def test_probability_joint_hypothesis():
    """Probability Joint Hypothesis type test"""
    track_a = Track()
    track_b = Track()
    hyp_a = SingleProbabilityHypothesis(
        prediction, detection, 0.9, measurement_prediction)
    hyp_b = SingleProbabilityHypothesis(
        prediction, detection, 0.1, measurement_prediction)
    joint = JointHypothesis({track_a: hyp_a, track_b: hyp_b})
    # Probability hypotheses yield the probability flavour of joint hypothesis.
    assert isinstance(joint,
                      ProbabilityJointHypothesis)
    assert joint[track_a] is hyp_a
    assert joint[track_b] is hyp_b
    assert joint.probability == hyp_a.probability * hyp_b.probability
def test_probability_joint_hypothesis_comparison():
    """Probability Joint Hypothesis comparison test"""
    track_a = Track()
    track_b = Track()
    strong_a = SingleProbabilityHypothesis(
        prediction, detection, 0.75, measurement_prediction)
    strong_b = SingleProbabilityHypothesis(
        prediction, detection, 0.75, measurement_prediction)
    weak = SingleProbabilityHypothesis(
        prediction, detection, 0.25, measurement_prediction)
    # ``strong_a`` is deliberately shared between both joint hypotheses.
    joint_strong = JointHypothesis({track_a: strong_a, track_b: strong_b})
    joint_strong.normalise()
    joint_weak = JointHypothesis({track_a: strong_a, track_b: weak})
    joint_weak.normalise()
    assert joint_strong > joint_weak
    assert joint_weak < joint_strong
    assert joint_strong <= joint_strong
    assert joint_strong >= joint_strong
    assert joint_strong == joint_strong
def test_distance_joint_hypothesis():
    """Distance Joint Hypothesis type test"""
    track_a = Track()
    track_b = Track()
    hyp_a = SingleDistanceHypothesis(
        prediction, detection, distance, measurement_prediction)
    hyp_b = SingleDistanceHypothesis(
        prediction, detection, distance, measurement_prediction)
    joint = JointHypothesis({track_a: hyp_a, track_b: hyp_b})
    # Distance hypotheses yield the distance flavour of joint hypothesis.
    assert isinstance(joint,
                      DistanceJointHypothesis)
    assert joint[track_a] is hyp_a
    assert joint[track_b] is hyp_b
    assert joint.distance == distance * 2
def test_distance_joint_hypothesis_comparison():
    """Distance Joint Hypothesis comparison test"""
    track_a = Track()
    track_b = Track()
    near_a = SingleDistanceHypothesis(
        prediction, detection, distance, measurement_prediction)
    near_b = SingleDistanceHypothesis(
        prediction, detection, distance, measurement_prediction)
    far = SingleDistanceHypothesis(
        prediction, detection, distance + 1, measurement_prediction)
    # ``near_a`` is deliberately shared between both joint hypotheses.
    joint_near = JointHypothesis({track_a: near_a, track_b: near_b})
    joint_far = JointHypothesis({track_a: near_a, track_b: far})
    assert joint_near > joint_far
    assert joint_far < joint_near
    assert joint_near <= joint_near
    assert joint_near >= joint_near
    assert joint_near == joint_near
def test_invalid_single_joint_hypothesis():
    """Invalid Single Measurement Joint Hypothesis test"""
    # Plain objects are not hypotheses, so building a joint hypothesis
    # from them must be rejected.
    bad_hypotheses = {Track(): object(), Track(): object()}
    with pytest.raises(NotImplementedError):
        JointHypothesis(bad_hypotheses)
| [
"pytest.raises",
"numpy.array"
] | [((411, 431), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (419, 431), True, 'import numpy as np\n'), ((485, 505), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (493, 505), True, 'import numpy as np\n'), ((529, 549), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (537, 549), True, 'import numpy as np\n'), ((5689, 5723), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (5702, 5723), False, 'import pytest\n')] |
###############################################################################################
### Machine learning -- unsupervised (plus supervised nnet model)
# NOTE: this is reference/cheat-sheet code, not a runnable script --
# ``samples``, ``country_names`` and ``species`` are placeholders that must
# be supplied by the surrounding analysis.
import numpy as np
import pandas as pd
###############################################################################################
#### Unsupervised learning models
#####################################
## k-means model
# standardize data for better result
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(samples)
samples_scaled = scaler.transform(samples)
from sklearn.cluster import KMeans
model = KMeans(n_clusters=3)
model.fit(samples_scaled) # samples are a 2D matrix
labels = model.predict(samples_scaled) # labels for existing observations
new_labels = model.predict(samples_scaled)
print(model.inertia_) # minimize inertia as a metric for model quality
# plot the inertia as a function of n_clusters parameter
# choose the "elbow" point as the best model
#####################################
## Hierarchical clustering (result is a dendrogram)
from scipy.cluster.hierarchy import linkage, dendrogram
import matplotlib.pyplot as plt
mergings = linkage(samples_scaled, method='complete')
dendrogram(mergings, labels=country_names, leaf_rotation=90, leaf_font_size=6)
plt.show()
# Height on dendrogram = distance between merging clusters
# can further assign cluster labels by cutting the tree at a given height
from scipy.cluster.hierarchy import fcluster
labels = fcluster(mergings, 15, criterion='distance') # cut at height 15
pairs = pd.DataFrame({'labels': labels, 'countries': country_names})
print(pairs.sort_values('labels'))
#####################################
## t-SNE for 2D map visualizing high-dim data
from sklearn.manifold import TSNE
model = TSNE(learning_rate=100) # learning rate is usually within [50, 200]
transformed = model.fit_transform(samples)
xs = transformed[:,0]
ys = transformed[:,1]
plt.scatter(xs, ys, c=species)
plt.show()
#####################################
## PCA transformation
from sklearn.decomposition import PCA
model = PCA(n_components=2) # set n_component to None to retain all features
model.fit(samples)
transformed = model.transform(samples)
# Rows of transformed correspond to samples
# Columns of transformed are the "PCA features"
print(model.components_)
# visualize explained variance by each component
features = range(model.n_components_)
plt.bar(features, model.explained_variance_)
###############################################################################################
#### nnet
#####################################
## keras - regression
from keras.layers import Dense
from keras.models import Sequential
predictors = np.loadtxt('predictors_data.csv', delimiter=',')
target = np.loadtxt('target_data.csv', delimiter=',')
n_cols = predictors.shape[1]
model = Sequential()
model.add(Dense(100, activation='relu', input_shape = (n_cols,)))
model.add(Dense(100, activation='relu'))
model.add(Dense(1)) # single linear output unit for regression
model.compile(optimizer='adam', loss='mean_squared_error')
# NOTE(review): ``early_stopping_monitor`` is not defined in this file --
# it must be created beforehand (e.g. keras.callbacks.EarlyStopping).
model.fit(predictors, target, validation_split=0.3, epochs=20, callbacks = [early_stopping_monitor])
## keras - classification
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
data = pd.read_csv('basketball_shot_log.csv')
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0; newer code
# should use .to_numpy() instead.
predictors = data.drop(['shot_result'], axis=1).as_matrix()
target = to_categorical(data.shot_result)
n_cols = predictors.shape[1]
model = Sequential()
model.add(Dense(100, activation='relu', input_shape = (n_cols,)))
model.add(Dense(100, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(2, activation='softmax')) # one output unit per class
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Alternative compile with an explicit SGD optimizer; ``SGD`` and ``lr``
# must be defined by the caller, and this overrides the compile above.
model.compile(optimizer=SGD(lr=lr), loss='categorical_crossentropy') # SGD optimization
model.fit(predictors, target, validation_split=0.3, epochs=20, callbacks = [early_stopping_monitor])
## keras - save and load model
# for more details, please refer to datacamp course "Deep Learning in Python"
my_model = load_model('my_model.h5')
my_model.summary() # verify the model structure
| [
"pandas.DataFrame",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"scipy.cluster.hierarchy.fcluster",
"sklearn.manifold.TSNE",
"pandas.read_csv",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.scatter",
"scipy.cluster.hierarchy.linkage",
"matplotlib.pyplot.bar",
"keras.layers.D... | [((483, 499), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (497, 499), False, 'from sklearn.preprocessing import StandardScaler\n'), ((606, 626), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)'}), '(n_clusters=3)\n', (612, 626), False, 'from sklearn.cluster import KMeans\n'), ((1163, 1205), 'scipy.cluster.hierarchy.linkage', 'linkage', (['samples_scaled'], {'method': '"""complete"""'}), "(samples_scaled, method='complete')\n", (1170, 1205), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((1206, 1284), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['mergings'], {'labels': 'country_names', 'leaf_rotation': '(90)', 'leaf_font_size': '(6)'}), '(mergings, labels=country_names, leaf_rotation=90, leaf_font_size=6)\n', (1216, 1284), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((1285, 1295), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1293, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1469, 1513), 'scipy.cluster.hierarchy.fcluster', 'fcluster', (['mergings', '(15)'], {'criterion': '"""distance"""'}), "(mergings, 15, criterion='distance')\n", (1477, 1513), False, 'from scipy.cluster.hierarchy import fcluster\n'), ((1522, 1582), 'pandas.DataFrame', 'pd.DataFrame', (["{'labels': labels, 'countries': country_names}"], {}), "({'labels': labels, 'countries': country_names})\n", (1534, 1582), True, 'import pandas as pd\n'), ((1745, 1768), 'sklearn.manifold.TSNE', 'TSNE', ([], {'learning_rate': '(100)'}), '(learning_rate=100)\n', (1749, 1768), False, 'from sklearn.manifold import TSNE\n'), ((1900, 1930), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'c': 'species'}), '(xs, ys, c=species)\n', (1911, 1930), True, 'import matplotlib.pyplot as plt\n'), ((1931, 1941), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1939, 1941), True, 'import matplotlib.pyplot as plt\n'), ((2049, 2068), 'sklearn.decomposition.PCA', 'PCA', ([], 
{'n_components': '(2)'}), '(n_components=2)\n', (2052, 2068), False, 'from sklearn.decomposition import PCA\n'), ((2384, 2428), 'matplotlib.pyplot.bar', 'plt.bar', (['features', 'model.explained_variance_'], {}), '(features, model.explained_variance_)\n', (2391, 2428), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2727), 'numpy.loadtxt', 'np.loadtxt', (['"""predictors_data.csv"""'], {'delimiter': '""","""'}), "('predictors_data.csv', delimiter=',')\n", (2689, 2727), True, 'import numpy as np\n'), ((2737, 2781), 'numpy.loadtxt', 'np.loadtxt', (['"""target_data.csv"""'], {'delimiter': '""","""'}), "('target_data.csv', delimiter=',')\n", (2747, 2781), True, 'import numpy as np\n'), ((2819, 2831), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2829, 2831), False, 'from keras.models import Sequential\n'), ((3259, 3297), 'pandas.read_csv', 'pd.read_csv', (['"""basketball_shot_log.csv"""'], {}), "('basketball_shot_log.csv')\n", (3270, 3297), True, 'import pandas as pd\n'), ((3367, 3399), 'keras.utils.to_categorical', 'to_categorical', (['data.shot_result'], {}), '(data.shot_result)\n', (3381, 3399), False, 'from keras.utils import to_categorical\n'), ((3437, 3449), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3447, 3449), False, 'from keras.models import Sequential\n'), ((2842, 2894), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""', 'input_shape': '(n_cols,)'}), "(100, activation='relu', input_shape=(n_cols,))\n", (2847, 2894), False, 'from keras.layers import Dense\n'), ((2908, 2937), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (2913, 2937), False, 'from keras.layers import Dense\n'), ((2949, 2957), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2954, 2957), False, 'from keras.layers import Dense\n'), ((3460, 3512), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""', 'input_shape': '(n_cols,)'}), "(100, activation='relu', 
input_shape=(n_cols,))\n", (3465, 3512), False, 'from keras.layers import Dense\n'), ((3526, 3555), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (3531, 3555), False, 'from keras.layers import Dense\n'), ((3567, 3596), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (3572, 3596), False, 'from keras.layers import Dense\n'), ((3608, 3638), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (3613, 3638), False, 'from keras.layers import Dense\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 10:14:38 2019
@author: YQ
"""
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
import pickle
import numpy as np
import itertools
class load_noteseqs:
    """Load pickled note sequences and serve them as shuffled, padded batches.

    Each file in ``path`` is expected to contain a pickled list of note
    sequences. All files are concatenated into one pool of sequences, and
    every sequence is labelled with the index of the file it came from.

    Parameters
    ----------
    path : list of str
        Paths to pickle files, one per dataset/class.
    x_depth : list of int
        Feature depth of each input segment; summed for the feature axis.
    batch_size : int
        Number of sequences per batch.
    augment : bool
        If True, randomly transpose the first 88 feature channels
        (piano-roll pitches) by -5..+5 steps while loading.
    """

    def __init__(self, path, x_depth, batch_size=16, augment=True):
        # FIX: open each pickle inside a context manager so the file handle
        # is closed deterministically (the original `pickle.load(open(p, "rb"))`
        # leaked one open handle per file).
        self.data = []
        for p in path:
            with open(p, "rb") as f:
                self.data.append(pickle.load(f))
        self.notes = [d for d in self.data]

        # Label every sequence with the index of the file it came from.
        self.labels = []
        for i in range(len(self.data)):
            tmp = len(self.data[i])
            self.labels.append(np.ones([tmp]) * i)
        self.labels = self.labels[0] if len(self.labels) == 1 else np.concatenate(self.labels, 0)

        self.x_depth = x_depth
        # Flatten the per-file lists into one flat list of sequences.
        self.notes = list(itertools.chain.from_iterable(self.notes))
        self.seq_len = [len(x) for x in self.notes]
        self.batch_size = batch_size
        self.augment = augment
        # Incomplete trailing batches are dropped by loader().
        self.total_batches = int(len(self.notes) // self.batch_size)

    def loader(self):
        """Generator yielding (padded_notes, seq_len, label) batches in random order."""
        Z = list(zip(self.notes, self.seq_len, self.labels))
        np.random.shuffle(Z)
        notes, seq_len, labels = zip(*Z)
        for i in range(self.total_batches):
            tmp_notes = notes[self.batch_size*i:(self.batch_size*i)+self.batch_size]
            tmp_seq_len = seq_len[self.batch_size*i:(self.batch_size*i)+self.batch_size]
            tmp_label = labels[self.batch_size*i:(self.batch_size*i)+self.batch_size]
            if len(tmp_notes) == self.batch_size:
                # Pad with -1 so all sequences in the batch share one length.
                tmp_notes = pad_sequences(tmp_notes, padding="post", dtype=np.int32, value=-1)
                if self.augment:
                    # Random pitch transposition of the first 88 channels only;
                    # the remaining channels are left untouched.
                    aug = np.random.choice(np.arange(-5, 6))
                    pitch = np.roll(tmp_notes[:, :, :88], aug, axis=-1)
                    tmp_notes = np.concatenate([pitch, tmp_notes[:, :, 88:]], -1)
                yield tmp_notes, tmp_seq_len, tmp_label
            else:
                # Drop the incomplete final batch.
                break

    def get_iterator(self):
        """Build a TF1-style initializable iterator over loader().

        Returns
        -------
        (iterator, note, seq_len, label) -- the iterator must be initialized
        via ``sess.run(iterator.initializer)`` before use.
        """
        ds = tf.data.Dataset.from_generator(self.loader, (tf.float32, tf.int32, tf.int32))
        ds = ds.shuffle(self.batch_size*2)
        iterate = ds.make_initializable_iterator()
        note, seq_len, label = iterate.get_next()
        # Static shape hints: feature axis is the sum of the segment depths.
        note.set_shape([None, None, sum(self.x_depth)])
        seq_len.set_shape([None])
        label.set_shape([None])
        return iterate, note, seq_len, label
if __name__ == "__main__":
"""
For testing purposes
"""
import time
noteseq = load_noteseqs(["data/jsbvl.pkl", "data/nmdvl.pkl", "data/popvl.pkl"], [89, 33, 33])
it, note, seq_len, label = noteseq.get_iterator()
sess = tf.Session()
sess.run(it.initializer)
data = []
total = 0.0
tik = time.time()
while True:
try:
data.append(sess.run([note, seq_len, label]))
tok = time.time()
print(tok-tik)
total += tok-tik
tik = time.time()
except tf.errors.OutOfRangeError:
break
| [
"numpy.random.shuffle",
"numpy.roll",
"tensorflow.Session",
"numpy.ones",
"time.time",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.data.Dataset.from_generator",
"numpy.arange",
"itertools.chain.from_iterable",
"numpy.concatenate"
] | [((2706, 2718), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2716, 2718), True, 'import tensorflow as tf\n'), ((2793, 2804), 'time.time', 'time.time', ([], {}), '()\n', (2802, 2804), False, 'import time\n'), ((1110, 1130), 'numpy.random.shuffle', 'np.random.shuffle', (['Z'], {}), '(Z)\n', (1127, 1130), True, 'import numpy as np\n'), ((2041, 2118), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['self.loader', '(tf.float32, tf.int32, tf.int32)'], {}), '(self.loader, (tf.float32, tf.int32, tf.int32))\n', (2071, 2118), True, 'import tensorflow as tf\n'), ((654, 684), 'numpy.concatenate', 'np.concatenate', (['self.labels', '(0)'], {}), '(self.labels, 0)\n', (668, 684), True, 'import numpy as np\n'), ((760, 801), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['self.notes'], {}), '(self.notes)\n', (789, 801), False, 'import itertools\n'), ((2910, 2921), 'time.time', 'time.time', ([], {}), '()\n', (2919, 2921), False, 'import time\n'), ((2996, 3007), 'time.time', 'time.time', ([], {}), '()\n', (3005, 3007), False, 'import time\n'), ((1554, 1620), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['tmp_notes'], {'padding': '"""post"""', 'dtype': 'np.int32', 'value': '(-1)'}), "(tmp_notes, padding='post', dtype=np.int32, value=-1)\n", (1567, 1620), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((569, 583), 'numpy.ones', 'np.ones', (['[tmp]'], {}), '([tmp])\n', (576, 583), True, 'import numpy as np\n'), ((1760, 1803), 'numpy.roll', 'np.roll', (['tmp_notes[:, :, :88]', 'aug'], {'axis': '(-1)'}), '(tmp_notes[:, :, :88], aug, axis=-1)\n', (1767, 1803), True, 'import numpy as np\n'), ((1836, 1885), 'numpy.concatenate', 'np.concatenate', (['[pitch, tmp_notes[:, :, 88:]]', '(-1)'], {}), '([pitch, tmp_notes[:, :, 88:]], -1)\n', (1850, 1885), True, 'import numpy as np\n'), ((1714, 1730), 'numpy.arange', 'np.arange', (['(-5)', '(6)'], {}), '(-5, 6)\n', (1723, 
1730), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
#subprocess.call('git clone https://github.com/mikgroup/sigpy.git')
#subprocess.call('pip install sigpy')
import sigpy as sp
import sigpy.mri
import matplotlib.animation as manimation
import time
# Simulation parameters
Tpe = 0.1*1e-3 # Duration of the blipped phase-encode gradient [s] (0.1 ms)
Tread = 1.0*1e-3 # Readout time [s] (1.0 ms; original comment said "ms")
Npe = 128      # number of phase-encode lines
Nfreq = 128    # number of frequency-encode samples per line
t_offset = 0.0
t = []
kx = []
ky = []
# One readout line in k-space and its sample times.
k_line = np.linspace(-Nfreq/2.0,Nfreq/2.0,Nfreq+2)
ky_all = np.linspace(-Npe/2.0,Npe/2.0,Npe)
t_line = np.linspace(0, Tread,Nfreq+2)
# Build an EPI (zig-zag) trajectory: traverse kx left-to-right on even
# phase-encode lines and right-to-left on odd lines, stepping ky each line.
for pe in range(Npe):
    if pe%2 == 0:
        if pe > 0:
            kx = np.concatenate([kx, k_line])
        else:
            kx = k_line
    else:
        if pe > 0:
            kx = np.concatenate([kx, -k_line])
        else:
            kx = -k_line
    # ky is constant along each readout line.
    ky = np.concatenate([ky,ky_all[pe]*np.ones(k_line.shape,k_line.dtype)])
    # Sample times advance by one readout plus one phase-encode blip per line.
    t = np.concatenate([t,t_line+t_offset])
    t_offset += Tpe + Tread
ky = -ky
# Plot Kx,Ky
plt.figure()
plt.plot(kx, ky)
# Kx/Ky vs time
plt.figure()
plt.plot(t, kx)
plt.plot(t, ky)
# Define fat as the outer ring: two overlapping Shepp-Logan-style ellipses
# with amplitudes +1 and -1, leaving only the outer rim non-zero.
sl_amps = [1.0,-1.0]
sl_scales = [[.6900, .920, .810], # white big
        [.6624, .874, .780]] # gray big
sl_offsets = [[0., 0., 0],
              [0., -.0184, 0]]
sl_angles = [[0, 0, 0],
             [0, 0, 0]]
fat = sp.sim.phantom([256,256], sl_amps, sl_scales, sl_offsets, sl_angles,dtype=np.complex64)
plt.figure()
plt.imshow(np.abs(fat))
# Water compartment: standard Shepp-Logan-like ellipse set (amps/scales/
# offsets/angles per sigpy's phantom convention).
sl_amps = [0.2, 1., 1.2, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]
sl_scales = [[.6624, .874, .780], # gray big
             [.1100, .310, .220], # right black
             [.1600, .410, .280], # left black
             [.2100, .250, .410], # gray center blob
             [.0460, .046, .050],
             [.0460, .046, .050],
             [.0460, .046, .050], # left small dot
             [.0230, .023, .020], # mid small dot
             [.0230, .023, .020]]
sl_offsets = [[0., -.0184, 0],
              [.22, 0., 0],
              [-.22, 0., 0],
              [0., .35, -.15],
              [0., .1, .25],
              [0., -.1, .25],
              [-.08, -.605, 0],
              [0., -.606, 0],
              [.06, -.605, 0]]
sl_angles = [[0, 0, 0],
             [-18, 0, 10],
             [18, 0, 10],
             [0, 0, 0],
             [0, 0, 0],
             [0, 0, 0],
             [0, 0, 0],
             [0, 0, 0],
             [0, 0, 0]]
# Get complex phantom
water = sp.sim.phantom([256,256], sl_amps, sl_scales, sl_offsets, sl_angles,dtype=np.complex64)
# Off-resonance map: a smooth super-Gaussian bump centered at (x0, y0)
# with widths (wX, wY), on a normalized [-0.5, 0.5]^2 grid.
[x,y] = np.meshgrid(np.linspace(-0.5,0.5,256),np.linspace(-0.5,0.5,256))
x0 = 0.0
y0 = 0.4
wY = 0.3
wX = 0.2
offmap = np.exp( -((x-x0)**2/(wX**2) + (y-y0)**2/(wY**2))**2)
# Subtract off mean, then normalize so the peak magnitude is 1.
offmap -= np.mean(offmap)
offmap /= np.max(offmap)
# Flip vertically so image orientation matches the display convention.
offmap = np.flipud(offmap)
water = np.flipud(water)
fat = np.flipud(fat)
# Display the off-resonance map.
# (Original comment said "Create gradients as triangles" -- stale; nothing
# below builds gradient waveforms.)
plt.figure()
plt.imshow(offmap)
plt.draw()
plt.colorbar()
# Set up the ffmpeg movie writer for the animation frames below.
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='epi_without_fat', artist='KMJ',
                comment='EPI simulation with off-resonance')
writer = FFMpegWriter(fps=10, metadata=metadata)
# Do this on GPU: move all simulation arrays onto device 0; `xp` is the
# device's array module (cupy on GPU, numpy on CPU).
device = sp.backend.Device(0)
xp = device.xp
kx = sp.backend.to_device(kx, device)
ky = sp.backend.to_device(ky, device)
t = sp.backend.to_device(t, device)
water = sp.backend.to_device(water, device)
fat = sp.backend.to_device(fat, device)
offmap = sp.backend.to_device(offmap, device)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
# Sweep the peak off-resonance frequency from -200 to +200 Hz and render
# one movie frame per value: left panel = off-resonance map, right panel =
# image reconstructed from the simulated EPI acquisition.
with writer.saving(fig, "epi_without_fat.mp4", 100):
    for fmax in xp.linspace(-200.0,200.0,41):
        with device:
            t_start = time.time()
            s = xp.zeros(kx.shape, xp.complex64)
            img_est = xp.zeros(water.shape, xp.complex64)
            # Now do the DFT: brute-force signal simulation plus conjugate-
            # phase reconstruction, one k-space sample at a time.
            [x, y] = xp.meshgrid(xp.linspace(-0.5, 0.5, 256), xp.linspace(-0.5, 0.5, 256))
            for pos in range(kx.shape[0]):
                # Encoding phase for this k-space location.
                Gphase = xp.exp(1j*2.0*math.pi*(kx[pos]*x + ky[pos]*y))
                # Off-resonance phase accrued by acquisition time t[pos].
                Ophase = xp.exp(1j*2.0*math.pi*t[pos]*offmap*fmax)
                # Simulated sample (fat term is scaled by 0.0, i.e. disabled
                # here -- matches the "epi_without_fat" output name).
                s[pos] = xp.sum((water+0.0*fat*xp.exp(2j*math.pi*t[pos]*440))*Gphase*Ophase)
                # Smooth k-space roll-off window applied separately along kx
                # and ky -- presumably to suppress ringing at the k-space
                # edge; confirm intent with the author.
                fr = 0.9*Nfreq/2.0
                fw = 0.1*Nfreq/2.0
                kr = xp.abs(kx[pos])
                wx = 1. / (1. + xp.exp( (kr-fr)/fw))
                kr = xp.abs(ky[pos])
                wy = 1. / (1. + xp.exp( (kr-fr)/fw))
                # Conjugate-phase accumulation of this sample into the image.
                img_est += s[pos]*xp.conj(Gphase)*wx*wy
            print(f'Took {time.time()-t_start}')
        # Get Images: pull results back to the CPU for plotting.
        img_est_cpu = sp.backend.to_device(img_est,sp.cpu_device)
        offmap_cpu = sp.backend.to_device(offmap,sp.cpu_device)
        fmax_cpu = sp.backend.to_device(fmax,sp.cpu_device)
        from mpl_toolkits.axes_grid1 import make_axes_locatable
        def colorbar(mappable):
            # Attach a colorbar sized to match the given image axes.
            ax = mappable.axes
            fig = ax.figure
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.05)
            return fig.colorbar(mappable, cax=cax)
        img1 = ax1.imshow(fmax_cpu*offmap_cpu,cmap='Spectral')
        img1.set_clim(-150,150)
        colorbar(img1)
        ax1.set_title('Off Resonance [Hz]')
        ax1.axis('off')
        img2 = ax2.imshow(np.abs(img_est_cpu),cmap='gray')
        ax2.set_title('Estimated Image')
        ax2.axis('off')
        plt.tight_layout(h_pad=1)
        plt.draw()
        plt.pause(0.0001)
        writer.grab_frame()
plt.show()
| [
"numpy.abs",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.draw",
"numpy.max",
"numpy.linspace",
"sigpy.backend.Device",
"matplotlib.pyplot.pause",
"matplot... | [((451, 500), 'numpy.linspace', 'np.linspace', (['(-Nfreq / 2.0)', '(Nfreq / 2.0)', '(Nfreq + 2)'], {}), '(-Nfreq / 2.0, Nfreq / 2.0, Nfreq + 2)\n', (462, 500), True, 'import numpy as np\n'), ((503, 542), 'numpy.linspace', 'np.linspace', (['(-Npe / 2.0)', '(Npe / 2.0)', 'Npe'], {}), '(-Npe / 2.0, Npe / 2.0, Npe)\n', (514, 542), True, 'import numpy as np\n'), ((547, 579), 'numpy.linspace', 'np.linspace', (['(0)', 'Tread', '(Nfreq + 2)'], {}), '(0, Tread, Nfreq + 2)\n', (558, 579), True, 'import numpy as np\n'), ((1030, 1042), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1040, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1044, 1060), 'matplotlib.pyplot.plot', 'plt.plot', (['kx', 'ky'], {}), '(kx, ky)\n', (1052, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1081, 1093), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1091, 1093), True, 'import matplotlib.pyplot as plt\n'), ((1095, 1110), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'kx'], {}), '(t, kx)\n', (1103, 1110), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1127), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'ky'], {}), '(t, ky)\n', (1120, 1127), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1490), 'sigpy.sim.phantom', 'sp.sim.phantom', (['[256, 256]', 'sl_amps', 'sl_scales', 'sl_offsets', 'sl_angles'], {'dtype': 'np.complex64'}), '([256, 256], sl_amps, sl_scales, sl_offsets, sl_angles, dtype\n =np.complex64)\n', (1410, 1490), True, 'import sigpy as sp\n'), ((1489, 1501), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1499, 1501), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2644), 'sigpy.sim.phantom', 'sp.sim.phantom', (['[256, 256]', 'sl_amps', 'sl_scales', 'sl_offsets', 'sl_angles'], {'dtype': 'np.complex64'}), '([256, 256], sl_amps, sl_scales, sl_offsets, sl_angles, dtype\n =np.complex64)\n', (2564, 2644), True, 'import sigpy as sp\n'), ((2762, 2827), 'numpy.exp', 'np.exp', (['(-((x - x0) ** 2 / wX ** 2 + (y - y0) ** 
2 / wY ** 2) ** 2)'], {}), '(-((x - x0) ** 2 / wX ** 2 + (y - y0) ** 2 / wY ** 2) ** 2)\n', (2768, 2827), True, 'import numpy as np\n'), ((2849, 2864), 'numpy.mean', 'np.mean', (['offmap'], {}), '(offmap)\n', (2856, 2864), True, 'import numpy as np\n'), ((2876, 2890), 'numpy.max', 'np.max', (['offmap'], {}), '(offmap)\n', (2882, 2890), True, 'import numpy as np\n'), ((2903, 2920), 'numpy.flipud', 'np.flipud', (['offmap'], {}), '(offmap)\n', (2912, 2920), True, 'import numpy as np\n'), ((2930, 2946), 'numpy.flipud', 'np.flipud', (['water'], {}), '(water)\n', (2939, 2946), True, 'import numpy as np\n'), ((2954, 2968), 'numpy.flipud', 'np.flipud', (['fat'], {}), '(fat)\n', (2963, 2968), True, 'import numpy as np\n'), ((3005, 3017), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3015, 3017), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3037), 'matplotlib.pyplot.imshow', 'plt.imshow', (['offmap'], {}), '(offmap)\n', (3029, 3037), True, 'import matplotlib.pyplot as plt\n'), ((3039, 3049), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3047, 3049), True, 'import matplotlib.pyplot as plt\n'), ((3051, 3065), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3063, 3065), True, 'import matplotlib.pyplot as plt\n'), ((3311, 3331), 'sigpy.backend.Device', 'sp.backend.Device', (['(0)'], {}), '(0)\n', (3328, 3331), True, 'import sigpy as sp\n'), ((3354, 3386), 'sigpy.backend.to_device', 'sp.backend.to_device', (['kx', 'device'], {}), '(kx, device)\n', (3374, 3386), True, 'import sigpy as sp\n'), ((3393, 3425), 'sigpy.backend.to_device', 'sp.backend.to_device', (['ky', 'device'], {}), '(ky, device)\n', (3413, 3425), True, 'import sigpy as sp\n'), ((3431, 3462), 'sigpy.backend.to_device', 'sp.backend.to_device', (['t', 'device'], {}), '(t, device)\n', (3451, 3462), True, 'import sigpy as sp\n'), ((3472, 3507), 'sigpy.backend.to_device', 'sp.backend.to_device', (['water', 'device'], {}), '(water, device)\n', (3492, 3507), True, 'import 
sigpy as sp\n'), ((3515, 3548), 'sigpy.backend.to_device', 'sp.backend.to_device', (['fat', 'device'], {}), '(fat, device)\n', (3535, 3548), True, 'import sigpy as sp\n'), ((3559, 3595), 'sigpy.backend.to_device', 'sp.backend.to_device', (['offmap', 'device'], {}), '(offmap, device)\n', (3579, 3595), True, 'import sigpy as sp\n'), ((3617, 3656), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'figsize': '(20, 10)'}), '(ncols=2, figsize=(20, 10))\n', (3629, 3656), True, 'import matplotlib.pyplot as plt\n'), ((5686, 5696), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5694, 5696), True, 'import matplotlib.pyplot as plt\n'), ((934, 972), 'numpy.concatenate', 'np.concatenate', (['[t, t_line + t_offset]'], {}), '([t, t_line + t_offset])\n', (948, 972), True, 'import numpy as np\n'), ((1514, 1525), 'numpy.abs', 'np.abs', (['fat'], {}), '(fat)\n', (1520, 1525), True, 'import numpy as np\n'), ((2659, 2686), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(0.5)', '(256)'], {}), '(-0.5, 0.5, 256)\n', (2670, 2686), True, 'import numpy as np\n'), ((2685, 2712), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(0.5)', '(256)'], {}), '(-0.5, 0.5, 256)\n', (2696, 2712), True, 'import numpy as np\n'), ((4745, 4789), 'sigpy.backend.to_device', 'sp.backend.to_device', (['img_est', 'sp.cpu_device'], {}), '(img_est, sp.cpu_device)\n', (4765, 4789), True, 'import sigpy as sp\n'), ((4811, 4854), 'sigpy.backend.to_device', 'sp.backend.to_device', (['offmap', 'sp.cpu_device'], {}), '(offmap, sp.cpu_device)\n', (4831, 4854), True, 'import sigpy as sp\n'), ((4874, 4915), 'sigpy.backend.to_device', 'sp.backend.to_device', (['fmax', 'sp.cpu_device'], {}), '(fmax, sp.cpu_device)\n', (4894, 4915), True, 'import sigpy as sp\n'), ((5579, 5604), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(1)'}), '(h_pad=1)\n', (5595, 5604), True, 'import matplotlib.pyplot as plt\n'), ((5614, 5624), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (5622, 
5624), True, 'import matplotlib.pyplot as plt\n'), ((5634, 5651), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (5643, 5651), True, 'import matplotlib.pyplot as plt\n'), ((659, 687), 'numpy.concatenate', 'np.concatenate', (['[kx, k_line]'], {}), '([kx, k_line])\n', (673, 687), True, 'import numpy as np\n'), ((777, 806), 'numpy.concatenate', 'np.concatenate', (['[kx, -k_line]'], {}), '([kx, -k_line])\n', (791, 806), True, 'import numpy as np\n'), ((3809, 3820), 'time.time', 'time.time', ([], {}), '()\n', (3818, 3820), False, 'import time\n'), ((5099, 5122), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (5118, 5122), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((5468, 5487), 'numpy.abs', 'np.abs', (['img_est_cpu'], {}), '(img_est_cpu)\n', (5474, 5487), True, 'import numpy as np\n'), ((888, 923), 'numpy.ones', 'np.ones', (['k_line.shape', 'k_line.dtype'], {}), '(k_line.shape, k_line.dtype)\n', (895, 923), True, 'import numpy as np\n'), ((4675, 4686), 'time.time', 'time.time', ([], {}), '()\n', (4684, 4686), False, 'import time\n')] |
import pandas as pd
import numpy as np
#import re
import argparse
import sys
import pickle
from cddm_data_simulation import ddm
from cddm_data_simulation import ddm_flexbound
from cddm_data_simulation import levy_flexbound
from cddm_data_simulation import ornstein_uhlenbeck
from cddm_data_simulation import full_ddm
from cddm_data_simulation import ddm_sdv
#from cddm_data_simulation import ddm_flexbound_pre
from cddm_data_simulation import race_model
from cddm_data_simulation import lca
from cddm_data_simulation import ddm_flexbound_seq2
from cddm_data_simulation import ddm_flexbound_par2
from cddm_data_simulation import ddm_flexbound_mic2
import cddm_data_simulation as cds
import boundary_functions as bf
def bin_simulator_output_pointwise(out = [0, 0],
                                   bin_dt = 0.04,
                                   nbins = 0): # ['v', 'a', 'w', 'ndt', 'angle']
    """Convert simulator output to per-trial (bin index, choice index) pairs.

    Parameters
    ----------
    out : tuple or list
        Simulator output: out[0] response times (n, 1), out[1] choices
        (n, 1), out[2] metadata dict with at least 'max_t'.
    bin_dt : float
        Bin width in seconds; only used when nbins == 0.
    nbins : int
        Number of time bins; if 0, derived as int(max_t / bin_dt).

    Returns
    -------
    np.ndarray of int32
        Concatenation of bin indices and recoded choices (choice -1 -> 0)
        along the last axis, shape (n, 2) for (n, 1) inputs.
    """
    # FIX: `deepcopy` was used without ever being imported at module level,
    # which raised NameError on every call. Keep the import local so this
    # function is self-contained.
    from copy import deepcopy

    out_copy = deepcopy(out)

    # Build the bin edges: nbins - 1 finite edges spanning [0, max_t] plus
    # +inf so any response time beyond max_t falls into the last bin.
    if nbins == 0:
        nbins = int(out[2]['max_t'] / bin_dt)
    bins = np.zeros(nbins + 1)
    bins[:nbins] = np.linspace(0, out[2]['max_t'], nbins)
    bins[nbins] = np.inf

    # Vectorized binning (replaces the original O(n * nbins) double loop).
    # np.digitize uses left-inclusive bins, which also fixes the original
    # edge-case bug where an rt exactly equal to a bin edge was skipped and
    # its raw value leaked through as a "bin index".
    rts = out_copy[0]
    binned = (np.digitize(rts, bins) - 1).astype(rts.dtype)
    # In-place assignment keeps `out_copy` valid even when it is a tuple.
    out_copy[0][...] = binned

    # Recode choice -1 as 0 so choices form non-negative class indices.
    out_copy[1][out_copy[1] == -1] = 0
    return np.concatenate([out_copy[0], out_copy[1]], axis = -1).astype(np.int32)
def bin_simulator_output(out = None,
                         bin_dt = 0.04,
                         nbins = 0,
                         max_t = -1,
                         freq_cnt = False): # ['v', 'a', 'w', 'ndt', 'angle']
    """Histogram simulator rts per choice into an (nbins, n_choices) array.

    Parameters
    ----------
    out : tuple or list
        Simulator output: out[0] rts, out[1] choices, out[2] metadata dict
        with 'possible_choices', 'max_t' and 'n_samples'.
    bin_dt : float
        Bin width in seconds; only used when nbins == 0.
    nbins : int
        Number of time bins; if 0, derived as int(max_t / bin_dt).
    max_t : float
        Histogram range upper bound; -1 means take it from out[2]['max_t'].
    freq_cnt : bool
        If False (default), normalize counts by out[2]['n_samples'].

    Returns
    -------
    np.ndarray, shape (nbins, n_choices)
        One histogram column per entry of out[2]['possible_choices'].
    """
    if max_t == -1:
        max_t = out[2]['max_t']

    if nbins == 0:
        nbins = int(max_t / bin_dt)

    # nbins - 1 finite edges over [0, max_t], then +inf to catch rts > max_t.
    edges = np.zeros(nbins + 1)
    edges[:nbins] = np.linspace(0, max_t, nbins)
    edges[nbins] = np.inf

    choice_set = out[2]['possible_choices']
    counts = np.zeros((nbins, len(choice_set)))
    for col, choice in enumerate(choice_set):
        counts[:, col] = np.histogram(out[0][out[1] == choice], bins = edges)[0]

    if not freq_cnt:
        # Convert raw counts into relative frequencies.
        counts = counts / out[2]['n_samples']
    return counts
def bin_arbitrary_fptd(out = None,
                       bin_dt = 0.04,
                       nbins = 256,
                       nchoices = 2,
                       choice_codes = [-1.0, 1.0],
                       max_t = 10.0): # ['v', 'a', 'w', 'ndt', 'angle']
    """Bin an arbitrary (rt, choice) array into per-choice rt histograms.

    Parameters
    ----------
    out : np.ndarray, shape (n, 2)
        Column 0: response times; column 1: choice codes.
    bin_dt : float
        Bin width in seconds; only used when nbins == 0.
    nbins : int
        Number of time bins; if 0, derived as int(max_t / bin_dt).
    nchoices : int
        Number of choice columns in the output.
    choice_codes : list of float
        Choice code for each output column, in column order.
    max_t : float
        Histogram range upper bound.

    Returns
    -------
    np.ndarray, shape (nbins, nchoices)
        Raw (unnormalized) counts per time bin and choice.
    """
    if nbins == 0:
        nbins = int(max_t / bin_dt)
    # nbins - 1 finite edges over [0, max_t] plus +inf to capture rts > max_t.
    bins = np.zeros(nbins + 1)
    bins[:nbins] = np.linspace(0, max_t, nbins)
    bins[nbins] = np.inf

    counts = np.zeros((nbins, nchoices))
    # FIX: removed a leftover debug print() of the histogram edges that
    # polluted stdout on every call.
    for cnt, choice in enumerate(choice_codes):
        counts[:, cnt] = np.histogram(out[:, 0][out[:, 1] == choice], bins = bins)[0]
    return counts
def simulator(theta, 
              model = 'angle', 
              n_samples = 1000,
              n_trials = 1,
              delta_t = 0.001,
              max_t = 20,
              cartoon = False,
              bin_dim = None,
              bin_pointwise = False):
    """Dispatch to the requested sequential-sampling model simulator.

    Parameters
    ----------
    theta : list, np.ndarray or tensor
        Parameter matrix of shape (n_trials, n_params); a flat vector is
        promoted to (1, n_params). Column layout depends on `model`.
    model : str
        Which generative model to simulate ('ddm', 'angle', 'weibull',
        'levy', 'full_ddm', 'ddm_sdv', 'ornstein', 'race_3', 'lca_3',
        'race_4', 'lca_4', 'ddm_seq2', 'ddm_par2', 'ddm_mic2', ...).
    n_samples : int
        Number of simulated trials per parameter row.
    n_trials : int
        Number of parameter rows; must equal theta.shape[0].
    delta_t : float
        Integration step size of the underlying simulators.
    max_t : float
        Maximum simulated time.
    cartoon : bool
        If True, set the diffusion noise s to 0 for clean trajectories.
    bin_dim : int or None
        If > 0, return binned output with bin_dim time bins
        (only supported for n_trials == 1).
    bin_pointwise : bool
        If True, binning returns per-trial (bin, choice) pairs instead of
        a histogram.

    Returns
    -------
    (rts, choices, metadata) from the chosen simulator, a binned variant,
    or a string error message for unsupported binning configurations.
    """
    # Useful for sbi
    # Normalize theta to a float32 2D array regardless of input container
    # (list, ndarray, or framework tensor exposing .numpy()).
    if type(theta) == list:
        print('theta is supplied as list --> simulator assumes n_trials = 1')
        theta = np.asarray(theta).astype(np.float32)
    elif type(theta) == np.ndarray:
        theta = theta.astype(np.float32)
    else:
        theta = theta.numpy()
    if len(theta.shape) < 2:
        theta = np.expand_dims(theta, axis = 0)
    if theta.shape[0] != n_trials:
        # Implicitly returns None on mismatch.
        print('ERROR number of trials does not match first dimension of theta array')
        return
    # 2 choice models
    # Scalar diffusion noise for the two-choice simulators.
    if cartoon:
        s = 0.0
    else:
        s = 1.0
    if model == 'test':
        x = ddm_flexbound(v = theta[:, 0],
                          a = theta[:, 1],
                          w = theta[:, 2],
                          ndt = theta[:, 3],
                          s = s,
                          n_samples = n_samples,
                          n_trials = n_trials,
                          delta_t = delta_t,
                          boundary_params = {},
                          boundary_fun = bf.constant,
                          boundary_multiplicative = True,
                          max_t = max_t)
    if model == 'ddm' or model == 'ddm_elife' or model == 'ddm_analytic':
        # NOTE(review): this branch hard-codes n_trials = 1 while every other
        # branch passes the n_trials argument through -- confirm intentional.
        x = ddm_flexbound(v = theta[:, 0],
                          a = theta[:, 1],
                          w = theta[:, 2],
                          ndt = theta[:, 3],
                          s = s,
                          n_samples = n_samples,
                          n_trials = 1,
                          delta_t = delta_t,
                          boundary_params = {},
                          boundary_fun = bf.constant,
                          boundary_multiplicative = True,
                          max_t = max_t)
    if model == 'angle' or model == 'angle2':
        # Linearly collapsing (angle) bound; additive boundary.
        x = ddm_flexbound(v = theta[:, 0],
                          a = theta[:, 1],
                          w = theta[:, 2],
                          ndt = theta[:, 3],
                          s = s,
                          boundary_fun = bf.angle,
                          boundary_multiplicative = False,
                          boundary_params = {'theta': theta[:, 4]},
                          delta_t = delta_t,
                          n_samples = n_samples,
                          n_trials = n_trials,
                          max_t = max_t)
    if model == 'weibull_cdf' or model == 'weibull_cdf2' or model == 'weibull_cdf_ext' or model == 'weibull_cdf_concave' or model == 'weibull':
        # Weibull-CDF collapsing bound; multiplicative boundary.
        x = ddm_flexbound(v = theta[:, 0],
                          a = theta[:, 1],
                          w = theta[:, 2],
                          ndt = theta[:, 3],
                          s = s,
                          boundary_fun = bf.weibull_cdf,
                          boundary_multiplicative = True,
                          boundary_params = {'alpha': theta[:, 4], 'beta': theta[:, 5]},
                          delta_t = delta_t,
                          n_samples = n_samples,
                          n_trials = n_trials,
                          max_t = max_t)
    if model == 'levy':
        # Levy-flight noise with stability parameter alpha_diff.
        x = levy_flexbound(v = theta[:, 0],
                           a = theta[:, 1],
                           w = theta[:, 2],
                           alpha_diff = theta[:, 3],
                           ndt = theta[:, 4],
                           s = s,
                           boundary_fun = bf.constant,
                           boundary_multiplicative = True,
                           boundary_params = {},
                           delta_t = delta_t,
                           n_samples = n_samples,
                           n_trials = n_trials,
                           max_t = max_t)
    if model == 'full_ddm' or model == 'full_ddm2':
        # Full DDM: trial-to-trial variability in start point (dw),
        # drift (sdv) and non-decision time (dndt).
        x = full_ddm(v = theta[:, 0],
                     a = theta[:, 1],
                     w = theta[:, 2],
                     ndt = theta[:, 3],
                     dw = theta[:, 4],
                     sdv = theta[:, 5],
                     dndt = theta[:, 6],
                     s = s,
                     boundary_fun = bf.constant,
                     boundary_multiplicative = True,
                     boundary_params = {},
                     delta_t = delta_t,
                     n_samples = n_samples,
                     n_trials = n_trials,
                     max_t = max_t)
    if model == 'ddm_sdv':
        # DDM with trial-to-trial drift variability only.
        x = ddm_sdv(v = theta[:, 0],
                    a = theta[:, 1],
                    w = theta[:, 2],
                    ndt = theta[:, 3],
                    sdv = theta[:, 4],
                    s = s,
                    boundary_fun = bf.constant,
                    boundary_multiplicative = True,
                    boundary_params = {},
                    delta_t = delta_t,
                    n_samples = n_samples,
                    n_trials = n_trials,
                    max_t = max_t)
    if model == 'ornstein' or model == 'ornstein_uhlenbeck':
        # Ornstein-Uhlenbeck process: g is the decay/leak parameter.
        x = ornstein_uhlenbeck(v = theta[:, 0],
                               a = theta[:, 1],
                               w = theta[:, 2],
                               g = theta[:, 3],
                               ndt = theta[:, 4],
                               s = s,
                               boundary_fun = bf.constant,
                               boundary_multiplicative = True,
                               boundary_params = {},
                               delta_t = delta_t,
                               n_samples = n_samples,
                               n_trials = n_trials,
                               max_t = max_t)
    # 3 Choice models
    # Per-accumulator noise vector (one entry per accumulator), tiled per trial.
    if cartoon:
        s = np.tile(np.array([0.0, 0.0, 0.0], dtype = np.float32), (n_trials, 1))
    else:
        s = np.tile(np.array([1.0, 1.0, 1.0], dtype = np.float32), (n_trials, 1))
    if model == 'race_model_3' or model == 'race_3':
        # theta layout: v[0:3], a[3], w[4:7], ndt[7]
        x = race_model(v = theta[:, :3],
                       a = theta[:, [3]],
                       w = theta[:, 4:7],
                       ndt = theta[:, [7]],
                       s = s,
                       boundary_fun = bf.constant,
                       boundary_multiplicative = True,
                       boundary_params = {},
                       delta_t = delta_t,
                       n_samples = n_samples,
                       n_trials = n_trials,
                       max_t = max_t)
    if model == 'lca_3':
        # NOTE(review): a = theta[:, [4]] overlaps w's slice theta[:, 4:7],
        # and race_3 above takes a from column 3 -- this looks like an
        # indexing bug (a should presumably be theta[:, [3]]); confirm.
        x = lca(v = theta[:, :3],
                a = theta[:, [4]],
                w = theta[:, 4:7],
                g = theta[:, [7]],
                b = theta[:, [8]],
                ndt = theta[:, [9]],
                s = s,
                boundary_fun = bf.constant,
                boundary_multiplicative = True,
                boundary_params = {},
                delta_t = delta_t,
                n_samples = n_samples,
                n_trials = n_trials,
                max_t = max_t)
    # 4 Choice models
    if cartoon:
        s = np.tile(np.array([0.0, 0.0, 0.0, 0.0], dtype = np.float32), (n_trials, 1))
    else:
        s = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype = np.float32), (n_trials, 1))
    if model == 'race_model_4' or model == 'race_4':
        # theta layout: v[0:4], a[4], w[5:9], ndt[9]
        x = race_model(v = theta[:, :4],
                       a = theta[:, [4]],
                       w = theta[:, 5:9],
                       ndt = theta[:, [9]],
                       s = s,
                       boundary_fun = bf.constant,
                       boundary_multiplicative = True,
                       boundary_params = {},
                       delta_t = delta_t,
                       n_samples = n_samples,
                       n_trials = n_trials,
                       max_t = max_t)
    if model == 'lca_4':
        # theta layout: v[0:4], a[4], w[5:9], g[9], b[10], ndt[11]
        x = lca(v = theta[:, :4],
                a = theta[:, [4]],
                w = theta[:, 5:9],
                g = theta[:, [9]],
                b = theta[:, [10]],
                ndt = theta[:, [11]],
                s = s,
                boundary_fun = bf.constant,
                boundary_multiplicative = True,
                boundary_params = {},
                delta_t = delta_t,
                n_samples = n_samples,
                n_trials = n_trials,
                max_t = max_t)
    # Seq / Parallel models (4 choice)
    # Back to scalar noise for the hierarchical two-stage models.
    if cartoon:
        s = 0.0
    else:
        s = 1.0
    if model == 'ddm_seq2':
        # Sequential two-stage choice: high-level (v_h/w_h) then low-level.
        x = ddm_flexbound_seq2(v_h = theta[:, 0],
                               v_l_1 = theta[:, 1],
                               v_l_2 = theta[:, 2],
                               a = theta[:, 3],
                               w_h = theta[:, 4],
                               w_l_1 = theta[:, 5],
                               w_l_2 = theta[:, 6],
                               ndt = theta[:, 7],
                               s = s,
                               n_samples = n_samples,
                               n_trials = n_trials,
                               delta_t = delta_t,
                               max_t = max_t,
                               boundary_fun = bf.constant,
                               boundary_multiplicative = True,
                               boundary_params = {})
    if model == 'ddm_par2':
        # Parallel two-stage choice: same parameter layout as ddm_seq2.
        x = ddm_flexbound_par2(v_h = theta[:, 0],
                               v_l_1 = theta[:, 1],
                               v_l_2 = theta[:, 2],
                               a = theta[:, 3],
                               w_h = theta[:, 4],
                               w_l_1 = theta[:, 5],
                               w_l_2 = theta[:, 6],
                               ndt = theta[:, 7],
                               s = s,
                               n_samples = n_samples,
                               n_trials = n_trials,
                               delta_t = delta_t,
                               max_t = max_t,
                               boundary_fun = bf.constant,
                               boundary_multiplicative = True,
                               boundary_params = {})
    if model == 'ddm_mic2':
        # Mixed two-stage model; extra mixing parameter d at column 7,
        # shifting ndt to column 8.
        x = ddm_flexbound_mic2(v_h = theta[:, 0],
                               v_l_1 = theta[:, 1],
                               v_l_2 = theta[:, 2],
                               a = theta[:, 3],
                               w_h = theta[:, 4],
                               w_l_1 = theta[:, 5],
                               w_l_2 = theta[:, 6],
                               d = theta[:, 7],
                               ndt = theta[:, 8],
                               s = s,
                               n_samples = n_samples,
                               n_trials = n_trials,
                               delta_t = delta_t,
                               max_t = max_t,
                               boundary_fun = bf.constant,
                               boundary_multiplicative = True,
                               boundary_params = {})
    if n_trials == 1:
        #print('passing through')
        #print(x)
        # Drop the singleton trial axis from rts and choices.
        x = (np.squeeze(x[0], axis = 1), np.squeeze(x[1], axis = 1), x[2])
    # Optional binning of the raw output (single-trial only).
    if bin_dim == 0 or bin_dim == None:
        return x
    elif bin_dim > 0 and not bin_pointwise and n_trials == 1:
        binned_out = bin_simulator_output(x, nbins = bin_dim)
        return (binned_out, x[2])
    elif bin_dim > 0 and bin_pointwise and n_trials == 1:
        binned_out = bin_simulator_output_pointwise(x, nbins = bin_dim)
        return (np.expand_dims(binned_out[:,0], axis = 1), np.expand_dims(binned_out[:, 1], axis = 1), x[2])
    elif bin_dim > 0 and n_trials > 1:
        return 'currently binned outputs not implemented for multi-trial simulators'
    elif bin_dim == -1:
        return 'invalid bin_dim'
| [
"cddm_data_simulation.ornstein_uhlenbeck",
"cddm_data_simulation.ddm_flexbound_mic2",
"numpy.concatenate",
"cddm_data_simulation.levy_flexbound",
"cddm_data_simulation.lca",
"numpy.asarray",
"numpy.zeros",
"numpy.expand_dims",
"numpy.histogram",
"cddm_data_simulation.full_ddm",
"cddm_data_simula... | [((3449, 3476), 'numpy.zeros', 'np.zeros', (['(nbins, nchoices)'], {}), '((nbins, nchoices))\n', (3457, 3476), True, 'import numpy as np\n'), ((1027, 1046), 'numpy.zeros', 'np.zeros', (['(nbins + 1)'], {}), '(nbins + 1)\n', (1035, 1046), True, 'import numpy as np\n'), ((1070, 1108), 'numpy.linspace', 'np.linspace', (['(0)', "out[2]['max_t']", 'nbins'], {}), "(0, out[2]['max_t'], nbins)\n", (1081, 1108), True, 'import numpy as np\n'), ((1165, 1184), 'numpy.zeros', 'np.zeros', (['(nbins + 1)'], {}), '(nbins + 1)\n', (1173, 1184), True, 'import numpy as np\n'), ((1208, 1246), 'numpy.linspace', 'np.linspace', (['(0)', "out[2]['max_t']", 'nbins'], {}), "(0, out[2]['max_t'], nbins)\n", (1219, 1246), True, 'import numpy as np\n'), ((2278, 2297), 'numpy.zeros', 'np.zeros', (['(nbins + 1)'], {}), '(nbins + 1)\n', (2286, 2297), True, 'import numpy as np\n'), ((2321, 2349), 'numpy.linspace', 'np.linspace', (['(0)', 'max_t', 'nbins'], {}), '(0, max_t, nbins)\n', (2332, 2349), True, 'import numpy as np\n'), ((2406, 2425), 'numpy.zeros', 'np.zeros', (['(nbins + 1)'], {}), '(nbins + 1)\n', (2414, 2425), True, 'import numpy as np\n'), ((2449, 2477), 'numpy.linspace', 'np.linspace', (['(0)', 'max_t', 'nbins'], {}), '(0, max_t, nbins)\n', (2460, 2477), True, 'import numpy as np\n'), ((3192, 3211), 'numpy.zeros', 'np.zeros', (['(nbins + 1)'], {}), '(nbins + 1)\n', (3200, 3211), True, 'import numpy as np\n'), ((3235, 3263), 'numpy.linspace', 'np.linspace', (['(0)', 'max_t', 'nbins'], {}), '(0, max_t, nbins)\n', (3246, 3263), True, 'import numpy as np\n'), ((3322, 3341), 'numpy.zeros', 'np.zeros', (['(nbins + 1)'], {}), '(nbins + 1)\n', (3330, 3341), True, 'import numpy as np\n'), ((3365, 3393), 'numpy.linspace', 'np.linspace', (['(0)', 'max_t', 'nbins'], {}), '(0, max_t, nbins)\n', (3376, 3393), True, 'import numpy as np\n'), ((4337, 4366), 'numpy.expand_dims', 'np.expand_dims', (['theta'], {'axis': '(0)'}), '(theta, axis=0)\n', (4351, 4366), True, 'import numpy 
as np\n'), ((4625, 4864), 'cddm_data_simulation.ddm_flexbound', 'ddm_flexbound', ([], {'v': 'theta[:, 0]', 'a': 'theta[:, 1]', 'w': 'theta[:, 2]', 'ndt': 'theta[:, 3]', 's': 's', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'delta_t': 'delta_t', 'boundary_params': '{}', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'max_t': 'max_t'}), '(v=theta[:, 0], a=theta[:, 1], w=theta[:, 2], ndt=theta[:, 3],\n s=s, n_samples=n_samples, n_trials=n_trials, delta_t=delta_t,\n boundary_params={}, boundary_fun=bf.constant, boundary_multiplicative=\n True, max_t=max_t)\n', (4638, 4864), False, 'from cddm_data_simulation import ddm_flexbound\n'), ((5254, 5482), 'cddm_data_simulation.ddm_flexbound', 'ddm_flexbound', ([], {'v': 'theta[:, 0]', 'a': 'theta[:, 1]', 'w': 'theta[:, 2]', 'ndt': 'theta[:, 3]', 's': 's', 'n_samples': 'n_samples', 'n_trials': '(1)', 'delta_t': 'delta_t', 'boundary_params': '{}', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'max_t': 'max_t'}), '(v=theta[:, 0], a=theta[:, 1], w=theta[:, 2], ndt=theta[:, 3],\n s=s, n_samples=n_samples, n_trials=1, delta_t=delta_t, boundary_params=\n {}, boundary_fun=bf.constant, boundary_multiplicative=True, max_t=max_t)\n', (5267, 5482), False, 'from cddm_data_simulation import ddm_flexbound\n'), ((5848, 6105), 'cddm_data_simulation.ddm_flexbound', 'ddm_flexbound', ([], {'v': 'theta[:, 0]', 'a': 'theta[:, 1]', 'w': 'theta[:, 2]', 'ndt': 'theta[:, 3]', 's': 's', 'boundary_fun': 'bf.angle', 'boundary_multiplicative': '(False)', 'boundary_params': "{'theta': theta[:, 4]}", 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'max_t': 'max_t'}), "(v=theta[:, 0], a=theta[:, 1], w=theta[:, 2], ndt=theta[:, 3],\n s=s, boundary_fun=bf.angle, boundary_multiplicative=False,\n boundary_params={'theta': theta[:, 4]}, delta_t=delta_t, n_samples=\n n_samples, n_trials=n_trials, max_t=max_t)\n", (5861, 6105), False, 'from cddm_data_simulation import ddm_flexbound\n'), ((6569, 
6852), 'cddm_data_simulation.ddm_flexbound', 'ddm_flexbound', ([], {'v': 'theta[:, 0]', 'a': 'theta[:, 1]', 'w': 'theta[:, 2]', 'ndt': 'theta[:, 3]', 's': 's', 'boundary_fun': 'bf.weibull_cdf', 'boundary_multiplicative': '(True)', 'boundary_params': "{'alpha': theta[:, 4], 'beta': theta[:, 5]}", 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'max_t': 'max_t'}), "(v=theta[:, 0], a=theta[:, 1], w=theta[:, 2], ndt=theta[:, 3],\n s=s, boundary_fun=bf.weibull_cdf, boundary_multiplicative=True,\n boundary_params={'alpha': theta[:, 4], 'beta': theta[:, 5]}, delta_t=\n delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t)\n", (6582, 6852), False, 'from cddm_data_simulation import ddm_flexbound\n'), ((7198, 7462), 'cddm_data_simulation.levy_flexbound', 'levy_flexbound', ([], {'v': 'theta[:, 0]', 'a': 'theta[:, 1]', 'w': 'theta[:, 2]', 'alpha_diff': 'theta[:, 3]', 'ndt': 'theta[:, 4]', 's': 's', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}', 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'max_t': 'max_t'}), '(v=theta[:, 0], a=theta[:, 1], w=theta[:, 2], alpha_diff=\n theta[:, 3], ndt=theta[:, 4], s=s, boundary_fun=bf.constant,\n boundary_multiplicative=True, boundary_params={}, delta_t=delta_t,\n n_samples=n_samples, n_trials=n_trials, max_t=max_t)\n', (7212, 7462), False, 'from cddm_data_simulation import levy_flexbound\n'), ((7876, 8163), 'cddm_data_simulation.full_ddm', 'full_ddm', ([], {'v': 'theta[:, 0]', 'a': 'theta[:, 1]', 'w': 'theta[:, 2]', 'ndt': 'theta[:, 3]', 'dw': 'theta[:, 4]', 'sdv': 'theta[:, 5]', 'dndt': 'theta[:, 6]', 's': 's', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}', 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'max_t': 'max_t'}), '(v=theta[:, 0], a=theta[:, 1], w=theta[:, 2], ndt=theta[:, 3], dw=\n theta[:, 4], sdv=theta[:, 5], dndt=theta[:, 6], s=s, boundary_fun=bf.\n 
constant, boundary_multiplicative=True, boundary_params={}, delta_t=\n delta_t, n_samples=n_samples, n_trials=n_trials, max_t=max_t)\n', (7884, 8163), False, 'from cddm_data_simulation import full_ddm\n'), ((8521, 8772), 'cddm_data_simulation.ddm_sdv', 'ddm_sdv', ([], {'v': 'theta[:, 0]', 'a': 'theta[:, 1]', 'w': 'theta[:, 2]', 'ndt': 'theta[:, 3]', 'sdv': 'theta[:, 4]', 's': 's', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}', 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'max_t': 'max_t'}), '(v=theta[:, 0], a=theta[:, 1], w=theta[:, 2], ndt=theta[:, 3], sdv=\n theta[:, 4], s=s, boundary_fun=bf.constant, boundary_multiplicative=\n True, boundary_params={}, delta_t=delta_t, n_samples=n_samples,\n n_trials=n_trials, max_t=max_t)\n', (8528, 8772), False, 'from cddm_data_simulation import ddm_sdv\n'), ((9111, 9370), 'cddm_data_simulation.ornstein_uhlenbeck', 'ornstein_uhlenbeck', ([], {'v': 'theta[:, 0]', 'a': 'theta[:, 1]', 'w': 'theta[:, 2]', 'g': 'theta[:, 3]', 'ndt': 'theta[:, 4]', 's': 's', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}', 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'max_t': 'max_t'}), '(v=theta[:, 0], a=theta[:, 1], w=theta[:, 2], g=theta[:, \n 3], ndt=theta[:, 4], s=s, boundary_fun=bf.constant,\n boundary_multiplicative=True, boundary_params={}, delta_t=delta_t,\n n_samples=n_samples, n_trials=n_trials, max_t=max_t)\n', (9129, 9370), False, 'from cddm_data_simulation import ornstein_uhlenbeck\n'), ((10039, 10283), 'cddm_data_simulation.race_model', 'race_model', ([], {'v': 'theta[:, :3]', 'a': 'theta[:, [3]]', 'w': 'theta[:, 4:7]', 'ndt': 'theta[:, [7]]', 's': 's', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}', 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'max_t': 'max_t'}), '(v=theta[:, :3], a=theta[:, [3]], w=theta[:, 4:7], 
ndt=theta[:, [\n 7]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True,\n boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=\n n_trials, max_t=max_t)\n', (10049, 10283), False, 'from cddm_data_simulation import race_model\n'), ((10593, 10863), 'cddm_data_simulation.lca', 'lca', ([], {'v': 'theta[:, :3]', 'a': 'theta[:, [4]]', 'w': 'theta[:, 4:7]', 'g': 'theta[:, [7]]', 'b': 'theta[:, [8]]', 'ndt': 'theta[:, [9]]', 's': 's', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}', 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'max_t': 'max_t'}), '(v=theta[:, :3], a=theta[:, [4]], w=theta[:, 4:7], g=theta[:, [7]], b=\n theta[:, [8]], ndt=theta[:, [9]], s=s, boundary_fun=bf.constant,\n boundary_multiplicative=True, boundary_params={}, delta_t=delta_t,\n n_samples=n_samples, n_trials=n_trials, max_t=max_t)\n', (10596, 10863), False, 'from cddm_data_simulation import lca\n'), ((11377, 11621), 'cddm_data_simulation.race_model', 'race_model', ([], {'v': 'theta[:, :4]', 'a': 'theta[:, [4]]', 'w': 'theta[:, 5:9]', 'ndt': 'theta[:, [9]]', 's': 's', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}', 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'max_t': 'max_t'}), '(v=theta[:, :4], a=theta[:, [4]], w=theta[:, 5:9], ndt=theta[:, [\n 9]], s=s, boundary_fun=bf.constant, boundary_multiplicative=True,\n boundary_params={}, delta_t=delta_t, n_samples=n_samples, n_trials=\n n_trials, max_t=max_t)\n', (11387, 11621), False, 'from cddm_data_simulation import race_model\n'), ((11931, 12203), 'cddm_data_simulation.lca', 'lca', ([], {'v': 'theta[:, :4]', 'a': 'theta[:, [4]]', 'w': 'theta[:, 5:9]', 'g': 'theta[:, [9]]', 'b': 'theta[:, [10]]', 'ndt': 'theta[:, [11]]', 's': 's', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}', 'delta_t': 'delta_t', 'n_samples': 'n_samples', 'n_trials': 
'n_trials', 'max_t': 'max_t'}), '(v=theta[:, :4], a=theta[:, [4]], w=theta[:, 5:9], g=theta[:, [9]], b=\n theta[:, [10]], ndt=theta[:, [11]], s=s, boundary_fun=bf.constant,\n boundary_multiplicative=True, boundary_params={}, delta_t=delta_t,\n n_samples=n_samples, n_trials=n_trials, max_t=max_t)\n', (11934, 12203), False, 'from cddm_data_simulation import lca\n'), ((12575, 12905), 'cddm_data_simulation.ddm_flexbound_seq2', 'ddm_flexbound_seq2', ([], {'v_h': 'theta[:, 0]', 'v_l_1': 'theta[:, 1]', 'v_l_2': 'theta[:, 2]', 'a': 'theta[:, 3]', 'w_h': 'theta[:, 4]', 'w_l_1': 'theta[:, 5]', 'w_l_2': 'theta[:, 6]', 'ndt': 'theta[:, 7]', 's': 's', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'delta_t': 'delta_t', 'max_t': 'max_t', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}'}), '(v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a\n =theta[:, 3], w_h=theta[:, 4], w_l_1=theta[:, 5], w_l_2=theta[:, 6],\n ndt=theta[:, 7], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=\n delta_t, max_t=max_t, boundary_fun=bf.constant, boundary_multiplicative\n =True, boundary_params={})\n', (12593, 12905), False, 'from cddm_data_simulation import ddm_flexbound_seq2\n'), ((13425, 13755), 'cddm_data_simulation.ddm_flexbound_par2', 'ddm_flexbound_par2', ([], {'v_h': 'theta[:, 0]', 'v_l_1': 'theta[:, 1]', 'v_l_2': 'theta[:, 2]', 'a': 'theta[:, 3]', 'w_h': 'theta[:, 4]', 'w_l_1': 'theta[:, 5]', 'w_l_2': 'theta[:, 6]', 'ndt': 'theta[:, 7]', 's': 's', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'delta_t': 'delta_t', 'max_t': 'max_t', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}'}), '(v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a\n =theta[:, 3], w_h=theta[:, 4], w_l_1=theta[:, 5], w_l_2=theta[:, 6],\n ndt=theta[:, 7], s=s, n_samples=n_samples, n_trials=n_trials, delta_t=\n delta_t, max_t=max_t, boundary_fun=bf.constant, boundary_multiplicative\n =True, boundary_params={})\n', 
(13443, 13755), False, 'from cddm_data_simulation import ddm_flexbound_par2\n'), ((14275, 14620), 'cddm_data_simulation.ddm_flexbound_mic2', 'ddm_flexbound_mic2', ([], {'v_h': 'theta[:, 0]', 'v_l_1': 'theta[:, 1]', 'v_l_2': 'theta[:, 2]', 'a': 'theta[:, 3]', 'w_h': 'theta[:, 4]', 'w_l_1': 'theta[:, 5]', 'w_l_2': 'theta[:, 6]', 'd': 'theta[:, 7]', 'ndt': 'theta[:, 8]', 's': 's', 'n_samples': 'n_samples', 'n_trials': 'n_trials', 'delta_t': 'delta_t', 'max_t': 'max_t', 'boundary_fun': 'bf.constant', 'boundary_multiplicative': '(True)', 'boundary_params': '{}'}), '(v_h=theta[:, 0], v_l_1=theta[:, 1], v_l_2=theta[:, 2], a\n =theta[:, 3], w_h=theta[:, 4], w_l_1=theta[:, 5], w_l_2=theta[:, 6], d=\n theta[:, 7], ndt=theta[:, 8], s=s, n_samples=n_samples, n_trials=\n n_trials, delta_t=delta_t, max_t=max_t, boundary_fun=bf.constant,\n boundary_multiplicative=True, boundary_params={})\n', (14293, 14620), False, 'from cddm_data_simulation import ddm_flexbound_mic2\n'), ((1827, 1878), 'numpy.concatenate', 'np.concatenate', (['[out_copy[0], out_copy[1]]'], {'axis': '(-1)'}), '([out_copy[0], out_copy[1]], axis=-1)\n', (1841, 1878), True, 'import numpy as np\n'), ((2659, 2708), 'numpy.histogram', 'np.histogram', (['out[0][out[1] == choice]'], {'bins': 'bins'}), '(out[0][out[1] == choice], bins=bins)\n', (2671, 2708), True, 'import numpy as np\n'), ((3538, 3593), 'numpy.histogram', 'np.histogram', (['out[:, 0][out[:, 1] == choice]'], {'bins': 'bins'}), '(out[:, 0][out[:, 1] == choice], bins=bins)\n', (3550, 3593), True, 'import numpy as np\n'), ((9819, 9862), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'np.float32'}), '([0.0, 0.0, 0.0], dtype=np.float32)\n', (9827, 9862), True, 'import numpy as np\n'), ((9911, 9954), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {'dtype': 'np.float32'}), '([1.0, 1.0, 1.0], dtype=np.float32)\n', (9919, 9954), True, 'import numpy as np\n'), ((11147, 11195), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {'dtype': 
'np.float32'}), '([0.0, 0.0, 0.0, 0.0], dtype=np.float32)\n', (11155, 11195), True, 'import numpy as np\n'), ((11244, 11292), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {'dtype': 'np.float32'}), '([1.0, 1.0, 1.0, 1.0], dtype=np.float32)\n', (11252, 11292), True, 'import numpy as np\n'), ((15224, 15248), 'numpy.squeeze', 'np.squeeze', (['x[0]'], {'axis': '(1)'}), '(x[0], axis=1)\n', (15234, 15248), True, 'import numpy as np\n'), ((15252, 15276), 'numpy.squeeze', 'np.squeeze', (['x[1]'], {'axis': '(1)'}), '(x[1], axis=1)\n', (15262, 15276), True, 'import numpy as np\n'), ((3614, 3669), 'numpy.histogram', 'np.histogram', (['out[:, 0][out[:, 1] == choice]'], {'bins': 'bins'}), '(out[:, 0][out[:, 1] == choice], bins=bins)\n', (3626, 3669), True, 'import numpy as np\n'), ((4133, 4150), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (4143, 4150), True, 'import numpy as np\n'), ((15648, 15688), 'numpy.expand_dims', 'np.expand_dims', (['binned_out[:, 0]'], {'axis': '(1)'}), '(binned_out[:, 0], axis=1)\n', (15662, 15688), True, 'import numpy as np\n'), ((15691, 15731), 'numpy.expand_dims', 'np.expand_dims', (['binned_out[:, 1]'], {'axis': '(1)'}), '(binned_out[:, 1], axis=1)\n', (15705, 15731), True, 'import numpy as np\n')] |
import cv2
import numpy as np

# Licence-plate localisation pipeline: grey-scale -> blur -> morphological
# contrast enhancement -> Otsu threshold -> Canny edges -> contour filtering
# by area and aspect ratio -> crop the detected plate.

# Raw strings: the old 'pic\car2.jpg' relied on '\c' not being a valid escape
# sequence, which raises a SyntaxWarning on modern Python. Bytes unchanged.
image = cv2.imread(r'pic\car2.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grey-scale
# Gaussian blur to suppress noise
blurred = cv2.GaussianBlur(gray, (5, 5), 0, 0, cv2.BORDER_DEFAULT)
# Morphological opening with a large kernel, then subtract it from the
# blurred image (top-hat style) to flatten uneven illumination.
kernel = np.ones((23, 23), np.uint8)
opened = cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel)  # opening
opened = cv2.addWeighted(blurred, 1, opened, -1, 0)
# Otsu's method chooses the binarisation threshold automatically.
ret, thresh = cv2.threshold(opened, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Edge map of the thresholded image.
edge = cv2.Canny(thresh, 100, 200)
# Close, then open, to merge edge fragments into connected regions;
# the plate region should be among them.
kernel = np.ones((10, 10), np.uint8)
edge1 = cv2.morphologyEx(edge, cv2.MORPH_CLOSE, kernel)
edge2 = cv2.morphologyEx(edge1, cv2.MORPH_OPEN, kernel)
cv2.imshow('edge', edge2)  # inspect the edge image
cv2.imwrite(r'pic\edge2.jpg', edge2)
# Find candidate rectangular regions; the plate is expected among them.
contours, hierarchy = cv2.findContours(edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Drop tiny contours first.
temp_contours = []
for contour in contours:
    if cv2.contourArea(contour) > 500:
        temp_contours.append(contour)
# Keep contours whose minimum-area rectangle has a plate-like aspect ratio.
car_plates = []
for temp_contour in temp_contours:
    rect_tupple = cv2.minAreaRect(temp_contour)
    rect_width, rect_height = rect_tupple[1]
    if rect_width < rect_height:  # swap so width >= height
        rect_width, rect_height = rect_height, rect_width
    aspect_ratio = rect_width / rect_height
    # A normal plate has a width/height ratio between 2 and 5.5.
    if 2 < aspect_ratio < 5.5:
        car_plates.append(temp_contour)
        rect_vertices = cv2.boxPoints(rect_tupple)
        # BUG FIX: np.int0 was removed in NumPy 2.0; np.intp is the
        # identically-behaving alias it pointed to.
        rect_vertices = np.intp(rect_vertices)
if len(car_plates) == 1:
    for car_plate in car_plates:
        # Contour points are (x, y) pairs; axis-0 min/max give the
        # axis-aligned bounding box of the plate contour.
        x_min, y_min = np.min(car_plate[:, 0, :], axis=0)
        x_max, y_max = np.max(car_plate[:, 0, :], axis=0)
        cv2.rectangle(image, (x_min, y_min), (x_max, y_max), (0, 0, 0), 2)
        card = image[y_min:y_max, x_min:x_max]
        cv2.imshow("img", image)
        cv2.imshow("card_img.jpg", card)
        cv2.imwrite(r'pic\plate2.jpg', card)
# BUG FIX: without waitKey the imshow windows close immediately.
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.GaussianBlur",
"cv2.Canny",
"cv2.contourArea",
"numpy.int0",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.threshold",
"cv2.imwrite",
"numpy.ones",
"cv2.addWeighted",
"cv2.rectangle",
"cv2.imread",
"cv2.boxPoints",
"numpy.min",
"numpy.max",
"cv2.minAreaRect",
"cv2.imshow",
"cv2.fin... | [((42, 69), 'cv2.imread', 'cv2.imread', (['"""pic\\\\car2.jpg"""'], {}), "('pic\\\\car2.jpg')\n", (52, 69), False, 'import cv2\n'), ((79, 118), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (91, 118), False, 'import cv2\n'), ((149, 205), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)', '(0)', 'cv2.BORDER_DEFAULT'], {}), '(gray, (5, 5), 0, 0, cv2.BORDER_DEFAULT)\n', (165, 205), False, 'import cv2\n'), ((229, 256), 'numpy.ones', 'np.ones', (['(23, 23)', 'np.uint8'], {}), '((23, 23), np.uint8)\n', (236, 256), True, 'import numpy as np\n'), ((267, 316), 'cv2.morphologyEx', 'cv2.morphologyEx', (['blurred', 'cv2.MORPH_OPEN', 'kernel'], {}), '(blurred, cv2.MORPH_OPEN, kernel)\n', (283, 316), False, 'import cv2\n'), ((334, 376), 'cv2.addWeighted', 'cv2.addWeighted', (['blurred', '(1)', 'opened', '(-1)', '(0)'], {}), '(blurred, 1, opened, -1, 0)\n', (349, 376), False, 'import cv2\n'), ((413, 479), 'cv2.threshold', 'cv2.threshold', (['opened', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(opened, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (426, 479), False, 'import cv2\n'), ((498, 525), 'cv2.Canny', 'cv2.Canny', (['thresh', '(100)', '(200)'], {}), '(thresh, 100, 200)\n', (507, 525), False, 'import cv2\n'), ((594, 621), 'numpy.ones', 'np.ones', (['(10, 10)', 'np.uint8'], {}), '((10, 10), np.uint8)\n', (601, 621), True, 'import numpy as np\n'), ((631, 678), 'cv2.morphologyEx', 'cv2.morphologyEx', (['edge', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(edge, cv2.MORPH_CLOSE, kernel)\n', (647, 678), False, 'import cv2\n'), ((688, 735), 'cv2.morphologyEx', 'cv2.morphologyEx', (['edge1', 'cv2.MORPH_OPEN', 'kernel'], {}), '(edge1, cv2.MORPH_OPEN, kernel)\n', (704, 735), False, 'import cv2\n'), ((737, 762), 'cv2.imshow', 'cv2.imshow', (['"""edge"""', 'edge2'], {}), "('edge', edge2)\n", (747, 762), False, 'import cv2\n'), ((770, 806), 'cv2.imwrite', 'cv2.imwrite', 
(['"""pic\\\\edge2.jpg"""', 'edge2'], {}), "('pic\\\\edge2.jpg', edge2)\n", (781, 806), False, 'import cv2\n'), ((873, 936), 'cv2.findContours', 'cv2.findContours', (['edge2', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (889, 936), False, 'import cv2\n'), ((1176, 1205), 'cv2.minAreaRect', 'cv2.minAreaRect', (['temp_contour'], {}), '(temp_contour)\n', (1191, 1205), False, 'import cv2\n'), ((2074, 2106), 'cv2.imshow', 'cv2.imshow', (['"""card_img.jpg"""', 'card'], {}), "('card_img.jpg', card)\n", (2084, 2106), False, 'import cv2\n'), ((2115, 2151), 'cv2.imwrite', 'cv2.imwrite', (['"""pic\\\\plate2.jpg"""', 'card'], {}), "('pic\\\\plate2.jpg', card)\n", (2126, 2151), False, 'import cv2\n'), ((1008, 1032), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (1023, 1032), False, 'import cv2\n'), ((1577, 1603), 'cv2.boxPoints', 'cv2.boxPoints', (['rect_tupple'], {}), '(rect_tupple)\n', (1590, 1603), False, 'import cv2\n'), ((1635, 1657), 'numpy.int0', 'np.int0', (['rect_vertices'], {}), '(rect_vertices)\n', (1642, 1657), True, 'import numpy as np\n'), ((1769, 1803), 'numpy.min', 'np.min', (['car_plate[:, 0, :]'], {'axis': '(0)'}), '(car_plate[:, 0, :], axis=0)\n', (1775, 1803), True, 'import numpy as np\n'), ((1838, 1872), 'numpy.max', 'np.max', (['car_plate[:, 0, :]'], {'axis': '(0)'}), '(car_plate[:, 0, :], axis=0)\n', (1844, 1872), True, 'import numpy as np\n'), ((1888, 1962), 'cv2.rectangle', 'cv2.rectangle', (['image', '(row_min, col_min)', '(row_max, col_max)', '(0, 0, 0)', '(2)'], {}), '(image, (row_min, col_min), (row_max, col_max), (0, 0, 0), 2)\n', (1901, 1962), False, 'import cv2\n'), ((2041, 2065), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'image'], {}), "('img', image)\n", (2051, 2065), False, 'import cv2\n')] |
#This model is modified from DeepZip by Goyal et al
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Bidirectional
from keras.layers import LSTM, Flatten, Conv1D, LocallyConnected1D, MaxPooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D
from tensorflow.compat.v1.keras.layers import CuDNNLSTM
from tensorflow.compat.v1.keras.layers import CuDNNGRU
from math import sqrt
from keras.layers.embeddings import Embedding
from keras.callbacks import ModelCheckpoint, EarlyStopping
# from matplotlib import pyplot
import keras
from sklearn.preprocessing import OneHotEncoder
from keras.layers.normalization import BatchNormalization
import tensorflow as tf
import numpy as np
import argparse
import os
from keras.callbacks import CSVLogger
import models
# Reproducibility: fix the TensorFlow and NumPy RNG seeds.
tf.random.set_seed(42)
np.random.seed(0)
# Restrict TensorFlow to GPU index 1.
os.environ["CUDA_VISIBLE_DEVICES"]="1"
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument('-d', action='store', default=None,
                    dest='data',
                    help='choose sequence file')
parser.add_argument('-gpu', action='store', default="0",
                    dest='gpu',
                    help='choose gpu number')
parser.add_argument('-name', action='store', default="model1",
                    dest='name',
                    help='weights will be stored with this name')
parser.add_argument('-model_name', action='store', default=None,
                    dest='model_name',
                    help='name of the model to call')
parser.add_argument('-log_file', action='store',
                    dest='log_file',
                    help='Log file')
# NOTE(review): the '-gpu' argument is parsed but CUDA_VISIBLE_DEVICES is
# hard-coded to "1" above -- confirm which of the two is intended to win.
import keras.backend as K
def loss_fn(y_true, y_pred):
    """Categorical cross-entropy expressed in bits per symbol.

    Keras computes the cross-entropy in nats; multiplying by 1/ln(2)
    converts it to bits, the natural unit for a compression model.
    """
    nats_to_bits = 1 / np.log(2)
    return nats_to_bits * K.categorical_crossentropy(y_true, y_pred)
def stride_input(a, L, S):  # Window len = L, Stride len/stepsize = S
    """Return a read-only 2-D view of ``a``: windows of length L, step S.

    No data is copied -- the result is a strided view of the 1-D input
    array, with one window per row.
    """
    window_count = (a.size - L) // S + 1  # number of full windows
    item_stride = a.strides[0]            # bytes between consecutive items
    return np.lib.stride_tricks.as_strided(
        a,
        shape=(window_count, L),
        strides=(S * item_stride, item_stride),
        writeable=False)
def generate_single_output_data(file_path,batch_size,time_steps):
    """Load a symbol sequence and build (context, next-symbol) training pairs.

    Returns:
        X: array of shape (n, time_steps) -- the context windows.
        Y: one-hot array -- the symbol following each window.
        n is truncated down to a multiple of batch_size.
    """
    series = np.load(file_path)  # read the raw symbol sequence
    series = series.reshape(-1, 1)  # column vector, as OneHotEncoder requires
    # NOTE(review): the `sparse=` keyword was renamed `sparse_output=` in
    # scikit-learn 1.2 and removed in 1.4 -- confirm the pinned version.
    onehot_encoder = OneHotEncoder(sparse=False)
    onehot_encoded = onehot_encoder.fit(series)  # fit the encoder on the full alphabet
    series = series.reshape(-1)  # back to a flat 1-D sequence
    data = stride_input(series, time_steps+1, 1)  # windows of length time_steps+1
    # Truncate so the sample count is an exact multiple of the batch size.
    l = int(len(data)/batch_size) * batch_size
    data = data[:l]
    X = data[:, :-1]  # context: the first time_steps symbols of each window
    Y = data[:, -1:]  # target: the final symbol of each window
    Y = onehot_encoder.transform(Y)  # one-hot encode the targets
    return X,Y
def fit_model(X, Y, bs, nb_epoch, model):
    """Compile and train `model` on (X, Y) with checkpointing and early stopping.

    Weights are written to `arguments.name` and the training log to
    `arguments.log_file` -- both read from the module-level parsed CLI
    arguments, so this function must run after argument parsing.
    """
    y = Y
    # NOTE(review): `lr=` is the legacy spelling (newer Keras expects
    # `learning_rate=`) -- confirm against the installed Keras version.
    optim = keras.optimizers.Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0, amsgrad=False)
    model.compile(loss=loss_fn, optimizer=optim)
    # Save only the best weights (lowest training loss), weights only.
    checkpoint = ModelCheckpoint(arguments.name, monitor='loss', verbose=1, save_best_only=True, mode='min', save_weights_only=True)
    csv_logger = CSVLogger(arguments.log_file, append=True, separator=';')
    # Stop once training loss improves by < 0.005 for 3 consecutive epochs.
    early_stopping = EarlyStopping(monitor='loss', mode='min', min_delta=0.005, patience=3, verbose=1)
    callbacks_list = [checkpoint, csv_logger, early_stopping]
    model.fit(X, y, epochs=nb_epoch, batch_size=bs, verbose=1, shuffle=True, callbacks=callbacks_list)
arguments = parser.parse_args()
print(arguments)
# Model hyper-parameters.
batch_size=128
sequence_length=64
num_epochs=20
# Build the training pairs, look the requested architecture up by name in
# the local `models` module, then train it.
X,Y = generate_single_output_data(arguments.data,batch_size, sequence_length)
print(Y.shape[1])
model = getattr(models, arguments.model_name)(batch_size, sequence_length, Y.shape[1])
fit_model(X, Y, batch_size,num_epochs , model)
| [
"tensorflow.random.set_seed",
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.log",
"keras.callbacks.ModelCheckpoint",
"keras.backend.categorical_crossentropy",
"sklearn.preprocessing.OneHotEncoder",
"keras.optimizers.Adam",
"numpy.lib.stride_tricks.as_strided",
"keras.callb... | [((865, 887), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (883, 887), True, 'import tensorflow as tf\n'), ((888, 905), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (902, 905), True, 'import numpy as np\n'), ((955, 980), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (978, 980), False, 'import argparse\n'), ((2057, 2150), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['a'], {'shape': '(nrows, L)', 'strides': '(S * n, n)', 'writeable': '(False)'}), '(a, shape=(nrows, L), strides=(S * n, n),\n writeable=False)\n', (2088, 2150), True, 'import numpy as np\n'), ((2232, 2250), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (2239, 2250), True, 'import numpy as np\n'), ((2341, 2368), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (2354, 2368), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((2867, 2967), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'decay': '(0)', 'amsgrad': '(False)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08,\n decay=0, amsgrad=False)\n', (2888, 2967), False, 'import keras\n'), ((3062, 3182), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['arguments.name'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""', 'save_weights_only': '(True)'}), "(arguments.name, monitor='loss', verbose=1, save_best_only=\n True, mode='min', save_weights_only=True)\n", (3077, 3182), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((3199, 3256), 'keras.callbacks.CSVLogger', 'CSVLogger', (['arguments.log_file'], {'append': '(True)', 'separator': '""";"""'}), "(arguments.log_file, append=True, separator=';')\n", (3208, 3256), False, 'from keras.callbacks import CSVLogger\n'), ((3282, 3367), 
'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'mode': '"""min"""', 'min_delta': '(0.005)', 'patience': '(3)', 'verbose': '(1)'}), "(monitor='loss', mode='min', min_delta=0.005, patience=3,\n verbose=1)\n", (3295, 3367), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((1797, 1839), 'keras.backend.categorical_crossentropy', 'K.categorical_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1823, 1839), True, 'import keras.backend as K\n'), ((1785, 1794), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1791, 1794), True, 'import numpy as np\n')] |
import numpy as np
from scipy.signal import welch
from scipy.stats import skew, kurtosis
from scipy.interpolate import Rbf
from itertools import permutations, combinations
import matplotlib.pyplot as plt
def normalizacao(VETOR, metodo='std', r=1):
    """Normalize the values of a single feature.

    Parameters
    ----------
    VETOR : ndarray, shape (M, N)
        Feature values computed over N patterns and C classes.
    metodo : {'std', 'mmx', 'sfm'}
        'std': zero-mean, unit-variance linear normalization (default).
        'mmx': linear rescale onto the interval [-1, 1].
        'sfm': non-linear (sigmoidal) rescale into the interval (0, 1).
    r : float
        Parameter of the 'sfm' method (default 1).

    Returns
    -------
    VETORNORM : ndarray, shape (M, N)
        The normalized feature values, same shape as the input.

    Raises
    ------
    AttributeError
        If `metodo` is not one of the supported methods.
    """
    M, N = VETOR.shape
    VETOR = VETOR.reshape(1, M*N)
    if metodo == 'std':
        VETORNORM = (VETOR - VETOR.mean()) / VETOR.std()
    elif metodo == 'mmx':
        # BUG FIX: the previous code applied the builtin max()/min() to a
        # 2-D (1, M*N) array; iterating such an array yields its single
        # row, so max(VETOR) - min(VETOR) was identically zero and the
        # division produced NaN/inf.  Map [min, max] linearly onto [-1, 1]
        # instead, which is what the original shift-by-(min+1) intended.
        vmin = VETOR.min()
        vmax = VETOR.max()
        VETORNORM = 2 * (VETOR - vmin) / (vmax - vmin) - 1
    elif metodo == 'sfm':
        Y = (VETOR - VETOR.mean()) / (r * VETOR.std())
        VETORNORM = 1 / (1 + np.exp(-Y))
    else:
        raise AttributeError("Unknown method, but don't get sad, not everything is made of roses.")
    VETORNORM = VETORNORM.reshape(M, N)
    return VETORNORM
def rmoutliers(data, p=3):
    """Remove outliers from a 1-D feature vector.

    A value is an outlier when it lies more than ``p`` standard deviations
    away from the median of the data.

    INPUT:
        - data = vector of values of a single feature.
        - p = number of standard deviations used as the threshold.
    OUTPUT:
        - data: the data with outliers removed
        - outliers: the outlier values that were detected
        - indexes: the positions of the outliers in the original vector
    """
    lower_bound = np.median(data) - p * data.std()
    upper_bound = np.median(data) + p * data.std()
    outlier_mask = (data < lower_bound) | (data > upper_bound)
    indexes = np.flatnonzero(outlier_mask)
    outliers = data[indexes]
    data = np.delete(data, indexes)
    return data, outliers, indexes
return data, outliers, indexes
| [
"numpy.median",
"numpy.exp",
"numpy.union1d",
"numpy.delete"
] | [((1717, 1747), 'numpy.union1d', 'np.union1d', (['inferior', 'superior'], {}), '(inferior, superior)\n', (1727, 1747), True, 'import numpy as np\n'), ((1782, 1806), 'numpy.delete', 'np.delete', (['data', 'indexes'], {}), '(data, indexes)\n', (1791, 1806), True, 'import numpy as np\n'), ((1610, 1625), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (1619, 1625), True, 'import numpy as np\n'), ((1671, 1686), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (1680, 1686), True, 'import numpy as np\n'), ((1051, 1061), 'numpy.exp', 'np.exp', (['(-Y)'], {}), '(-Y)\n', (1057, 1061), True, 'import numpy as np\n')] |
"""
This tutorial shows you how to use Model Predictive Control
with the true model.
"""
from stable_baselines.common import set_global_seeds
from causal_world.envs.causalworld import CausalWorld
from causal_world.dynamics_model import SimulatorModel
from causal_world.utils.mpc_optimizers import \
CrossEntropyMethod
from gym.wrappers.monitoring.video_recorder import VideoRecorder
import numpy as np
from causal_world.task_generators.task import generate_task
seed = 0                 # RNG seed shared by all environments
skip_frame = 35          # env steps each planned action is held for in the replay loop
num_of_particles = 500   # CEM population size
num_elite = 50           # elites kept per CEM iteration
max_iterations = 20      # CEM optimisation iterations
horizon_length = 6       # planning horizon (number of actions)
parallel_agents = 25     # parallel simulator workers for the true model
def _make_env():
    """Build a factory for seeded, non-visual picking environments.

    Returns a zero-argument callable that constructs a fresh CausalWorld
    picking environment, as required by SimulatorModel when it spawns
    parallel simulator workers.
    """

    def _init():
        picking_task = generate_task(
            task_generator_id='picking',
            joint_positions=[-0.21737874, 0.55613149,
                             -1.09308519, -0.12868997,
                             0.52551013, -1.08006493,
                             -0.00221536, 0.46163487,
                             -1.00948735],
            tool_block_position=[0.0, 0, 0.035],
            fractional_reward_weight=1,
            dense_reward_weights=np.array([0, 10, 0, 1, 1, 0, 0, 0]))
        return CausalWorld(task=picking_task,
                           skip_frame=skip_frame,
                           enable_visualization=False,
                           seed=seed)

    set_global_seeds(seed)
    return _init
def run_mpc():
    """Plan a picking action sequence with CEM, then execute and record it.

    Builds the picking task, optimises an action sequence with the
    cross-entropy method against parallel copies of the true simulator,
    then replays the planned actions in a fresh environment while
    recording 'picking.mp4'.
    """
    # NOTE(review): this task definition duplicates the one in `_make_env`;
    # the two must be kept in sync.
    task = generate_task(
        task_generator_id='picking',
        joint_positions=[-0.21737874, 0.55613149,
                         -1.09308519, -0.12868997,
                         0.52551013, -1.08006493,
                         -0.00221536, 0.46163487,
                         -1.00948735],
        tool_block_position=[0.0, 0, 0.035],
        fractional_reward_weight=1,
        dense_reward_weights=np.array([0, 10, 0,
                                       1, 1, 0, 0,
                                       0]))
    # skip_frame=1 here (unlike _make_env): frame skipping is performed
    # manually in the replay loop below so every frame can be captured.
    env = CausalWorld(task=task,
                      skip_frame=1,
                      enable_visualization=False,
                      seed=seed)
    true_model = SimulatorModel(_make_env, parallel_agents=parallel_agents)
    optimizer = CrossEntropyMethod(
        planning_horizon=horizon_length,
        max_iterations=max_iterations,
        population_size=num_of_particles,
        num_elite=num_elite,
        action_upper_bound=np.array(env.action_space.high),
        action_lower_bound=np.array(env.action_space.low),
        model=true_model)
    env.reset()
    actions = optimizer.get_actions()
    true_model.end_sim()  # shut down the parallel simulator workers
    recorder = VideoRecorder(env, 'picking.mp4')
    # Replay: hold each planned action for `skip_frame` environment steps,
    # capturing a video frame around every step.
    for i in range(horizon_length):
        for _ in range(skip_frame):
            recorder.capture_frame()
            obs, reward, done, info = env.step(actions[i])
        recorder.capture_frame()
    recorder.close()
    env.close()
if __name__ == '__main__':
    # Entry point: run the MPC rollout and record 'picking.mp4'.
    run_mpc()
| [
"stable_baselines.common.set_global_seeds",
"causal_world.dynamics_model.SimulatorModel",
"numpy.array",
"causal_world.envs.causalworld.CausalWorld",
"gym.wrappers.monitoring.video_recorder.VideoRecorder"
] | [((1404, 1426), 'stable_baselines.common.set_global_seeds', 'set_global_seeds', (['seed'], {}), '(seed)\n', (1420, 1426), False, 'from stable_baselines.common import set_global_seeds\n'), ((1999, 2074), 'causal_world.envs.causalworld.CausalWorld', 'CausalWorld', ([], {'task': 'task', 'skip_frame': '(1)', 'enable_visualization': '(False)', 'seed': 'seed'}), '(task=task, skip_frame=1, enable_visualization=False, seed=seed)\n', (2010, 2074), False, 'from causal_world.envs.causalworld import CausalWorld\n'), ((2158, 2216), 'causal_world.dynamics_model.SimulatorModel', 'SimulatorModel', (['_make_env'], {'parallel_agents': 'parallel_agents'}), '(_make_env, parallel_agents=parallel_agents)\n', (2172, 2216), False, 'from causal_world.dynamics_model import SimulatorModel\n'), ((2643, 2676), 'gym.wrappers.monitoring.video_recorder.VideoRecorder', 'VideoRecorder', (['env', '"""picking.mp4"""'], {}), "(env, 'picking.mp4')\n", (2656, 2676), False, 'from gym.wrappers.monitoring.video_recorder import VideoRecorder\n'), ((1217, 1305), 'causal_world.envs.causalworld.CausalWorld', 'CausalWorld', ([], {'task': 'task', 'skip_frame': 'skip_frame', 'enable_visualization': '(False)', 'seed': 'seed'}), '(task=task, skip_frame=skip_frame, enable_visualization=False,\n seed=seed)\n', (1228, 1305), False, 'from causal_world.envs.causalworld import CausalWorld\n'), ((1874, 1909), 'numpy.array', 'np.array', (['[0, 10, 0, 1, 1, 0, 0, 0]'], {}), '([0, 10, 0, 1, 1, 0, 0, 0])\n', (1882, 1909), True, 'import numpy as np\n'), ((2431, 2462), 'numpy.array', 'np.array', (['env.action_space.high'], {}), '(env.action_space.high)\n', (2439, 2462), True, 'import numpy as np\n'), ((2491, 2521), 'numpy.array', 'np.array', (['env.action_space.low'], {}), '(env.action_space.low)\n', (2499, 2521), True, 'import numpy as np\n'), ((1080, 1115), 'numpy.array', 'np.array', (['[0, 10, 0, 1, 1, 0, 0, 0]'], {}), '([0, 10, 0, 1, 1, 0, 0, 0])\n', (1088, 1115), True, 'import numpy as np\n')] |
import numpy as np
from numpy.random import default_rng
# One module-level generator so repeated calls share the same RNG stream.
_rng = default_rng()


def roller(dice_size=6, dice_number=1):
    """Roll ``dice_number`` independent dice, each with ``dice_size`` faces.

    Returns a 1-D integer array of the individual rolls, each value in
    the inclusive range [1, dice_size].
    """
    return _rng.integers(low=1, high=dice_size, endpoint=True,
                         size=(dice_number,))
def roll_dice(size=6, number=1, modifier=0, reroll=0):
rolls = roller(size, number)
if isinstance(reroll, str):
if reroll == 'lowest':
idx = np.argmin(rolls)
rolls[idx] = roller(size)
elif reroll == '2lowest':
idx = np.argpartition(rolls, 2)[:2]
rolls[idx] = roller(size, dice_number=2)
elif (reroll > 0):
rolls = np.array([x if x > reroll else roller(size).sum() for x in rolls])
return rolls.sum() + modifier
def meanroll(size=6, number=1, modifier=0, reroll=0, repeat=1e4):
    """Estimate the mean outcome of ``roll_dice`` by Monte-Carlo sampling.

    ``repeat`` independent rolls are performed and averaged.
    """
    n_samples = int(repeat)
    total = 0
    for _ in range(n_samples):
        total += roll_dice(size, number, modifier, reroll)
    return total / n_samples
def minroll(size=6, number=1, modifier=0):
    """Smallest possible total: every die shows a 1 (``size`` is unused)."""
    return number + modifier
def maxroll(size=6, number=1, modifier=0):
    """Largest possible total: every die shows its top face."""
    return number * size + modifier
def roll_stats(size=6, number=1, modifier=0, reroll=0, statistics='all'):
    """Return roll statistics for a ``number`` d``size`` + ``modifier`` roll.

    Args:
        statistics: 'min', 'max' or 'mean' return that single value;
            'all' (default) returns the tuple (min, mean, max).

    Returns:
        A single number, or the (min, mean, max) tuple for 'all'.

    Raises:
        ValueError: on an unknown ``statistics`` value.  (The previous
            version fell through and crashed with an opaque
            UnboundLocalError instead.)
    """
    if statistics == 'min':
        return minroll(size, number, modifier)
    if statistics == 'max':
        return maxroll(size, number, modifier)
    if statistics == 'mean':
        return meanroll(size, number, modifier, reroll, repeat=1e4)
    if statistics == 'all':
        mi = minroll(size, number, modifier)
        ma = maxroll(size, number, modifier)
        me = meanroll(size, number, modifier, reroll, repeat=1e4)
        return mi, me, ma
    raise ValueError(f"unknown statistics {statistics!r}")
if __name__ == '__main__':
    import argparse

    # Command-line interface for a single dice roll plus summary statistics.
    parser = argparse.ArgumentParser(description='simple command line utility for dice roller')
    parser.add_argument('-d', '--dice_size', metavar='D', default=6, type=int,
                        help="The number of faces of the dice (default 6)")
    parser.add_argument('-n', '--dice_number', metavar='N', default=1, type=int,
                        help="The number of rolled dices (default 1)")
    parser.add_argument('-m', '--modifier', metavar='M', default=0, type=int,
                        help="A modifier for the roll (default 0)")
    parser.add_argument('-r', '--reroll', metavar='R', default=0, type=int,
                        choices=[0, 1, 2],
                        help="A modifier for the roll (default 0)")
    args = parser.parse_args()
    sz = args.dice_size
    n = args.dice_number
    m = args.modifier
    r = args.reroll
    result = roll_dice(size=sz,
                       number=n,
                       modifier=m,
                       reroll=r)
    # BUG FIX: this previously called the non-existent `all_stats`,
    # crashing with NameError; the defined function is `roll_stats`.
    mi, me, ma = roll_stats(size=sz,
                            number=n,
                            modifier=m,
                            reroll=r)
    print('Result of {0}d{1}{2}{3}: {4}'.format(
        n, sz, f'+{m}' if m > 0 else '', ' with reroll' if r > 0 else '',
        result))
    print('Result in [{} - {}], Mean: {:.2f}'.format(mi, ma, me))
| [
"numpy.random.default_rng",
"numpy.argpartition",
"argparse.ArgumentParser",
"numpy.argmin"
] | [((64, 77), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (75, 77), False, 'from numpy.random import default_rng\n'), ((1690, 1777), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""simple command line utility for dice roller"""'}), "(description=\n 'simple command line utility for dice roller')\n", (1713, 1777), False, 'import argparse\n'), ((364, 380), 'numpy.argmin', 'np.argmin', (['rolls'], {}), '(rolls)\n', (373, 380), True, 'import numpy as np\n'), ((471, 496), 'numpy.argpartition', 'np.argpartition', (['rolls', '(2)'], {}), '(rolls, 2)\n', (486, 496), True, 'import numpy as np\n')] |
"""
A set of functions for running prediction in various settings
"""
import numpy as np
def predict_on_generator(model, generator, argmax=False):
    """
    Run `model` over every batch a generator yields and stack the results.

    Stacks the per-batch predictions along axis 0 (np.vstack).

    Args:
        model: A tf.keras model instance; only its `predict_on_batch`
            method is used, on batches as produced by `generator`.
        generator: Yields one or more (X_batch, y_batch) tuples; the
            targets are ignored.
        argmax: Whether to return argmax values or raw model outputs.

    Returns:
        If argmax is true, integer predictions of shape [-1, 1];
        otherwise floating values of shape [-1, n_classes].
    """
    batch_predictions = []
    for X_batch, _ in generator:
        batch_output = model.predict_on_batch(X_batch)
        if argmax:
            batch_output = batch_output.argmax(-1).reshape(-1, 1)
        batch_predictions.append(batch_output)
    return np.vstack(batch_predictions)
def predict_by_id(model, sequencer, study_id, argmax=False):
    """
    Takes a tf.keras model and predicts on all batches of data in a SleepStudy
    object.
    Args:
        model:      A tf.keras model instance. Should accept batches of data
                    as output by the 'sequence' Sequence object.
        sequencer:  A Sequence object which stores at least the passed
                    SleepStudy object of 'sleep_study'.
        study_id:   The identifier string of a SleepStudy object in 'sequence'.
        argmax:     See predict_on_generator docstring.
    Returns:
        Predictions of 'model' on all batches of data in a SleepStudy
        Please refer to the 'predict_on_generator' docstring.
    """
    # Build the batch generator for the requested study and delegate.
    return predict_on_generator(
        model, sequencer.to_batch_generator(study_id=study_id), argmax
    )
def sequence_predict_generator(model, total_seq_length, generator,
                               argmax=False, overlapping=True, verbose=True):
    """
    Takes a tf.keras model and predicts on segments of data from a generator.
    This function takes a few additional values needed to derive an
    understanding of the data produced by 'generator', see below:
    Args:
        model:            A tf.keras model to predict with. Should accept data
                          as output by the generator.
        total_seq_length: The total number of 'segments/epochs/stages' in the
                          generator. This is needed to initialize the
                          predictions array.
        generator:        A generator which produces batches of data
        argmax:           Whether to return argmax values or model output values
        overlapping:      Specifies whether the sequences output of 'generator'
                          represent overlapping segments or contagious data.
        verbose:          If True, prints the prediction progess to screen.
    Returns:
        An array of shape [total_seq_length, n_classes] or
        [total_seq_length, -1, n_classes] if data_per_prediction != input_dims.
        If argmax = True axis -1 (now shape 1) is squeezed.
    """
    n_classes = model.outputs[0].get_shape()[-1]
    s = model.outputs[0].get_shape().as_list()
    pred = np.zeros(shape=[total_seq_length] + s[2:], dtype=np.float64)
    cur_pos = 0
    for X, _, _ in generator:
        if verbose:
            print("  pos: {}/{}".format(cur_pos+1, total_seq_length),
                  end="\r", flush=True)
        batch_pred = model.predict_on_batch(X)
        if overlapping:
            # Each sequence in the batch covers positions
            # [cur_pos, cur_pos + seq_len); overlapping windows are summed.
            for p in batch_pred:
                pred[cur_pos:cur_pos+p.shape[0]] += p
                cur_pos += 1
        else:
            # Contiguous data: flatten the batch and advance by its length
            batch_pred = batch_pred.reshape(-1, n_classes)
            n_vals = batch_pred.shape[0]
            pred[cur_pos:cur_pos+n_vals] += batch_pred
            cur_pos += n_vals
    if argmax:
        pred = pred.argmax(-1)
    if verbose:
        # Terminate the '\r' progress line. BUGFIX: previously this newline
        # was printed unconditionally, emitting output even with verbose=False.
        print()
    return pred
| [
"numpy.zeros",
"numpy.vstack"
] | [((1186, 1201), 'numpy.vstack', 'np.vstack', (['pred'], {}), '(pred)\n', (1195, 1201), True, 'import numpy as np\n'), ((3489, 3549), 'numpy.zeros', 'np.zeros', ([], {'shape': '([total_seq_length] + s[2:])', 'dtype': 'np.float64'}), '(shape=[total_seq_length] + s[2:], dtype=np.float64)\n', (3497, 3549), True, 'import numpy as np\n')] |
import zipfile
import numpy as np
import torch
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
class wafer_dataset(Dataset):
    def __init__(self, transform=None, folder_path=None, test_gen=False):
        """
        :param transform: for augmentation
        :param folder_path: Data set path
        :param test_gen: if True, load the generated-data arrays instead
        Classes:
            Center (0)
            Donut (1)
            Edge-Loc (2)
            Edge-Ring (3)
            Loc (4)
            Near-full (5)
            Random (6)
            Scratch (7)
            None (8)
        """
        self.classes = ['Center',
                        'Donut',
                        'Edge-Loc',
                        'Edge-Ring',
                        'Loc',
                        'Near-full',
                        'Random',
                        'Scratch',
                        'None']
        # The archive is both enumerated as a zip (for member names) and
        # loaded through numpy (.npz) to materialise each array.
        with zipfile.ZipFile(folder_path) as archive:
            npz_file = np.load(folder_path)
            arrays = {name: npz_file[name] for name in archive.namelist()}
        self.test_gen = test_gen
        if self.test_gen:
            data_key, label_key = 'gen_data.npy', 'gen_label.npy'
        else:
            data_key, label_key = 'data.npy', 'label.npy'
        self.data = arrays[data_key]
        self.label = arrays[label_key]
        self.transform = transform

    def __getitem__(self, index):
        sample, target = self.data[index], self.label[index]
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, target

    def __len__(self):
        return len(self.data)
def imshow(truth_img, decode_img, class_name, test=False):
    """
    Plot image.
    if test, then plot each class data.
    :param truth_img: Original image
    :param class_name: Wafer map classes
    :param decode_img: A list of generate images
    :param test: for testing
    """
    if not test:
        original = truth_img.cpu().numpy()
        f, axes = plt.subplots(1, 6)
        for i in range(6):
            if i == 0:
                axes[i].set_title('Original image: ' + class_name)
                axes[i].imshow(np.argmax(np.transpose(original, (1, 2, 0)), axis=2))
            else:
                # BUGFIX: use a local converted copy instead of writing the
                # numpy array back into the caller's decode_img list.
                generated = decode_img[i-1].cpu().numpy()
                axes[i].set_title('Generate image')
                axes[i].imshow(np.argmax(np.transpose(generated, (1, 2, 0)), axis=2))
    else:
        f, axes = plt.subplots(2, 9)
        f.suptitle('Original image', x=0.5, y=0.9, fontsize=20)
        # Adjust vertical_spacing = 0.5 * axes_height
        plt.subplots_adjust(hspace=0.5)
        # Add text in figure coordinates
        plt.figtext(0.5, 0.5, 'Generated image', ha='center', va='center', fontsize=20)
        # Row 0: one original per class; row 1: one generated image per class.
        # NOTE(review): in this branch the images are used without .cpu();
        # callers presumably pass numpy arrays here -- confirm.
        for i in range(2):
            for j in range(9):
                if i == 0:
                    axes[i, j].set_title(class_name[j])
                    axes[i, j].imshow(np.argmax(np.transpose(truth_img[j], (1, 2, 0)), axis=2))
                else:
                    axes[i, j].set_title(class_name[j])
                    axes[i, j].imshow(np.argmax(np.transpose(decode_img[j], (1, 2, 0)), axis=2))
    plt.show()
def plot_learning_curve(step, loss, x):
    """
    Plot the training-loss curve over epochs.
    :param step: x-axis values (epoch numbers)
    :param loss: y-axis values (loss per epoch)
    :param x: upper bound of the epoch axis
    """
    figure = plt.figure(num=0)
    figure.suptitle('Loss' + "\n", fontsize=25)
    plt.xlabel('Epoch')
    plt.plot(step, loss, label='training')
    # Fixed y-range keeps successive runs visually comparable
    plt.axis([1, x, 0, 0.2])
    plt.legend(loc='lower right')
    plt.show()
class AddGaussianNoise(object):
    """
    Transform that adds element-wise Gaussian noise to the input tensor.
    """
    def __init__(self, mean=0., std=1.):
        # Noise is sampled as randn(shape) * std, then mean is added on top.
        self.std = std
        self.mean = mean

    def __call__(self, tensor):
        noise = torch.randn(tensor.size()) * self.std
        return tensor + noise + self.mean

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
| [
"numpy.load",
"matplotlib.pyplot.show",
"zipfile.ZipFile",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.transpose",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figtext",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.xlabel",
"matplotlib.py... | [((3177, 3187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3185, 3187), True, 'import matplotlib.pyplot as plt\n'), ((3297, 3314), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(0)'}), '(num=0)\n', (3307, 3314), True, 'import matplotlib.pyplot as plt\n'), ((3319, 3338), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (3329, 3338), True, 'import matplotlib.pyplot as plt\n'), ((3343, 3381), 'matplotlib.pyplot.plot', 'plt.plot', (['step', 'loss'], {'label': '"""training"""'}), "(step, loss, label='training')\n", (3351, 3381), True, 'import matplotlib.pyplot as plt\n'), ((3432, 3456), 'matplotlib.pyplot.axis', 'plt.axis', (['[1, x, 0, 0.2]'], {}), '([1, x, 0, 0.2])\n', (3440, 3456), True, 'import matplotlib.pyplot as plt\n'), ((3461, 3490), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3471, 3490), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3503, 3505), True, 'import matplotlib.pyplot as plt\n'), ((1979, 1997), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(6)'], {}), '(1, 6)\n', (1991, 1997), True, 'import matplotlib.pyplot as plt\n'), ((2455, 2473), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(9)'], {}), '(2, 9)\n', (2467, 2473), True, 'import matplotlib.pyplot as plt\n'), ((2600, 2631), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (2619, 2631), True, 'import matplotlib.pyplot as plt\n'), ((2681, 2760), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.5)', '(0.5)', '"""Generated image"""'], {'ha': '"""center"""', 'va': '"""center"""', 'fontsize': '(20)'}), "(0.5, 0.5, 'Generated image', ha='center', va='center', fontsize=20)\n", (2692, 2760), True, 'import matplotlib.pyplot as plt\n'), ((866, 894), 'zipfile.ZipFile', 'zipfile.ZipFile', (['folder_path'], {}), '(folder_path)\n', (881, 
894), False, 'import zipfile\n'), ((924, 944), 'numpy.load', 'np.load', (['folder_path'], {}), '(folder_path)\n', (931, 944), True, 'import numpy as np\n'), ((2156, 2190), 'numpy.transpose', 'np.transpose', (['truth_img', '(1, 2, 0)'], {}), '(truth_img, (1, 2, 0))\n', (2168, 2190), True, 'import numpy as np\n'), ((2376, 2418), 'numpy.transpose', 'np.transpose', (['decode_img[i - 1]', '(1, 2, 0)'], {}), '(decode_img[i - 1], (1, 2, 0))\n', (2388, 2418), True, 'import numpy as np\n'), ((2950, 2987), 'numpy.transpose', 'np.transpose', (['truth_img[j]', '(1, 2, 0)'], {}), '(truth_img[j], (1, 2, 0))\n', (2962, 2987), True, 'import numpy as np\n'), ((3124, 3162), 'numpy.transpose', 'np.transpose', (['decode_img[j]', '(1, 2, 0)'], {}), '(decode_img[j], (1, 2, 0))\n', (3136, 3162), True, 'import numpy as np\n')] |
import re
import typing
import os
import copy
import numpy
import nidigital
from enum import Enum
from nidigital import enums
from datetime import datetime
from nidigital.history_ram_cycle_information import HistoryRAMCycleInformation
from nitsm.codemoduleapi import SemiconductorModuleContext
class SSCDigital(typing.NamedTuple):
    # One nidigital driver session together with the channel-list and
    # site-list strings selecting what that session operates on.
    session: nidigital.Session
    channel_list: str
    site_list: str
class TSMDigital(typing.NamedTuple):
    # TSM-level handle: the pin query context, all per-instrument SSCDigital
    # bundles, and the site numbers / pin names they cover.
    pin_query_context: typing.Any
    ssc: typing.List[SSCDigital]
    site_numbers: typing.List[int]
    pins: typing.List[str]
class Location_1D_Array(typing.NamedTuple):
    # Flat list of indices used by the per-instrument <-> per-site LUT helpers.
    location_1d_array: typing.List[int]
class Location_2D(typing.NamedTuple):
    # (row, col) position inside a per-site/per-pin 2-D array.
    row: int
    col: int
class Location_2D_Array(typing.NamedTuple):
    location_2d_array: typing.List[Location_2D]
class Session_Properties(typing.NamedTuple):
    # Snapshot of commonly queried per-channel level/timing properties,
    # as read back by tsm_ssc_get_properties.
    instrument_name: str
    voh: float
    vol: float
    vih: float
    vil: float
    vterm: float
    measurement_time: float
class LevelTypeToSet(Enum):
    # Selects which single pin level the tsm_ssc_configure_single_level
    # (and _per_site) functions should write.
    VIL = 0
    VIH = 1
    VOL = 2
    VOH = 3
    VTERM = 4
    LOL = 5
    LOH = 6
    VCOM = 7
class HRAM_Configuration:
    # Plain settings holder for History RAM acquisition and trigger
    # configuration. These are class-level defaults;
    # tsm_ssc_get_hram_configuration overwrites them as instance attributes
    # with values read back from the sessions.
    finite_samples: bool = True
    cycles_to_acquire: enums.HistoryRAMCyclesToAcquire = enums.HistoryRAMCyclesToAcquire.FAILED
    max_samples_to_acquire_per_site: int = 8191
    buffer_size_per_site: int = 32000
    pretrigger_samples: int = 0
    trigger_type: enums.HistoryRAMTriggerType = enums.HistoryRAMTriggerType.FIRST_FAILURE
    cycle_number: int = 0
    pattern_label: str = ""
    vector_offset: int = 0
    cycle_offset: int = 0
class PXITriggerLine(typing.NamedTuple):
    # Names of the PXI backplane trigger lines ("" means no trigger line).
    NONE: str
    PXI_TRIG0: str
    PXI_TRIG1: str
    PXI_TRIG2: str
    PXI_TRIG3: str
    PXI_TRIG4: str
    PXI_TRIG5: str
    PXI_TRIG6: str
    PXI_TRIG7: str
# Singleton holding the driver terminal-name strings for each PXI line.
PXI_TRIGGER_LINE = PXITriggerLine(
    "",
    "PXI_Trig0",
    "PXI_Trig1",
    "PXI_Trig2",
    "PXI_Trig3",
    "PXI_Trig4",
    "PXI_Trig5",
    "PXI_Trig6",
    "PXI_Trig7",
)
class SignalId(typing.NamedTuple):
    # Driver signal-identifier strings for the four pattern opcode events.
    PATTERN_OPCODE_EVENT0: str
    PATTERN_OPCODE_EVENT1: str
    PATTERN_OPCODE_EVENT2: str
    PATTERN_OPCODE_EVENT3: str
# Singleton used with tsm_ssc_export_opcode_trigger_signal.
SIGNAL_ID = SignalId(
    "patternOpcodeEvent0",
    "patternOpcodeEvent1",
    "patternOpcodeEvent2",
    "patternOpcodeEvent3",
)
# Clock Generation #
def tsm_ssc_clock_generator_abort(tsm: TSMDigital):
    """Stop clock generation on every session/channel in *tsm*; returns *tsm*."""
    _ssc_clock_generator_abort(tsm.ssc)
    return tsm
def tsm_ssc_clock_generator_generate_clock(
    tsm: TSMDigital, frequency: float, select_digital_function: bool = True
):
    """Start generating a clock of *frequency* Hz on every channel in *tsm*."""
    _ssc_clock_generator_generate_clock(tsm.ssc, frequency, select_digital_function)
    return tsm
def tsm_ssc_modify_time_set_for_clock_generation(
    tsm: TSMDigital, frequency: float, duty_cycle: float, time_set: str
):
    """Reconfigure *time_set* period and drive edges for the given frequency/duty cycle."""
    _ssc_modify_time_set_for_clock_generation(tsm.ssc, frequency, duty_cycle, time_set)
    return tsm
# End of Clock Generation #
# Configuration #
def tsm_ssc_clear_start_trigger_signal(tsm: TSMDigital):
    """Clear the start-trigger configuration on all sessions; returns *tsm*."""
    _ssc_clear_start_trigger_signal(tsm.ssc)
    return tsm
def tsm_ssc_configure_trigger_signal(
    tsm: TSMDigital, source: str, edge: enums.DigitalEdge = enums.DigitalEdge.RISING
):
    """Configure *source*/*edge* as the trigger signal on all sessions."""
    _ssc_configure_trigger_signal(tsm.ssc, source, edge)
    return tsm
def tsm_ssc_select_function(tsm: TSMDigital, function: enums.SelectedFunction):
    """Select the per-channel function (digital / PPMU / ...) on all channels."""
    _ssc_select_function(tsm.ssc, function)
    return tsm
def tsm_ssc_export_opcode_trigger_signal(
    tsm: TSMDigital, signal_id: str, output_terminal: str = ""
):
    """Export the pattern-opcode event *signal_id* (see SIGNAL_ID) to *output_terminal*."""
    _ssc_export_opcode_trigger_signal(tsm.ssc, signal_id, output_terminal)
    return tsm
# End of Configuration #
# Frequency Measurement #
def tsm_ssc_frequency_counter_configure_measurement_time(tsm: TSMDigital, measurement_time: float):
    """Set the frequency-counter measurement time on all channels; returns *tsm*."""
    _ssc_frequency_counter_configure_measurement_time(tsm.ssc, measurement_time)
    return tsm
def tsm_ssc_frequency_counter_measure_frequency(tsm: TSMDigital):
    """Measure frequency on every pin; returns (tsm, per-site per-pin frequencies)."""
    # Results come back grouped per instrument; the LUT scatters them into
    # the [site][pin] layout expected by callers.
    initialized_array = [[0.0 for _ in tsm.pins] for _ in tsm.site_numbers]
    per_instrument_to_per_site_per_pin_lut = _ssc_calculate_per_instrument_to_per_site_per_pin_lut(
        tsm.ssc, tsm.site_numbers, tsm.pins
    )
    _, per_instrument_frequencies = _ssc_frequency_counter_measure_frequency(tsm.ssc)
    per_site_per_pin_frequency_measurements = _apply_lut_per_instrument_to_per_site_per_pin(
        initialized_array,
        per_instrument_to_per_site_per_pin_lut,
        per_instrument_frequencies,
    )
    return tsm, per_site_per_pin_frequency_measurements
# End of Frequency Measurement #
# HRAM #
def tsm_ssc_configure_hram(
    tsm: TSMDigital, hram_configuration: HRAM_Configuration = HRAM_Configuration()
):
    """Apply all acquisition and trigger settings from *hram_configuration* to every session."""
    # NOTE(review): the default HRAM_Configuration() is created once at import
    # time (mutable default argument). It is only read here, so this is
    # harmless, but callers should pass their own instance rather than mutate
    # the default.
    number_of_samples_is_finite = hram_configuration.finite_samples
    cycles_to_acquire = hram_configuration.cycles_to_acquire
    pretrigger_samples = hram_configuration.pretrigger_samples
    buffer_size_per_site = hram_configuration.buffer_size_per_site
    max_samples_to_acquire_per_site = hram_configuration.max_samples_to_acquire_per_site
    triggers_type = hram_configuration.trigger_type
    cycle_number = hram_configuration.cycle_number
    pattern_label = hram_configuration.pattern_label
    cycle_offset = hram_configuration.cycle_offset
    vector_offset = hram_configuration.vector_offset
    _ssc_configure_hram_settings(
        tsm.ssc,
        cycles_to_acquire,
        pretrigger_samples,
        max_samples_to_acquire_per_site,
        number_of_samples_is_finite,
        buffer_size_per_site,
    )
    _ssc_configure_hram_trigger(
        tsm.ssc, triggers_type, cycle_number, pattern_label, cycle_offset, vector_offset
    )
    return tsm
def tsm_ssc_get_hram_configuration(tsm: TSMDigital):
    """Read HRAM settings back from the hardware; returns (tsm, HRAM_Configuration)."""
    (
        _,
        per_instrument_cycles_to_acquire,
        per_instrument_pretrigger_samples,
        per_instrument_max_samples_to_acquire_per_site,
        per_instrument_number_of_samples_is_finite,
        per_instrument_buffer_size_per_site,
    ) = _ssc_get_hram_settings(tsm.ssc)
    (
        _,
        per_instrument_triggers_type,
        per_instrument_cycle_number,
        per_instrument_pattern_label,
        per_instrument_cycle_offset,
        per_instrument_vector_offset,
    ) = _ssc_get_hram_trigger_settings(tsm.ssc)
    # Assumes all instruments have the same settings, so only the last
    # instrument's values are kept.
    hram_configuration: HRAM_Configuration = HRAM_Configuration()
    hram_configuration.finite_samples = per_instrument_number_of_samples_is_finite[-1]
    hram_configuration.trigger_type = per_instrument_triggers_type[-1]
    hram_configuration.cycle_number = per_instrument_cycle_number[-1]
    hram_configuration.pattern_label = per_instrument_pattern_label[-1]
    hram_configuration.vector_offset = per_instrument_vector_offset[-1]
    hram_configuration.cycle_offset = per_instrument_cycle_offset[-1]
    hram_configuration.cycles_to_acquire = per_instrument_cycles_to_acquire[-1]
    hram_configuration.pretrigger_samples = per_instrument_pretrigger_samples[-1]
    hram_configuration.buffer_size_per_site = per_instrument_buffer_size_per_site[-1]
    hram_configuration.max_samples_to_acquire_per_site = (
        per_instrument_max_samples_to_acquire_per_site[-1]
    )
    return tsm, hram_configuration
def tsm_ssc_log_hram_results(
    tsm: TSMDigital,
    per_site_cycle_information: typing.List[typing.List[HistoryRAMCycleInformation]],
    pattern_name: str,
    destination_dir: str,
):
    """
    Write one tab-separated CSV of HRAM results per site into *destination_dir*.

    Args:
        tsm: TSM digital handle; supplies the site numbers and pin names.
        per_site_cycle_information: HRAM cycle records, one list per site.
        pattern_name: Name of the burst pattern (currently unused in the log).
        destination_dir: Directory the files are written to; created if
            missing. NOTE: the current working directory is changed to it.
    Returns:
        (tsm, files_generated) where files_generated lists the CSV file names.
    """

    def _p_f(flag) -> str:
        # Render a pass/fail flag as the single letter used in the log.
        return "P" if flag else "F"

    # Race-free replacement for the previous exists()+mkdir() pair.
    os.makedirs(destination_dir, exist_ok=True)
    os.chdir(destination_dir)
    files_generated: typing.List[str] = []
    for cycle_informations, site_number in zip(per_site_cycle_information, tsm.site_numbers):
        results: typing.List[typing.List[typing.Any]] = []
        if not cycle_informations or all(
            [not cycle_information.per_pin_pass_fail for cycle_information in cycle_informations]
        ):
            results.append(["PATTERN PASSED - NO FAILURES"])
        else:
            for cycle_information in cycle_informations:
                results.append(
                    [
                        str(cycle_information.vector_number),
                        cycle_information.time_set_name,
                        str(cycle_information.cycle_number),
                        str(cycle_information.scan_cycle_number),
                        _p_f(all(cycle_information.per_pin_pass_fail)),
                        "{" + ",".join(tsm.pins) + "}",
                        "{"
                        + ",".join(
                            [_p_f(value) for value in cycle_information.per_pin_pass_fail]
                        )
                        + "}",
                        "{"
                        + ",".join([str(value) for value in cycle_information.expected_pin_states])
                        + "}",
                        "{"
                        + ",".join([str(value) for value in cycle_information.actual_pin_states])
                        + "}",
                    ]
                )
            results.insert(
                0,
                [
                    "Vector",
                    "Timeset",
                    "Cycle",
                    "Scan Cycle",
                    "Pass/Fail",
                    "Pin List",
                    "Per Pin Pass/Fail",
                    "Expected Pin States",
                    "Actual Pin States",
                ],
            )
        filename = (
            "HRAM_Results_site"
            + str(site_number)
            + "_"
            + datetime.now().strftime("%d-%b-%Y-%H-%M-%S")
            + ".csv"
        )
        files_generated.append(filename)
        # Context manager guarantees the handle is closed even if a write
        # fails (previously open()/close() leaked on exception).
        with open(filename, "w") as filehandle:
            for row in results:
                for col in row:
                    filehandle.write("%s\t" % col)
                filehandle.write("\n")
    return tsm, files_generated
def tsm_ssc_stream_hram_results(tsm: TSMDigital):
    """Fetch HRAM cycle information from every instrument and regroup it per site."""
    (
        _,
        per_instrument_per_site_cycle_information,
        number_of_samples,
    ) = _ssc_stream_hram_results(tsm.ssc)
    per_instrument_per_site_to_per_site_lut = (
        _ssc_calculate_per_instrument_per_site_to_per_site_lut(tsm.ssc, tsm.site_numbers)
    )
    # Pre-fill with empty records so sites without data still have entries.
    per_site_cycle_information = [
        [HistoryRAMCycleInformation() for _ in range(number_of_samples)] for _ in tsm.site_numbers
    ]
    for lut, cycle_information in zip(
        per_instrument_per_site_to_per_site_lut,
        per_instrument_per_site_cycle_information,
    ):
        for index in lut.location_1d_array:
            # NOTE(review): this assigns one instrument's whole record
            # structure to each mapped site slot; confirm an element-wise
            # scatter is not intended here.
            per_site_cycle_information[index] = cycle_information
    return tsm, per_site_cycle_information
# End of HRAM #
# Pattern Actions #
def tsm_ssc_abort(tsm: TSMDigital):
    """Abort any pattern burst in progress on all sessions; returns *tsm*."""
    _ssc_abort(tsm.ssc)
    return tsm
def tsm_ssc_burst_pattern_pass_fail(
    tsm: TSMDigital,
    start_label: str,
    select_digital_function: bool = True,
    timeout: float = 10,
):
    """Burst the pattern at *start_label*; returns (tsm, per-site pass flags)."""
    initialized_array = [False for _ in tsm.site_numbers]
    per_instrument_to_per_site_lut = _ssc_calculate_per_instrument_to_per_site_lut(
        tsm.ssc, tsm.site_numbers
    )
    _, per_instrument_pass = _ssc_burst_pattern_pass_fail(
        tsm.ssc, start_label, select_digital_function, timeout
    )
    # Scatter the per-instrument results into site order.
    per_site_pass = _apply_lut_per_instrument_to_per_site(
        initialized_array, per_instrument_to_per_site_lut, per_instrument_pass
    )
    return tsm, per_site_pass
def tsm_ssc_burst_pattern(
    tsm: TSMDigital,
    start_label: str,
    select_digital_function: bool = True,
    timeout: float = 10,
    wait_until_done: bool = True,
):
    """Burst the pattern at *start_label* without collecting pass/fail results."""
    _ssc_burst_pattern(tsm.ssc, start_label, select_digital_function, timeout, wait_until_done)
    return tsm
def tsm_ssc_get_fail_count(tsm: TSMDigital):
    """Return (tsm, per-site per-pin failure counts) for the last burst."""
    initialized_array = [[0 for _ in tsm.pins] for _ in tsm.site_numbers]
    per_instrument_to_per_site_per_pin_lut = _ssc_calculate_per_instrument_to_per_site_per_pin_lut(
        tsm.ssc, tsm.site_numbers, tsm.pins
    )
    _, per_instrument_failure_counts = _ssc_get_fail_count(tsm.ssc)
    per_site_per_pin_fail_counts = _apply_lut_per_instrument_to_per_site_per_pin(
        initialized_array,
        per_instrument_to_per_site_per_pin_lut,
        per_instrument_failure_counts,
    )
    return tsm, per_site_per_pin_fail_counts
def tsm_ssc_get_site_pass_fail(tsm: TSMDigital):
    """Return (tsm, per-site pass flags) for the last burst."""
    initialized_array = [False for _ in tsm.site_numbers]
    per_instrument_to_per_site_lut = _ssc_calculate_per_instrument_to_per_site_lut(
        tsm.ssc, tsm.site_numbers
    )
    _, per_instrument_pass = _ssc_get_site_pass_fail(tsm.ssc)
    per_site_pass = _apply_lut_per_instrument_to_per_site(
        initialized_array, per_instrument_to_per_site_lut, per_instrument_pass
    )
    return tsm, per_site_pass
def tsm_ssc_wait_until_done(tsm: TSMDigital, timeout: float = 10):
    """Wait (up to *timeout*) for the current burst to finish on all sessions."""
    _ssc_wait_until_done(tsm.ssc, timeout)
    return tsm
# End of Pattern Actions #
# Pin Levels and Timing #
def tsm_ssc_apply_levels_and_timing(tsm: TSMDigital, levels_sheet: str, timing_sheet: str):
    """Apply the named levels and timing sheets to all sessions; returns *tsm*."""
    _ssc_apply_levels_and_timing(tsm.ssc, levels_sheet, timing_sheet)
    return tsm
def tsm_ssc_apply_tdr_offsets_per_site_per_pin(
    tsm: TSMDigital, per_site_per_pin_tdr_values: typing.List[typing.List[float]]
):
    """
    Gather per-site/per-pin TDR offsets into per-instrument order and apply them.

    Args:
        tsm: TSM digital handle.
        per_site_per_pin_tdr_values: TDR offsets indexed [site][pin].
    Returns:
        tsm (for chaining, consistent with the other tsm_ssc_* functions).
    """
    (
        per_site_per_pin_to_per_instrument_lut,
        instrument_count,
        max_sites_on_instrument,
    ) = _ssc_calculate_per_site_per_pin_to_per_instrument_lut(tsm.ssc, tsm.site_numbers, tsm.pins)
    initialized_array = [
        [0.0 for _ in range(max_sites_on_instrument)] for _ in range(instrument_count)
    ]
    per_instrument_tdr_values = _apply_lut_per_site_per_pin_to_per_instrument(
        initialized_array,
        per_site_per_pin_to_per_instrument_lut,
        per_site_per_pin_tdr_values,
    )
    _ssc_apply_tdr_offsets(tsm.ssc, per_instrument_tdr_values)
    # BUGFIX: previously returned None; every sibling tsm_ssc_* returns tsm.
    return tsm
def tsm_ssc_apply_tdr_offsets(
    tsm: TSMDigital, per_instrument_offsets: typing.List[typing.List[float]]
):
    """Apply TDR offsets that are already grouped per instrument; returns *tsm*."""
    _ssc_apply_tdr_offsets(tsm.ssc, per_instrument_offsets)
    return tsm
def tsm_ssc_configure_active_load(tsm: TSMDigital, vcom: float, iol: float, ioh: float):
    """Configure active-load levels (vcom, iol, ioh) on all channels."""
    _ssc_configure_active_load(tsm.ssc, vcom, iol, ioh)
    return tsm
def tsm_ssc_configure_single_level_per_site(
    tsm: TSMDigital,
    level_type_to_set: LevelTypeToSet,
    per_site_value: typing.List[float],
):
    """Write one pin level (selected by *level_type_to_set*) with a distinct value per site."""
    (
        per_site_to_per_instrument_lut,
        instrument_count,
        max_sites_on_instrument,
    ) = _ssc_calculate_per_site_to_per_instrument_lut(tsm.ssc, tsm.site_numbers)
    initialized_array = [
        [0.0 for _ in range(max_sites_on_instrument)] for _ in range(instrument_count)
    ]
    per_instrument_value = _apply_lut_per_site_to_per_instrument(
        initialized_array, per_site_to_per_instrument_lut, per_site_value
    )
    _ssc_configure_single_level_per_site(tsm.ssc, level_type_to_set, per_instrument_value)
    return tsm
def tsm_ssc_configure_single_level(
    tsm: TSMDigital, level_type_to_set: LevelTypeToSet, setting: float
):
    """Write one pin level (selected by *level_type_to_set*) with the same value everywhere."""
    _ssc_configure_single_level(tsm.ssc, level_type_to_set, setting)
    return tsm
def tsm_ssc_configure_termination_mode(tsm: TSMDigital, termination_mode: enums.TerminationMode):
    """Set the termination mode on all channels; returns *tsm*."""
    _ssc_configure_termination_mode(tsm.ssc, termination_mode)
    return tsm
def tsm_ssc_configure_time_set_compare_edge_per_site_per_pin(
    tsm: TSMDigital,
    time_set: str,
    per_site_per_pin_compare_strobe: typing.List[typing.List[float]],
):
    """Set the compare-strobe edge of *time_set* with a distinct value per site and pin."""
    (
        per_site_per_pin_to_per_instrument_lut,
        instrument_count,
        max_sites_on_instrument,
    ) = _ssc_calculate_per_site_per_pin_to_per_instrument_lut(tsm.ssc, tsm.site_numbers, tsm.pins)
    initialized_array = [
        [0.0 for _ in range(max_sites_on_instrument)] for _ in range(instrument_count)
    ]
    per_instrument_compare_strobe = _apply_lut_per_site_per_pin_to_per_instrument(
        initialized_array,
        per_site_per_pin_to_per_instrument_lut,
        per_site_per_pin_compare_strobe,
    )
    _ssc_configure_time_set_compare_edge_per_site_per_pin(
        tsm.ssc, time_set, per_instrument_compare_strobe
    )
    return tsm
def tsm_ssc_configure_time_set_compare_edge_per_site(
    tsm: TSMDigital, time_set: str, per_site_compare_strobe: typing.List[float]
):
    """Set the compare-strobe edge of *time_set* with a distinct value per site."""
    (
        per_site_to_per_instrument_lut,
        instrument_count,
        max_sites_on_instrument,
    ) = _ssc_calculate_per_site_to_per_instrument_lut(tsm.ssc, tsm.site_numbers)
    initialized_array = [
        [0.0 for _ in range(max_sites_on_instrument)] for _ in range(instrument_count)
    ]
    per_instrument_compare_strobe = _apply_lut_per_site_to_per_instrument(
        initialized_array, per_site_to_per_instrument_lut, per_site_compare_strobe
    )
    _ssc_configure_time_set_compare_edge_per_site(tsm.ssc, time_set, per_instrument_compare_strobe)
    return tsm
def tsm_ssc_configure_time_set_compare_edge(tsm: TSMDigital, time_set: str, compare_strobe: float):
    """Set the compare-strobe edge of *time_set* to one value on all channels."""
    _ssc_configure_time_set_compare_edge(tsm.ssc, time_set, compare_strobe)
    return tsm
def tsm_ssc_configure_time_set_period(tsm: TSMDigital, time_set: str, period: float):
    """Set the period of *time_set*; returns (tsm, period actually configured)."""
    _, configured_period = _ssc_configure_time_set_period(tsm.ssc, time_set, period)
    return tsm, configured_period
def tsm_ssc_configure_voltage_levels(
    tsm: TSMDigital, vil: float, vih: float, vol: float, voh: float, vterm: float
):
    """Configure all five pin voltage levels on all channels; returns *tsm*."""
    _ssc_configure_voltage_levels(tsm.ssc, vil, vih, vol, voh, vterm)
    return tsm
# End of Pin Levels and Timing #
# PPMU #
def tsm_ssc_ppmu_configure_aperture_time(tsm: TSMDigital, aperture_time: float):
    """Set the PPMU aperture time on all channels; returns *tsm*."""
    _ssc_ppmu_configure_aperture_time(tsm.ssc, aperture_time)
    return tsm
def tsm_ssc_ppmu_configure_current_limit_range(tsm: TSMDigital, current_limit_range: float):
    """Set the PPMU current limit range on all channels; returns *tsm*."""
    _ssc_ppmu_configure_current_limit_range(tsm.ssc, current_limit_range)
    return tsm
def tsm_ssc_ppmu_configure_voltage_limits(
    tsm: TSMDigital, voltage_limit_high: float, voltage_limit_low: float
):
    """Set the PPMU high/low voltage limits on all channels; returns *tsm*."""
    _ssc_ppmu_configure_voltage_limits(tsm.ssc, voltage_limit_high, voltage_limit_low)
    return tsm
def tsm_ssc_ppmu_measure_current(tsm: TSMDigital):
    """PPMU-measure current on every pin; returns (tsm, per-site per-pin currents)."""
    initialized_array = [[0.0 for _ in tsm.pins] for _ in tsm.site_numbers]
    per_instrument_to_per_site_per_pin_lut = _ssc_calculate_per_instrument_to_per_site_per_pin_lut(
        tsm.ssc, tsm.site_numbers, tsm.pins
    )
    _, per_instrument_measurements = _ssc_ppmu_measure(tsm.ssc, enums.PPMUMeasurementType.CURRENT)
    per_site_per_pin_measurements = _apply_lut_per_instrument_to_per_site_per_pin(
        initialized_array,
        per_instrument_to_per_site_per_pin_lut,
        per_instrument_measurements,
    )
    return tsm, per_site_per_pin_measurements
def tsm_ssc_ppmu_measure_voltage(tsm: TSMDigital):
    """PPMU-measure voltage on every pin; returns (tsm, per-site per-pin voltages)."""
    initialized_array = [[0.0 for _ in tsm.pins] for _ in tsm.site_numbers]
    per_instrument_to_per_site_per_pin_lut = _ssc_calculate_per_instrument_to_per_site_per_pin_lut(
        tsm.ssc, tsm.site_numbers, tsm.pins
    )
    _, per_instrument_measurements = _ssc_ppmu_measure(tsm.ssc, enums.PPMUMeasurementType.VOLTAGE)
    per_site_per_pin_measurements = _apply_lut_per_instrument_to_per_site_per_pin(
        initialized_array,
        per_instrument_to_per_site_per_pin_lut,
        per_instrument_measurements,
    )
    return tsm, per_site_per_pin_measurements
def tsm_ssc_ppmu_source_current(
    tsm: TSMDigital, current_level: float, current_level_range: float = 0
):
    """PPMU-source *current_level* on all channels; returns *tsm*."""
    _ssc_ppmu_source_current(tsm.ssc, current_level, current_level_range)
    return tsm
def tsm_ssc_ppmu_source_voltage_per_site_per_pin(
    tsm: TSMDigital,
    current_limit_range: float,
    per_site_per_pin_source_voltages: typing.List[typing.List[float]],
):
    """PPMU-source a distinct voltage per site and pin; returns *tsm*."""
    (
        per_site_per_pin_to_per_instrument_lut,
        instrument_count,
        max_sites_on_instrument,
    ) = _ssc_calculate_per_site_per_pin_to_per_instrument_lut(tsm.ssc, tsm.site_numbers, tsm.pins)
    # NOTE(review): filler values here are int 0 (float 0.0 elsewhere in this
    # file); positions not covered by the LUT keep that filler.
    initialized_array = [
        [0 for _ in range(max_sites_on_instrument)] for _ in range(instrument_count)
    ]
    per_instrument_source_voltages = _apply_lut_per_site_per_pin_to_per_instrument(
        initialized_array,
        per_site_per_pin_to_per_instrument_lut,
        per_site_per_pin_source_voltages,
    )
    _ssc_ppmu_source_voltage_per_site_per_pin(
        tsm.ssc, current_limit_range, per_instrument_source_voltages
    )
    return tsm
def tsm_ssc_ppmu_source_voltage_per_site(
    tsm: TSMDigital,
    current_limit_range: float,
    per_site_source_voltages: typing.List[float],
):
    """PPMU-source a distinct voltage per site; returns *tsm*."""
    (
        per_site_to_per_instrument_lut,
        instrument_count,
        max_sites_on_instrument,
    ) = _ssc_calculate_per_site_to_per_instrument_lut(tsm.ssc, tsm.site_numbers)
    initialized_array = [
        [0 for _ in range(max_sites_on_instrument)] for _ in range(instrument_count)
    ]
    per_instrument_source_voltages = _apply_lut_per_site_to_per_instrument(
        initialized_array, per_site_to_per_instrument_lut, per_site_source_voltages
    )
    _ssc_ppmu_source_voltage_per_site(tsm.ssc, current_limit_range, per_instrument_source_voltages)
    return tsm
def tsm_ssc_ppmu_source_voltage(tsm: TSMDigital, voltage_level: float, current_limit_range: float):
    """PPMU-source the same *voltage_level* on all channels; returns *tsm*."""
    _ssc_ppmu_source_voltage(tsm.ssc, voltage_level, current_limit_range)
    return tsm
def tsm_ssc_ppmu_source(tsm: TSMDigital):
    """Start PPMU sourcing with the already-configured settings; returns *tsm*."""
    _ssc_ppmu_source(tsm.ssc)
    return tsm
# End of PPMU #
# Sequencer Flags and Registers #
def tsm_ssc_read_sequencer_flag(tsm: TSMDigital, sequencer_flag: enums.SequencerFlag):
    """Read *sequencer_flag* from every session; returns (tsm, per-instrument states)."""
    _, per_instrument_state = _ssc_read_sequencer_flag(tsm.ssc, sequencer_flag)
    return tsm, per_instrument_state
def tsm_ssc_read_sequencer_register(tsm: TSMDigital, sequencer_register: enums.SequencerRegister):
    """Read *sequencer_register* from every session; returns (tsm, per-instrument values)."""
    _, per_instrument_register_values = _ssc_read_sequencer_register(tsm.ssc, sequencer_register)
    return tsm, per_instrument_register_values
def tsm_ssc_write_sequencer_flag(
    tsm: TSMDigital, sequencer_flag: enums.SequencerFlag, state: bool = True
):
    """Write *state* into *sequencer_flag* on every session; returns *tsm*."""
    _ssc_write_sequencer_flag(tsm.ssc, sequencer_flag, state)
    return tsm
def tsm_ssc_write_sequencer_register(
    tsm: TSMDigital, sequencer_register: enums.SequencerRegister, value: int = 0
):
    """Write *value* into *sequencer_register* on every session; returns *tsm*."""
    _ssc_write_sequencer_register(tsm.ssc, sequencer_register, value)
    return tsm
# End of Sequencer Flags and Registers #
# Session Properties #
def tsm_ssc_get_properties(tsm: TSMDigital):
    """Read back common level/timing properties for every session; returns (tsm, properties)."""
    session_properties: typing.List[Session_Properties] = []
    for _ssc in tsm.ssc:
        instrument_name = ""
        # Extract the first letters+digits run from the session's repr as the
        # instrument name; left empty when nothing matches.
        match = re.search(r"[A-Za-z]+[1-9]+", str(_ssc.session))
        if match:
            instrument_name = match.group()
        session_properties.append(
            Session_Properties(
                instrument_name,
                _ssc.session.channels[_ssc.channel_list].voh,
                _ssc.session.channels[_ssc.channel_list].vol,
                _ssc.session.channels[_ssc.channel_list].vih,
                _ssc.session.channels[_ssc.channel_list].vil,
                _ssc.session.channels[_ssc.channel_list].vterm,
                _ssc.session.channels[_ssc.channel_list].frequency_counter_measurement_time,
            )
        )
    return tsm, session_properties
# End of Session Properties #
# Source and Capture Waveforms #
def tsm_ssc_fetch_capture_waveform(
    tsm: TSMDigital, waveform_name: str, samples_to_read: int, timeout: float = 10
):
    """Fetch *samples_to_read* samples of capture waveform *waveform_name*; returns (tsm, per-site waveforms)."""
    initialized_array = [[0 for _ in range(samples_to_read)] for _ in range(len(tsm.site_numbers))]
    per_instrument_to_per_site_lut = _ssc_calculate_per_instrument_to_per_site_lut(
        tsm.ssc, tsm.site_numbers
    )
    _, per_instrument_capture = _ssc_fetch_capture_waveform(
        tsm.ssc, waveform_name, samples_to_read, timeout
    )
    per_site_waveforms = _apply_lut_per_instrument_to_per_site(
        initialized_array, per_instrument_to_per_site_lut, per_instrument_capture
    )
    return tsm, per_site_waveforms
def tsm_ssc_write_source_waveform_broadcast(
    tsm: TSMDigital,
    waveform_name: str,
    waveform_data: typing.List[int],
    expand_to_minimum_size: bool = False,
    minimum_size: int = 128,
):
    """Write the same source waveform to every site; returns *tsm*."""
    _ssc_write_source_waveform_broadcast(
        tsm.ssc, waveform_name, waveform_data, expand_to_minimum_size, minimum_size
    )
    return tsm
def tsm_ssc_write_source_waveform_site_unique(
    tsm: TSMDigital,
    waveform_name: str,
    per_site_waveforms: typing.List[typing.List[int]],
    expand_to_minimum_size: bool = False,
    minimum_size: int = 128,
):
    """Write a distinct source waveform per site; returns *tsm*."""
    # All per-site waveforms must share the same length (cols).
    _, cols = numpy.shape(per_site_waveforms)
    (
        per_site_to_per_instrument_lut,
        instrument_count,
        max_sites_on_instrument,
    ) = _ssc_calculate_per_site_to_per_instrument_lut(tsm.ssc, tsm.site_numbers)
    initialized_array = [
        [[0 for _ in range(cols)] for _ in range(max_sites_on_instrument)]
        for _ in range(instrument_count)
    ]
    per_instrument_waveforms = _apply_lut_per_site_to_per_instrument(
        initialized_array, per_site_to_per_instrument_lut, per_site_waveforms
    )
    _ssc_write_source_waveform_site_unique(
        tsm.ssc,
        waveform_name,
        per_instrument_waveforms,
        expand_to_minimum_size,
        minimum_size,
    )
    return tsm
# End of Source and Capture Waveforms #
# SSC Digital #
# Clock Generation #
def _ssc_clock_generator_abort(ssc: typing.List[SSCDigital]):
for _ssc in ssc:
_ssc.session.channels[_ssc.channel_list].clock_generator_abort()
return ssc
def _ssc_clock_generator_generate_clock(
ssc: typing.List[SSCDigital], frequency: float, select_digital_function: bool = True
):
for _ssc in ssc:
_ssc.session.channels[_ssc.channel_list].clock_generator_generate_clock(
frequency, select_digital_function
)
return ssc
def _ssc_modify_time_set_for_clock_generation(
    ssc: typing.List[SSCDigital], frequency: float, duty_cycle: float, time_set: str
):
    """Reshape *time_set* so a pattern burst toggles at *frequency*/*duty_cycle* (RL drive format)."""
    period = 1 / frequency
    edge = period * duty_cycle
    for device in ssc:
        device.session.configure_time_set_period(time_set, period)
        device.session.channels[device.channel_list].configure_time_set_drive_edges(
            time_set, enums.DriveFormat.RL, 0, 0, edge, edge
        )
    return ssc
# End of Clock Generation #
# Configuration #
def _ssc_select_function(ssc: typing.List[SSCDigital], function: enums.SelectedFunction):
    """Abort any running burst, then switch every channel to *function*."""
    for device in ssc:
        device.session.abort()
        device.session.channels[device.channel_list].selected_function = function
    return ssc
# End of Configuration #
# Frequency Measurement #
def _ssc_frequency_counter_configure_measurement_time(
    ssc: typing.List[SSCDigital], measurement_time: float
):
    """Set the frequency-counter aperture on every session's channel set."""
    for device in ssc:
        channels = device.session.channels[device.channel_list]
        channels.frequency_counter_measurement_time = measurement_time
    return ssc
def _ssc_frequency_counter_measure_frequency(ssc: typing.List[SSCDigital]):
    """Measure frequency on every channel; one result list per instrument."""
    per_instrument_frequencies = [
        device.session.channels[device.channel_list].frequency_counter_measure_frequency()
        for device in ssc
    ]
    return ssc, per_instrument_frequencies
# End of Frequency Measurement #
# HRAM #
def _ssc_configure_hram_settings(
    ssc: typing.List[SSCDigital],
    cycles_to_acquire: enums.HistoryRAMCyclesToAcquire = enums.HistoryRAMCyclesToAcquire.FAILED,
    pretrigger_samples: int = 0,
    max_samples_to_acquire_per_site: int = 8191,
    number_of_samples_is_finite: bool = True,
    buffer_size_per_site: int = 32000,
):
    """Program the History RAM acquisition settings on every session."""
    for device in ssc:
        session = device.session
        session.history_ram_cycles_to_acquire = cycles_to_acquire
        session.history_ram_pretrigger_samples = pretrigger_samples
        session.history_ram_max_samples_to_acquire_per_site = max_samples_to_acquire_per_site
        session.history_ram_number_of_samples_is_finite = number_of_samples_is_finite
        session.history_ram_buffer_size_per_site = buffer_size_per_site
    return ssc
def _ssc_configure_hram_trigger(
    ssc: typing.List[SSCDigital],
    triggers_type: enums.HistoryRAMTriggerType,
    cycle_number: int = 0,
    pattern_label: str = "",
    cycle_offset: int = 0,
    vector_offset: int = 0,
):
    """Arm the History RAM trigger; an unrecognized trigger type leaves sessions untouched."""
    for device in ssc:
        session = device.session
        if triggers_type == enums.HistoryRAMTriggerType.FIRST_FAILURE:
            session.history_ram_trigger_type = triggers_type
        elif triggers_type == enums.HistoryRAMTriggerType.CYCLE_NUMBER:
            session.history_ram_trigger_type = triggers_type
            session.cycle_number_history_ram_trigger_cycle_number = cycle_number
        elif triggers_type == enums.HistoryRAMTriggerType.PATTERN_LABEL:
            session.history_ram_trigger_type = triggers_type
            session.pattern_label_history_ram_trigger_label = pattern_label
            session.pattern_label_history_ram_trigger_cycle_offset = cycle_offset
            session.pattern_label_history_ram_trigger_vector_offset = vector_offset
    return ssc
def _ssc_get_hram_settings(ssc: typing.List[SSCDigital]):
    """Snapshot each session's History RAM acquisition settings, one entry per instrument."""
    cycles_to_acquire: typing.List[enums.HistoryRAMCyclesToAcquire] = []
    pretrigger_samples: typing.List[int] = []
    max_samples_per_site: typing.List[int] = []
    samples_is_finite: typing.List[bool] = []
    buffer_size_per_site: typing.List[int] = []
    for device in ssc:
        session = device.session
        cycles_to_acquire.append(session.history_ram_cycles_to_acquire)
        pretrigger_samples.append(session.history_ram_pretrigger_samples)
        max_samples_per_site.append(session.history_ram_max_samples_to_acquire_per_site)
        samples_is_finite.append(session.history_ram_number_of_samples_is_finite)
        buffer_size_per_site.append(session.history_ram_buffer_size_per_site)
    return (
        ssc,
        cycles_to_acquire,
        pretrigger_samples,
        max_samples_per_site,
        samples_is_finite,
        buffer_size_per_site,
    )
def _ssc_get_hram_trigger_settings(ssc: typing.List[SSCDigital]):
    """Snapshot each session's History RAM trigger settings, one entry per instrument."""
    triggers_type: typing.List[enums.HistoryRAMTriggerType] = []
    cycle_numbers: typing.List[int] = []
    pattern_labels: typing.List[str] = []
    cycle_offsets: typing.List[int] = []
    vector_offsets: typing.List[int] = []
    for device in ssc:
        session = device.session
        triggers_type.append(session.history_ram_trigger_type)
        cycle_numbers.append(session.cycle_number_history_ram_trigger_cycle_number)
        pattern_labels.append(session.pattern_label_history_ram_trigger_label)
        cycle_offsets.append(session.pattern_label_history_ram_trigger_cycle_offset)
        vector_offsets.append(session.pattern_label_history_ram_trigger_vector_offset)
    return (
        ssc,
        triggers_type,
        cycle_numbers,
        pattern_labels,
        cycle_offsets,
        vector_offsets,
    )
def _ssc_stream_hram_results(ssc: typing.List[SSCDigital]):
    """Stream History RAM cycle information for every site of every session.

    Returns a tuple of (ssc, per-site lists of HistoryRAMCycleInformation,
    the largest per-site sample count observed).
    """
    # Explode each SSC into one SSCDigital per (channel group, site) pair so
    # HRAM can be fetched site by site.
    per_instrument_per_site_array: typing.List[SSCDigital] = []
    for _ssc in ssc:
        channel_list_array, site_list_array, _ = _arrange_channels_per_site(
            _ssc.channel_list, _ssc.site_list
        )
        for channel, site in zip(channel_list_array, site_list_array):
            per_instrument_per_site_array.append(SSCDigital(_ssc.session, channel, site))
    per_instrument_per_site_cycle_information: typing.List[
        typing.List[HistoryRAMCycleInformation]
    ] = []
    number_of_samples = 0
    for _ssc in per_instrument_per_site_array:
        cycle_information: typing.List[HistoryRAMCycleInformation] = []
        read_position = 0
        sum_of_samples_to_read = 0
        stop = False
        # Poll until the session reports done AND no new samples arrived in the
        # last pass; each pass fetches only the samples added since the previous
        # read (incremental streaming while the burst may still be running).
        while not stop:
            done = _ssc.session.is_done()
            _, pins, _ = _channel_list_to_pins(_ssc.channel_list)
            sample_count = _ssc.session.sites[_ssc.site_list].get_history_ram_sample_count()
            samples_to_read = sample_count - read_position
            cycle_information += (
                _ssc.session.sites[_ssc.site_list]
                .pins[pins]
                .fetch_history_ram_cycle_information(read_position, samples_to_read)
            )
            read_position = sample_count
            sum_of_samples_to_read += samples_to_read
            if not samples_to_read and done:
                stop = True
        per_instrument_per_site_cycle_information.append(cycle_information)
        # Report the largest per-site sample count across all exploded sites.
        number_of_samples = max(number_of_samples, sum_of_samples_to_read)
    return ssc, per_instrument_per_site_cycle_information, number_of_samples
# End of HRAM #
# Pattern Actions #
def _ssc_abort(ssc: typing.List[SSCDigital]):
    """Abort any running pattern burst on every session."""
    for device in ssc:
        device.session.abort()
    return ssc
def _ssc_burst_pattern_pass_fail(
    ssc: typing.List[SSCDigital],
    start_label: str,
    select_digital_function: bool = True,
    timeout: float = 10,
):
    """Burst a pattern (blocking) and collect per-site pass/fail flags per instrument."""
    per_instrument_pass: typing.List[typing.List[bool]] = []
    for device in ssc:
        site_results = device.session.sites[device.site_list].burst_pattern(
            start_label, select_digital_function, True, timeout
        )
        per_instrument_pass.append(list(site_results.values()))
    return ssc, per_instrument_pass
def _ssc_burst_pattern(
    ssc: typing.List[SSCDigital],
    start_label: str,
    select_digital_function: bool = True,
    timeout: float = 10,
    wait_until_done: bool = True,
):
    """Burst the pattern starting at *start_label* on every session."""
    for device in ssc:
        device.session.sites[device.site_list].burst_pattern(
            start_label, select_digital_function, wait_until_done, timeout
        )
    return ssc
def _ssc_get_fail_count(ssc: typing.List[SSCDigital]):
    """Collect per-channel fail counts, one list per instrument."""
    per_instrument_failure_counts = [
        device.session.channels[device.channel_list].get_fail_count()
        for device in ssc
    ]
    return ssc, per_instrument_failure_counts
def _ssc_get_site_pass_fail(ssc: typing.List[SSCDigital]):
    """Collect per-site pass/fail flags, one list per instrument."""
    per_instrument_pass: typing.List[typing.List[bool]] = []
    for device in ssc:
        site_results = device.session.sites[device.site_list].get_site_pass_fail()
        per_instrument_pass.append(list(site_results.values()))
    return ssc, per_instrument_pass
def _ssc_wait_until_done(ssc: typing.List[SSCDigital], timeout: float = 10):
    """Block until every session finishes its burst (or *timeout* elapses)."""
    for device in ssc:
        device.session.wait_until_done(timeout)
    return ssc
# End of Pattern Actions #
# Pin Levels and Timing #
def _ssc_apply_levels_and_timing(
    ssc: typing.List[SSCDigital], levels_sheet: str, timing_sheet: str
):
    """Apply the named levels and timing sheets to every session's sites."""
    for device in ssc:
        device.session.sites[device.site_list].apply_levels_and_timing(levels_sheet, timing_sheet)
    return ssc
def _ssc_apply_tdr_offsets(
    ssc: typing.List[SSCDigital],
    per_instrument_offsets: typing.List[typing.List[float]],
):
    """Apply TDR offsets, one offset list per instrument (zipped positionally)."""
    for device, offsets in zip(ssc, per_instrument_offsets):
        device.session.channels[device.channel_list].apply_tdr_offsets(offsets)
    return ssc
def _ssc_configure_active_load(ssc: typing.List[SSCDigital], vcom: float, iol: float, ioh: float):
    """Program active-load levels (driver call takes iol, ioh, vcom in that order)."""
    for device in ssc:
        device.session.channels[device.channel_list].configure_active_load_levels(iol, ioh, vcom)
    return ssc
def _ssc_configure_single_level_per_site(
    ssc: typing.List[SSCDigital],
    level_type_to_set: LevelTypeToSet,
    per_site_value: typing.List[typing.List[float]],
):
    """Program one pin-level attribute with a distinct value per site, then commit."""
    attr_by_level = {
        LevelTypeToSet.VIL: "vil",
        LevelTypeToSet.VIH: "vih",
        LevelTypeToSet.VOL: "vol",
        LevelTypeToSet.VOH: "voh",
        LevelTypeToSet.VTERM: "vterm",
        LevelTypeToSet.LOL: "lol",
        LevelTypeToSet.LOH: "loh",
        LevelTypeToSet.VCOM: "vcom",
    }
    attr = attr_by_level.get(level_type_to_set)
    for device, settings in zip(ssc, per_site_value):
        channel_list_array, _, _ = _arrange_channels_per_site(
            device.channel_list, device.site_list
        )
        for channel, setting in zip(channel_list_array, settings):
            # Unknown level types are silently skipped, matching the elif chain.
            if attr is not None:
                setattr(device.session.channels[channel], attr, setting)
        device.session.commit()
    return ssc
def _ssc_configure_single_level(
    ssc: typing.List[SSCDigital], level_type_to_set: LevelTypeToSet, setting: float
):
    """Program one pin-level attribute to the same value on every session, then commit."""
    attr_by_level = {
        LevelTypeToSet.VIL: "vil",
        LevelTypeToSet.VIH: "vih",
        LevelTypeToSet.VOL: "vol",
        LevelTypeToSet.VOH: "voh",
        LevelTypeToSet.VTERM: "vterm",
        LevelTypeToSet.LOL: "lol",
        LevelTypeToSet.LOH: "loh",
        LevelTypeToSet.VCOM: "vcom",
    }
    attr = attr_by_level.get(level_type_to_set)
    for device in ssc:
        # Unknown level types are silently skipped; commit still runs.
        if attr is not None:
            setattr(device.session.channels[device.channel_list], attr, setting)
        device.session.commit()
    return ssc
def _ssc_configure_termination_mode(
    ssc: typing.List[SSCDigital], termination_mode: enums.TerminationMode
):
    """Set the termination mode on every session's channel set."""
    for device in ssc:
        device.session.channels[device.channel_list].termination_mode = termination_mode
    return ssc
def _ssc_configure_time_set_compare_edge_per_site_per_pin(
    ssc: typing.List[SSCDigital],
    time_set: str,
    per_site_per_pin_compare_strobe: typing.List[typing.List[float]],
):
    """Configure a distinct compare strobe per channel (per site, per pin)."""
    for device, compare_strobes in zip(ssc, per_site_per_pin_compare_strobe):
        channels, _, _ = _channel_list_to_pins(device.channel_list)
        for channel, compare_strobe in zip(channels, compare_strobes):
            device.session.channels[channel].configure_time_set_compare_edges_strobe(
                time_set, compare_strobe
            )
    return ssc
def _ssc_configure_time_set_compare_edge_per_site(
    ssc: typing.List[SSCDigital],
    time_set: str,
    per_site_compare_strobe: typing.List[typing.List[float]],
):
    """Configure a distinct compare strobe for each site's channel group."""
    for device, compare_strobes in zip(ssc, per_site_compare_strobe):
        channel_list_array, _, _ = _arrange_channels_per_site(
            device.channel_list, device.site_list
        )
        for channel, compare_strobe in zip(channel_list_array, compare_strobes):
            device.session.channels[channel].configure_time_set_compare_edges_strobe(
                time_set, compare_strobe
            )
    return ssc
def _ssc_configure_time_set_compare_edge(
    ssc: typing.List[SSCDigital], time_set: str, compare_strobe: float
):
    """Configure the same compare strobe on every session's channel set."""
    for device in ssc:
        device.session.channels[device.channel_list].configure_time_set_compare_edges_strobe(
            time_set, compare_strobe
        )
    return ssc
def _ssc_configure_time_set_period(ssc: typing.List[SSCDigital], time_set: str, period: float):
    """Set the vector period, clamped to the supported range [10 ns, 40 µs]."""
    configured_period = min(max(period, 10e-9), 40e-6)
    for device in ssc:
        device.session.configure_time_set_period(time_set, configured_period)
    return ssc, configured_period
def _ssc_configure_voltage_levels(
    ssc: typing.List[SSCDigital],
    vil: float,
    vih: float,
    vol: float,
    voh: float,
    vterm: float,
):
    """Program all five digital voltage levels on every session's channel set."""
    for device in ssc:
        device.session.channels[device.channel_list].configure_voltage_levels(
            vil, vih, vol, voh, vterm
        )
    return ssc
# End of Pin Levels and Timing #
# PPMU #
def _ssc_ppmu_configure_aperture_time(ssc: typing.List[SSCDigital], aperture_time: float):
    """Set the PPMU aperture time on every session's channel set."""
    for device in ssc:
        device.session.channels[device.channel_list].ppmu_aperture_time = aperture_time
    return ssc
def _ssc_ppmu_configure_current_limit_range(
    ssc: typing.List[SSCDigital], current_limit_range: float
):
    """Set the PPMU current-limit range; the magnitude is used, sign ignored."""
    magnitude = abs(current_limit_range)
    for device in ssc:
        device.session.channels[device.channel_list].ppmu_current_limit_range = magnitude
    return ssc
def _ssc_ppmu_configure_voltage_limits(
    ssc: typing.List[SSCDigital], voltage_limit_high: float, voltage_limit_low: float
):
    """Set the PPMU high/low voltage limits on every session's channel set."""
    for device in ssc:
        channels = device.session.channels[device.channel_list]
        channels.ppmu_voltage_limit_high = voltage_limit_high
        channels.ppmu_voltage_limit_low = voltage_limit_low
    return ssc
def _ssc_ppmu_measure(ssc: typing.List[SSCDigital], measurement_type: enums.PPMUMeasurementType):
    """Take a PPMU measurement on every channel; one result list per instrument."""
    per_instrument_measurements = [
        device.session.channels[device.channel_list].ppmu_measure(measurement_type)
        for device in ssc
    ]
    return ssc, per_instrument_measurements
def _ssc_ppmu_source_current(
    ssc: typing.List[SSCDigital], current_level: float, current_level_range: float = 0
):
    """Force a PPMU current on every channel.

    A range of 0 means derive it from |current_level|; the range is always
    clamped to the supported [2 µA, 32 mA] window.
    """
    if current_level_range == 0:
        current_level_range = abs(current_level)
    current_level_range = min(max(current_level_range, 2e-6), 32e-3)
    for device in ssc:
        channels = device.session.channels[device.channel_list]
        channels.ppmu_output_function = enums.PPMUOutputFunction.CURRENT
        channels.ppmu_current_level_range = current_level_range
        channels.ppmu_current_level = current_level
        channels.ppmu_source()
    return ssc
def _ssc_ppmu_source_voltage_per_site_per_pin(
    ssc: typing.List[SSCDigital],
    current_limit_range: float,
    per_site_per_pin_source_voltages: typing.List[typing.List[float]],
):
    """Force a distinct PPMU voltage on each individual channel (per site, per pin)."""
    current_limit_range = abs(current_limit_range)
    for device, source_voltages in zip(ssc, per_site_per_pin_source_voltages):
        all_channels = device.session.channels[device.channel_list]
        all_channels.ppmu_output_function = enums.PPMUOutputFunction.VOLTAGE
        all_channels.ppmu_current_limit_range = current_limit_range
        channels, _, _ = _channel_list_to_pins(device.channel_list)
        for channel, source_voltage in zip(channels, source_voltages):
            device.session.channels[channel].ppmu_voltage_level = source_voltage
        device.session.channels[device.channel_list].ppmu_source()
    return ssc
def _ssc_ppmu_source_voltage_per_site(
    ssc: typing.List[SSCDigital],
    current_limit_range: float,
    per_site_source_voltages: typing.List[typing.List[float]],
):
    """Force a distinct PPMU voltage on each site's channel group."""
    current_limit_range = abs(current_limit_range)
    for device, source_voltages in zip(ssc, per_site_source_voltages):
        all_channels = device.session.channels[device.channel_list]
        all_channels.ppmu_output_function = enums.PPMUOutputFunction.VOLTAGE
        all_channels.ppmu_current_limit_range = current_limit_range
        channel_list_array, _, _ = _arrange_channels_per_site(
            device.channel_list, device.site_list
        )
        for channel, source_voltage in zip(channel_list_array, source_voltages):
            device.session.channels[channel].ppmu_voltage_level = source_voltage
        device.session.channels[device.channel_list].ppmu_source()
    return ssc
def _ssc_ppmu_source_voltage(
    ssc: typing.List[SSCDigital], voltage_level: float, current_limit_range: float
):
    """Force the same PPMU voltage on every channel.

    Only the current limit *range* is programmed here; the PXIe-6570 and
    PXIe-6571 do not support current limits in PPMU voltage mode:
    http://zone.ni.com/reference/en-XX/help/375145e/nidigitalpropref/pnidigital_ppmucurrentlimit/
    """
    current_limit_range = abs(current_limit_range)
    for device in ssc:
        channels = device.session.channels[device.channel_list]
        channels.ppmu_output_function = enums.PPMUOutputFunction.VOLTAGE
        channels.ppmu_current_limit_range = current_limit_range
        channels.ppmu_voltage_level = voltage_level
        channels.ppmu_source()
    return ssc
def _ssc_ppmu_source(ssc: typing.List[SSCDigital]):
    """Start PPMU sourcing with the already-configured settings on every channel."""
    for device in ssc:
        device.session.channels[device.channel_list].ppmu_source()
    return ssc
# End of PPMU #
# Sequencer Flags and Registers #
def _ssc_read_sequencer_flag(ssc: typing.List[SSCDigital], sequencer_flag: enums.SequencerFlag):
    """Read one sequencer flag from every session."""
    per_instrument_state = [
        device.session.read_sequencer_flag(sequencer_flag) for device in ssc
    ]
    return ssc, per_instrument_state
def _ssc_read_sequencer_register(
    ssc: typing.List[SSCDigital], sequencer_register: enums.SequencerRegister
):
    """Read one sequencer register from every session."""
    per_instrument_register_values = [
        device.session.read_sequencer_register(sequencer_register) for device in ssc
    ]
    return ssc, per_instrument_register_values
def _ssc_write_sequencer_flag(
    ssc: typing.List[SSCDigital],
    sequencer_flag: enums.SequencerFlag,
    state: bool = True,
):
    """Write one sequencer flag on every session."""
    for device in ssc:
        device.session.write_sequencer_flag(sequencer_flag, state)
    return ssc
def _ssc_write_sequencer_register(
    ssc: typing.List[SSCDigital],
    sequencer_register: enums.SequencerRegister,
    value: int = 0,
):
    """Write one sequencer register on every session."""
    for device in ssc:
        device.session.write_sequencer_register(sequencer_register, value)
    return ssc
# End of Sequencer Flags and Registers #
# Source and Capture Waveforms #
def _ssc_fetch_capture_waveform(
    ssc: typing.List[SSCDigital],
    waveform_name: str,
    samples_to_read: int,
    timeout: float = 10,
):
    """Fetch a capture waveform from each instrument as lists of per-site sample lists."""
    per_instrument_capture: typing.List[typing.List[typing.List[int]]] = []
    for device in ssc:
        waveforms = device.session.sites[device.site_list].fetch_capture_waveform(
            waveform_name, samples_to_read, timeout
        )
        per_instrument_capture.append([list(samples) for samples in waveforms.values()])
    return ssc, per_instrument_capture
def _ssc_write_source_waveform_broadcast(
    ssc: typing.List[SSCDigital],
    waveform_name: str,
    waveform_data: typing.List[int],
    expand_to_minimum_size: bool = False,
    minimum_size: int = 128,
):
    """Write one source waveform to every session.

    When *expand_to_minimum_size* is set and the data is shorter than
    *minimum_size*, the data is zero-padded up to *minimum_size* samples.
    The caller's list is never mutated (a padded copy is built instead of
    the previous element-by-element copy loop).
    """
    if expand_to_minimum_size and len(waveform_data) < minimum_size:
        waveform_data = list(waveform_data) + [0] * (minimum_size - len(waveform_data))
    for _ssc in ssc:
        _ssc.session.write_source_waveform_broadcast(waveform_name, waveform_data)
    return ssc
def _ssc_write_source_waveform_site_unique(
    ssc: typing.List[SSCDigital],
    waveform_name: str,
    per_instrument_waveforms: typing.List[typing.List[typing.List[int]]],
    expand_to_minimum_size: bool = False,
    minimum_size: int = 128,
):
    """Write a distinct source waveform per site on each session.

    *per_instrument_waveforms* carries one row per site, in the order of
    each SSC's site list; rows are keyed by site number for the driver call.
    """
    for _ssc, per_instrument_waveform in zip(ssc, per_instrument_waveforms):
        rows, cols = numpy.shape(per_instrument_waveform)
        site_numbers, _ = _site_list_to_site_numbers(_ssc.site_list)
        if minimum_size > cols and expand_to_minimum_size:
            # Zero-pad every site's row out to minimum_size samples.
            # NOTE(review): the padded buffer is sized by len(site_numbers) while
            # the copy loop runs over `rows`; this assumes rows == len(site_numbers)
            # — TODO confirm with callers.
            initialized_array = [[0 for _ in range(minimum_size)] for _ in range(len(site_numbers))]
            for row in range(rows):
                for col in range(cols):
                    initialized_array[row][col] = per_instrument_waveform[row][col]
            per_instrument_waveform = initialized_array
        waveform_data = {}
        # The driver expects a {site_number: samples} mapping.
        for site_number, waveform in zip(site_numbers, per_instrument_waveform):
            waveform_data[site_number] = waveform
        _ssc.session.write_source_waveform_site_unique(waveform_name, waveform_data)
    return ssc
# End of Source and Capture Waveforms #
# Static #
def _ssc_read_static(ssc: typing.List[SSCDigital]):
    """Read static pin states from every channel; one list per instrument."""
    per_instrument_data = [
        device.session.channels[device.channel_list].read_static() for device in ssc
    ]
    return ssc, per_instrument_data
def _ssc_write_static(ssc: typing.List[SSCDigital], state: enums.WriteStaticPinState):
    """Drive the same static state on every session's channel set."""
    for device in ssc:
        device.session.channels[device.channel_list].write_static(state)
    return ssc
def _ssc_write_static_per_site_per_pin(
    ssc: typing.List[SSCDigital],
    per_site_per_pin_state: typing.List[typing.List[enums.WriteStaticPinState]],
):
    """Drive a distinct static state on each individual channel (per site, per pin)."""
    for device, states in zip(ssc, per_site_per_pin_state):
        channels, _, _ = _channel_list_to_pins(device.channel_list)
        for channel, state in zip(channels, states):
            device.session.channels[channel].write_static(state)
    return ssc
def _ssc_write_static_per_site(
    ssc: typing.List[SSCDigital],
    per_site_state: typing.List[typing.List[enums.WriteStaticPinState]],
):
    """Drive a distinct static state on each site's channel group."""
    for device, states in zip(ssc, per_site_state):
        channel_list_array, _, _ = _arrange_channels_per_site(
            device.channel_list, device.site_list
        )
        for channel, state in zip(channel_list_array, states):
            device.session.channels[channel].write_static(state)
    return ssc
# End of Static #
# Trigger #
def _ssc_clear_start_trigger_signal(ssc: typing.List[SSCDigital]):
    """Disable the start trigger on every session."""
    for device in ssc:
        device.session.start_trigger_type = enums.TriggerType.NONE
    return ssc
def _ssc_configure_trigger_signal(
    ssc: typing.List[SSCDigital],
    source: str,
    edge: enums.DigitalEdge = enums.DigitalEdge.RISING,
):
    """Configure a digital-edge start trigger on every session."""
    for device in ssc:
        device.session.digital_edge_start_trigger_source = source
        device.session.digital_edge_start_trigger_edge = edge
    return ssc
def _ssc_export_opcode_trigger_signal(
    ssc: typing.List[SSCDigital], signal_id: str, output_terminal: str = ""
):
    """Route the pattern-opcode event *signal_id* to *output_terminal* on every session."""
    for device in ssc:
        event = device.session.pattern_opcode_events[signal_id]
        event.exported_pattern_opcode_event_output_terminal = output_terminal
    return ssc
# End of Trigger #
def _ssc_filter_sites(ssc: typing.List[SSCDigital], desired_sites: typing.List[int]):
    """Return new SSCDigital objects restricted to *desired_sites*.

    Instruments that drive none of the desired sites are dropped entirely.
    """
    filtered: typing.List[SSCDigital] = []
    for device in ssc:
        channel_list_array, site_list_array, site_numbers = _arrange_channels_per_site(
            device.channel_list, device.site_list
        )
        kept = [
            (channel, site)
            for channel, site, number in zip(channel_list_array, site_list_array, site_numbers)
            if number in desired_sites
        ]
        if kept:
            channels, sites = zip(*kept)
            filtered.append(SSCDigital(device.session, ",".join(channels), ",".join(sites)))
    return filtered
def _ssc_initiate(ssc: typing.List[SSCDigital]):
    """Initiate pattern bursting on every session."""
    for device in ssc:
        device.session.initiate()
    return ssc
# End of SSC Digital #
# Static #
def tsm_ssc_read_static(tsm: TSMDigital):
    """Read static pin states and reshape them into per-site/per-pin order."""
    blank = [[enums.PinState.ZERO] * len(tsm.pins) for _ in tsm.site_numbers]
    lut = _ssc_calculate_per_instrument_to_per_site_per_pin_lut(
        tsm.ssc, tsm.site_numbers, tsm.pins
    )
    _, per_instrument_data = _ssc_read_static(tsm.ssc)
    per_site_per_pin_data = _apply_lut_per_instrument_to_per_site_per_pin(
        blank, lut, per_instrument_data
    )
    return tsm, per_site_per_pin_data
def tsm_ssc_write_static_per_site_per_pin(
    tsm: TSMDigital,
    per_site_per_pin_state: typing.List[typing.List[enums.WriteStaticPinState]],
):
    """Drive per-site/per-pin static states, routed through the per-instrument LUT."""
    (
        per_site_per_pin_to_per_instrument_lut,
        instrument_count,
        max_sites_on_instrument,
    ) = _ssc_calculate_per_site_per_pin_to_per_instrument_lut(tsm.ssc, tsm.site_numbers, tsm.pins)
    blank = [
        [enums.WriteStaticPinState.ZERO] * max_sites_on_instrument
        for _ in range(instrument_count)
    ]
    per_instrument_state = _apply_lut_per_site_per_pin_to_per_instrument(
        blank, per_site_per_pin_to_per_instrument_lut, per_site_per_pin_state
    )
    _ssc_write_static_per_site_per_pin(tsm.ssc, per_instrument_state)
    return tsm
def tsm_ssc_write_static_per_site(
    tsm: TSMDigital, per_site_state: typing.List[enums.WriteStaticPinState]
):
    """Drive one static state per site, routed through the per-instrument LUT."""
    (
        per_site_to_per_instrument_lut,
        instrument_count,
        max_sites_on_instrument,
    ) = _ssc_calculate_per_site_to_per_instrument_lut(tsm.ssc, tsm.site_numbers)
    blank = [
        [enums.WriteStaticPinState.X] * max_sites_on_instrument
        for _ in range(instrument_count)
    ]
    per_instrument_state = _apply_lut_per_site_to_per_instrument(
        blank, per_site_to_per_instrument_lut, per_site_state
    )
    _ssc_write_static_per_site(tsm.ssc, per_instrument_state)
    return tsm
def tsm_ssc_write_static(tsm: TSMDigital, state: enums.WriteStaticPinState):
    """Drive the same static state on every pin of every site."""
    _ssc_write_static(tsm.ssc, state)
    return tsm
# End of Static #
# Subroutines #
def _apply_lut_per_instrument_to_per_site_per_pin(
    initialized_array: typing.List[typing.List[typing.Any]],
    lut: typing.List[Location_2D_Array],
    results_to_apply_lut_to: typing.List[typing.List[typing.Any]],
):
    """Scatter per-instrument results into a per-site/per-pin grid via *lut*."""
    array_out = copy.deepcopy(initialized_array)
    for locations, results in zip(lut, results_to_apply_lut_to):
        for location, result in zip(locations.location_2d_array, results):
            array_out[location.row][location.col] = result
    return array_out
def _apply_lut_per_instrument_to_per_site(
    initialized_array: typing.List[typing.Any],
    lut: typing.List[Location_1D_Array],
    results_to_apply_lut_to: typing.List[typing.List[typing.Any]],
):
    """Scatter per-instrument results into a flat per-site list via *lut*."""
    array_out = copy.deepcopy(initialized_array)
    for locations, results in zip(lut, results_to_apply_lut_to):
        for index, result in zip(locations.location_1d_array, results):
            array_out[index] = result
    return array_out
def _apply_lut_per_site_per_pin_to_per_instrument(
    initialized_array: typing.List[typing.List[typing.Any]],
    lut: typing.List[typing.List[Location_2D]],
    results_to_apply_lut_to: typing.List[typing.List[typing.Any]],
):
    """Gather per-site/per-pin values into per-instrument buffers via *lut*."""
    array_out = copy.deepcopy(initialized_array)
    for locations, results in zip(lut, results_to_apply_lut_to):
        for location, result in zip(locations, results):
            array_out[location.row][location.col] = result
    return array_out
def _apply_lut_per_site_to_per_instrument(
    initialized_array: typing.List[typing.List[typing.Any]],
    lut: typing.List[Location_2D],
    results_to_apply_lut_to: typing.List[typing.Any],
):
    """Gather flat per-site values into per-instrument buffers via *lut*."""
    array_out = copy.deepcopy(initialized_array)
    for location, result in zip(lut, results_to_apply_lut_to):
        array_out[location.row][location.col] = result
    return array_out
def _arrange_channels_per_site(channel_list_string: str, site_list_string: str):
    """Group channel names by site.

    Returns (comma-joined channel list per site, raw site tokens, site numbers).
    """
    site_numbers, site_list_array = _site_list_to_site_numbers(site_list_string)
    channels, _, sites = _channel_list_to_pins(channel_list_string)
    channel_list_array = [
        ",".join(channel for channel, site in zip(channels, sites) if site == site_number)
        for site_number in site_numbers
    ]
    return channel_list_array, site_list_array, site_numbers
def _site_list_to_site_numbers(site_list: str):
sites = re.split(r"\s*,\s*", site_list)
site_numbers = [int(re.match(r"site(\d+)", site).group(1)) for site in sites]
return site_numbers, sites
def _channel_list_to_pins(channel_list: str):
channels = re.split(r"\s*,\s*", channel_list)
sites = [-1] * len(channels)
pins = channels[:]
for i in range(len(channels)):
try:
site, pins[i] = re.split(r"[/\\]", channels[i])
except ValueError:
pass
else:
sites[i] = int(re.match(r"site(\d+)", site).group(1))
return channels, pins, sites
def _ssc_calculate_per_instrument_per_site_to_per_site_lut(
    ssc: typing.List[SSCDigital], sites: typing.List[int]
):
    """One Location_1D_Array per instrument-site, pointing into the flat *sites* list."""
    lut: typing.List[Location_1D_Array] = []
    for device in ssc:
        site_numbers, _ = _site_list_to_site_numbers(device.site_list)
        lut.extend(Location_1D_Array([sites.index(number)]) for number in site_numbers)
    return lut
def _ssc_calculate_per_instrument_to_per_site_lut(
    ssc: typing.List[SSCDigital], sites: typing.List[int]
):
    """One Location_1D_Array per instrument, mapping its sites into the flat *sites* list."""
    lut: typing.List[Location_1D_Array] = []
    for device in ssc:
        site_numbers, _ = _site_list_to_site_numbers(device.site_list)
        lut.append(Location_1D_Array([sites.index(number) for number in site_numbers]))
    return lut
def _ssc_calculate_per_instrument_to_per_site_per_pin_lut(
    ssc: typing.List[SSCDigital], sites: typing.List[int], pins: typing.List[str]
):
    """One Location_2D_Array per instrument, mapping channels to (site, pin) cells."""
    lut: typing.List[Location_2D_Array] = []
    for device in ssc:
        _, _pins, _sites = _channel_list_to_pins(device.channel_list)
        locations = [
            Location_2D(sites.index(site), pins.index(pin))
            for pin, site in zip(_pins, _sites)
        ]
        lut.append(Location_2D_Array(locations))
    return lut
def _ssc_calculate_per_site_per_pin_to_per_instrument_lut(
    ssc: typing.List[SSCDigital], sites: typing.List[int], pins: typing.List[str]
):
    """Build a (site, pin) -> instrument-location LUT plus buffer-shape information.

    Returns (lut, instrument count, widest channel count on any instrument).
    """
    max_sites_on_instrument = 0
    pins_sites_array: typing.List[typing.Any] = []
    location_2d_array: typing.List[Location_2D] = []
    for instrument_index, device in enumerate(ssc):
        _, _pins, _sites = _channel_list_to_pins(device.channel_list)
        pins_sites_array.extend([pin, site] for pin, site in zip(_pins, _sites))
        max_sites_on_instrument = max(max_sites_on_instrument, len(_pins))
        location_2d_array.extend(
            Location_2D(instrument_index, channel_index)
            for channel_index in range(len(_pins))
        )
    per_site_per_pin_to_per_instrument_lut = [
        [location_2d_array[pins_sites_array.index([pin, site])] for pin in pins]
        for site in sites
    ]
    return (
        per_site_per_pin_to_per_instrument_lut,
        len(ssc),
        max_sites_on_instrument,
    )
def _ssc_calculate_per_site_to_per_instrument_lut(
    ssc: typing.List[SSCDigital], sites: typing.List[int]
):
    """Map each requested site to its (instrument, site-slot) location.

    Returns (lut, instrument count, most sites on any single instrument).
    """
    max_sites_on_instrument = 0
    sites_array: typing.List[int] = []
    location_2d_array: typing.List[Location_2D] = []
    for instrument_index, device in enumerate(ssc):
        site_numbers, _ = _site_list_to_site_numbers(device.site_list)
        sites_array.extend(site_numbers)
        max_sites_on_instrument = max(max_sites_on_instrument, len(site_numbers))
        location_2d_array.extend(
            Location_2D(instrument_index, slot) for slot in range(len(site_numbers))
        )
    per_site_to_per_instrument_lut = [
        location_2d_array[sites_array.index(site)] for site in sites
    ]
    return per_site_to_per_instrument_lut, len(ssc), max_sites_on_instrument
# End of Subroutines #
# TSM #
def tsm_close_sessions(tsm_context: SemiconductorModuleContext):
    """Reset, then close, every nidigital session owned by the TSM context."""
    for session in tsm_context.get_all_nidigital_sessions():
        session.reset()
        session.close()
def tsm_initialize_sessions(tsm_context: SemiconductorModuleContext, option_string: str = ""):
    """Create and configure a nidigital session for every instrument in the pin map.

    Loads the pin map, specifications/levels/timing sheets, all patterns, and
    the project's capture/source waveform files into each new session, then
    registers the session with the TSM context.
    """
    instrument_names = tsm_context.get_all_nidigital_instrument_names()
    if instrument_names:
        pin_map_file_path = tsm_context.pin_map_file_path
        specifications_file_paths = tsm_context.nidigital_project_specifications_file_paths
        levels_file_paths = tsm_context.nidigital_project_levels_file_paths
        timing_file_paths = tsm_context.nidigital_project_timing_file_paths
        pattern_file_paths = tsm_context.nidigital_project_pattern_file_paths
        source_waveform_file_paths = tsm_context.nidigital_project_source_waveform_file_paths
        capture_waveform_file_paths = tsm_context.nidigital_project_capture_waveform_file_paths
        for instrument_name in instrument_names:
            session = nidigital.Session(instrument_name, options=option_string)
            tsm_context.set_nidigital_session(instrument_name, session)
            session.load_pin_map(pin_map_file_path)
            session.load_specifications_levels_and_timing(
                specifications_file_paths, levels_file_paths, timing_file_paths
            )
            session.unload_all_patterns()
            for pattern_file_path in pattern_file_paths:
                session.load_pattern(pattern_file_path)
            for capture_waveform_file_path in capture_waveform_file_paths:
                # splitext, not str.split("."), so names containing extra dots
                # (e.g. "wave.v2.tdms") don't raise ValueError on unpacking.
                filename = os.path.basename(capture_waveform_file_path)
                waveform_name = os.path.splitext(filename)[0]
                session.create_capture_waveform_from_file_digicapture(
                    waveform_name, capture_waveform_file_path
                )
            for source_waveform_file_path in source_waveform_file_paths:
                filename = os.path.basename(source_waveform_file_path)
                waveform_name = os.path.splitext(filename)[0]
                session.create_source_waveform_from_file_tdms(
                    waveform_name, source_waveform_file_path, False
                )
def tsm_ssc_1_pin_to_n_sessions(tsm_context: SemiconductorModuleContext, pin: str):
tsm = tsm_ssc_n_pins_to_m_sessions(tsm_context, [pin])
return tsm
def tsm_ssc_n_pins_to_m_sessions(
tsm_context: SemiconductorModuleContext,
pins: typing.List[str],
site_numbers: typing.List[int] = [],
turn_pin_groups_to_pins: bool = True,
):
if len(site_numbers) == 0:
site_numbers = list(tsm_context.site_numbers)
if turn_pin_groups_to_pins:
pins = list(tsm_context.get_pins_in_pin_groups(pins))
ssc: typing.List[SSCDigital] = []
(
pin_query_context,
sessions,
pin_set_strings,
) = tsm_context.pins_to_nidigital_sessions_for_ppmu(pins)
_, _, site_lists = tsm_context.pins_to_nidigital_sessions_for_pattern(pins)
for session, pin_set_string, site_list in zip(sessions, pin_set_strings, site_lists):
ssc.append(SSCDigital(session, pin_set_string, site_list))
tsm = TSMDigital(pin_query_context, ssc, site_numbers, pins)
return tsm
def tsm_ssc_filter_sites(tsm: TSMDigital, desired_sites: typing.List[int]):
ssc = _ssc_filter_sites(tsm.ssc, desired_sites)
tsm = TSMDigital(tsm.pin_query_context, ssc, tsm.site_numbers, tsm.pins)
return tsm
def tsm_ssc_initiate(tsm: TSMDigital):
_ssc_initiate(tsm.ssc)
return tsm
def tsm_ssc_publish(
tsm: TSMDigital,
data_to_publish: typing.List[typing.Any],
published_data_id: str = "",
):
if len(numpy.shape(data_to_publish)) == 1:
(
per_site_to_per_instrument_lut,
instrument_count,
max_sites_on_instrument,
) = _ssc_calculate_per_site_to_per_instrument_lut(tsm.ssc, tsm.site_numbers)
default = {bool: False, float: 0.0}[type(data_to_publish[0])]
initialized_array = [
[default for _ in range(max_sites_on_instrument)] for _ in range(instrument_count)
]
per_instrument_data = _apply_lut_per_site_to_per_instrument(
initialized_array, per_site_to_per_instrument_lut, data_to_publish
)
tsm.pin_query_context.publish(per_instrument_data, published_data_id)
elif len(numpy.shape(data_to_publish)) == 2:
(
per_site_per_pin_to_per_instrument_lut,
instrument_count,
max_sites_on_instrument,
) = _ssc_calculate_per_site_per_pin_to_per_instrument_lut(
tsm.ssc, tsm.site_numbers, tsm.pins
)
default = {bool: False, float: 0.0}[type(data_to_publish[0][0])]
initialized_array = [
[default for _ in range(max_sites_on_instrument)] for _ in range(instrument_count)
]
per_instrument_data = _apply_lut_per_site_per_pin_to_per_instrument(
initialized_array, per_site_per_pin_to_per_instrument_lut, data_to_publish
)
tsm.pin_query_context.publish(per_instrument_data, published_data_id)
else:
raise TypeError("Unexpected data_to_publish array dimension.")
# End of TSM #
| [
"os.mkdir",
"copy.deepcopy",
"re.split",
"os.path.basename",
"os.path.exists",
"re.match",
"numpy.shape",
"nidigital.Session",
"datetime.datetime.now",
"os.chdir",
"nidigital.history_ram_cycle_information.HistoryRAMCycleInformation"
] | [((7448, 7473), 'os.chdir', 'os.chdir', (['destination_dir'], {}), '(destination_dir)\n', (7456, 7473), False, 'import os\n'), ((24769, 24800), 'numpy.shape', 'numpy.shape', (['per_site_waveforms'], {}), '(per_site_waveforms)\n', (24780, 24800), False, 'import numpy\n'), ((54937, 54969), 'copy.deepcopy', 'copy.deepcopy', (['initialized_array'], {}), '(initialized_array)\n', (54950, 54969), False, 'import copy\n'), ((55434, 55466), 'copy.deepcopy', 'copy.deepcopy', (['initialized_array'], {}), '(initialized_array)\n', (55447, 55466), False, 'import copy\n'), ((55935, 55967), 'copy.deepcopy', 'copy.deepcopy', (['initialized_array'], {}), '(initialized_array)\n', (55948, 55967), False, 'import copy\n'), ((56408, 56440), 'copy.deepcopy', 'copy.deepcopy', (['initialized_array'], {}), '(initialized_array)\n', (56421, 56440), False, 'import copy\n'), ((57252, 57284), 're.split', 're.split', (['"""\\\\s*,\\\\s*"""', 'site_list'], {}), "('\\\\s*,\\\\s*', site_list)\n", (57260, 57284), False, 'import re\n'), ((57460, 57495), 're.split', 're.split', (['"""\\\\s*,\\\\s*"""', 'channel_list'], {}), "('\\\\s*,\\\\s*', channel_list)\n", (57468, 57495), False, 'import re\n'), ((7377, 7408), 'os.path.exists', 'os.path.exists', (['destination_dir'], {}), '(destination_dir)\n', (7391, 7408), False, 'import os\n'), ((7418, 7443), 'os.mkdir', 'os.mkdir', (['destination_dir'], {}), '(destination_dir)\n', (7426, 7443), False, 'import os\n'), ((48540, 48576), 'numpy.shape', 'numpy.shape', (['per_instrument_waveform'], {}), '(per_instrument_waveform)\n', (48551, 48576), False, 'import numpy\n'), ((62559, 62616), 'nidigital.Session', 'nidigital.Session', (['instrument_name'], {'options': 'option_string'}), '(instrument_name, options=option_string)\n', (62576, 62616), False, 'import nidigital\n'), ((10490, 10518), 'nidigital.history_ram_cycle_information.HistoryRAMCycleInformation', 'HistoryRAMCycleInformation', ([], {}), '()\n', (10516, 10518), False, 'from 
nidigital.history_ram_cycle_information import HistoryRAMCycleInformation\n'), ((57627, 57659), 're.split', 're.split', (['"""[/\\\\\\\\]"""', 'channels[i]'], {}), "('[/\\\\\\\\]', channels[i])\n", (57635, 57659), False, 'import re\n'), ((63111, 63155), 'os.path.basename', 'os.path.basename', (['capture_waveform_file_path'], {}), '(capture_waveform_file_path)\n', (63127, 63155), False, 'import os\n'), ((63438, 63481), 'os.path.basename', 'os.path.basename', (['source_waveform_file_path'], {}), '(source_waveform_file_path)\n', (63454, 63481), False, 'import os\n'), ((65139, 65167), 'numpy.shape', 'numpy.shape', (['data_to_publish'], {}), '(data_to_publish)\n', (65150, 65167), False, 'import numpy\n'), ((65835, 65863), 'numpy.shape', 'numpy.shape', (['data_to_publish'], {}), '(data_to_publish)\n', (65846, 65863), False, 'import numpy\n'), ((57308, 57336), 're.match', 're.match', (['"""site(\\\\d+)"""', 'site'], {}), "('site(\\\\d+)', site)\n", (57316, 57336), False, 'import re\n'), ((9758, 9772), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9770, 9772), False, 'from datetime import datetime\n'), ((57744, 57772), 're.match', 're.match', (['"""site(\\\\d+)"""', 'site'], {}), "('site(\\\\d+)', site)\n", (57752, 57772), False, 'import re\n')] |
from .params import default_params
from scipy.interpolate import interp1d
import numpy as np
from scipy.stats import binned_statistic as binnedstat
"""
Covariances
We implement the Gaussian covariance between bandpowers.
"""
def bin_annuli(ells,cls,bin_edges):
numer = binnedstat(ells,ells*cls,bins=bin_edges,statistic=np.nanmean)[0]
denom = binnedstat(ells,ells,bins=bin_edges,statistic=np.nanmean)[0]
return numer/denom
default_binning = bin_annuli
def shot_noise(ngal):
return 1./(ngal*1.18e7)
def lensing_shape_noise(ngal,shape_noise=0.3):
return (shape_noise**2.)/2./shot_noise(ngal)
def get_avail_cls(acls,x,y):
try: return acls[x+"_"+y]
except:
try:
return self.cls[y+"_"+x]
except:
return 0
class GaussianCov(object):
def __init__(self,bin_edges,binning_func=default_binning):
self.cls = {}
self.nls = {}
ellmin,ellmax = bin_edges[0],bin_edges[-1]
self.ells = np.arange(ellmin,ellmax+1,1)
self.bin_edges = bin_edges
self.dls = np.diff(self.bin_edges)
self.ls = (self.bin_edges[1:]+self.bin_edges[:-1])/2.
def add_cls(self,name1,name2,ells,cls,ellsn=None,ncls=None):
assert "_" not in name1
assert "_" not in name2
assert name2+"_"+name1 not in self.cls.keys()
self.cls[name1+"_"+name2] = bin_annuli(self.ells,interp1d(ells,cls)(self.ells),self.bin_edges)
if (ellsn is not None) and (ncls is not None):
self.nls[name1+"_"+name2] = bin_annuli(self.ells,interp1d(ellsn,ncls)(self.ells),self.bin_edges)
def get_scls(self,x,y):
return get_avail_cls(self.cls,x,y)
def get_ncls(self,x,y):
return get_avail_cls(self.nls,x,y)
def get_tcls(self,x,y):
return self.get_scls(x,y) + self.get_ncls(x,y)
def get_cov(self,x,y,w,z,fsky):
clsum = self.get_tcls(x,w)*self.get_tcls(y,z)+self.get_tcls(x,z)*self.get_tcls(y,w)
covs = clsum / (2*self.ls+1.)/self.dls/fsky
return covs
def KnoxCov(self,specTypeXY,specTypeWZ,ellBinEdges,fsky):
'''
returns cov(Cl_XY,Cl_WZ)
'''
def ClTot(spec,ell1,ell2):
return self._bin_cls(spec,ell1,ell2,noise=True)
X, Y = specTypeXY
W, Z = specTypeWZ
ellMids = (ellBinEdges[1:] + ellBinEdges[:-1]) / 2
ellWidths = np.diff(ellBinEdges)
covs = []
sigs1 = []
sigs2 = []
for ell_left,ell_right in zip(ellBinEdges[:-1],ellBinEdges[1:]):
ClSum = ClTot(X+W,ell_left,ell_right)*ClTot(Y+Z,ell_left,ell_right)+ClTot(X+Z,ell_left,ell_right)*ClTot(Y+W,ell_left,ell_right)
ellMid = (ell_right+ell_left)/2.
ellWidth = ell_right-ell_left
var = ClSum/(2.*ellMid+1.)/ellWidth/fsky
covs.append(var)
sigs1.append(self._bin_cls(specTypeXY,ell_left,ell_right,noise=False)**2.*np.nan_to_num(1./var))
sigs2.append(self._bin_cls(specTypeWZ,ell_left,ell_right,noise=False)**2.*np.nan_to_num(1./var)) | [
"numpy.nan_to_num",
"scipy.stats.binned_statistic",
"numpy.diff",
"numpy.arange",
"scipy.interpolate.interp1d"
] | [((2359, 2379), 'numpy.diff', 'np.diff', (['ellBinEdges'], {}), '(ellBinEdges)\n', (2366, 2379), True, 'import numpy as np\n'), ((275, 341), 'scipy.stats.binned_statistic', 'binnedstat', (['ells', '(ells * cls)'], {'bins': 'bin_edges', 'statistic': 'np.nanmean'}), '(ells, ells * cls, bins=bin_edges, statistic=np.nanmean)\n', (285, 341), True, 'from scipy.stats import binned_statistic as binnedstat\n'), ((352, 412), 'scipy.stats.binned_statistic', 'binnedstat', (['ells', 'ells'], {'bins': 'bin_edges', 'statistic': 'np.nanmean'}), '(ells, ells, bins=bin_edges, statistic=np.nanmean)\n', (362, 412), True, 'from scipy.stats import binned_statistic as binnedstat\n'), ((984, 1016), 'numpy.arange', 'np.arange', (['ellmin', '(ellmax + 1)', '(1)'], {}), '(ellmin, ellmax + 1, 1)\n', (993, 1016), True, 'import numpy as np\n'), ((1067, 1090), 'numpy.diff', 'np.diff', (['self.bin_edges'], {}), '(self.bin_edges)\n', (1074, 1090), True, 'import numpy as np\n'), ((1394, 1413), 'scipy.interpolate.interp1d', 'interp1d', (['ells', 'cls'], {}), '(ells, cls)\n', (1402, 1413), False, 'from scipy.interpolate import interp1d\n'), ((2866, 2890), 'numpy.nan_to_num', 'np.nan_to_num', (['(1.0 / var)'], {}), '(1.0 / var)\n', (2879, 2890), True, 'import numpy as np\n'), ((2971, 2995), 'numpy.nan_to_num', 'np.nan_to_num', (['(1.0 / var)'], {}), '(1.0 / var)\n', (2984, 2995), True, 'import numpy as np\n'), ((1556, 1577), 'scipy.interpolate.interp1d', 'interp1d', (['ellsn', 'ncls'], {}), '(ellsn, ncls)\n', (1564, 1577), False, 'from scipy.interpolate import interp1d\n')] |
import os
import h5py
import numpy as np
from keras.callbacks import TensorBoard, TerminateOnNaN
from random import choices
from conf import conf
import tqdm
SIZE = conf['SIZE']
BATCH_SIZE = conf['TRAIN_BATCH_SIZE']
EPOCHS_PER_SAVE = conf['EPOCHS_PER_SAVE']
NUM_WORKERS = conf['NUM_WORKERS']
VALIDATION_SPLIT = conf['VALIDATION_SPLIT']
MOVE_INDEX = conf['MOVE_INDEX']
GAME_FILE = conf['GAME_FILE']
def load_moves(directory):
weights= []
indices = []
with open(os.path.join(directory, conf['MOVE_INDEX']), 'r') as f:
for line in f:
game_n, move_n, variation = line.strip().split(',')
weights.append(float(variation))
indices.append((int(game_n), int(move_n)))
return indices, weights
def train(model, game_model_name, epochs=None):
if epochs is None:
epochs = EPOCHS_PER_SAVE
name = model.name
base_name, index = name.split('_')
new_name = "_".join([base_name, str(int(index) + 1)]) + ".h5"
tf_callback = TensorBoard(log_dir=os.path.join(conf['LOG_DIR'], new_name),
histogram_freq=conf['HISTOGRAM_FREQ'], batch_size=BATCH_SIZE, write_graph=False, write_grads=False)
nan_callback = TerminateOnNaN()
directory = os.path.join("games", game_model_name)
indices, weights = load_moves(directory)
for epoch in tqdm.tqdm(range(epochs), desc="Epochs"):
for worker in tqdm.tqdm(range(NUM_WORKERS), desc="Worker_batch"):
chosen = choices(indices, weights, k = BATCH_SIZE)
X = np.zeros((BATCH_SIZE, SIZE, SIZE, 17))
policy_y = np.zeros((BATCH_SIZE, SIZE*SIZE + 1))
value_y = np.zeros((BATCH_SIZE, 1))
for j, (game_n, move) in enumerate(chosen):
filename = os.path.join(directory, GAME_FILE % game_n)
with h5py.File(filename, 'r') as f:
board = f['move_%s/board' % move][:]
policy = f['move_%s/policy_target' % move][:]
value_target = f['move_%s/value_target' % move][()]
X[j] = board
policy_y[j] = policy
value_y[j] = value_target
fake_epoch = epoch * NUM_WORKERS + worker # For tensorboard
model.fit(X, [policy_y, value_y],
initial_epoch=fake_epoch,
epochs=fake_epoch + 1,
validation_split=VALIDATION_SPLIT, # Needed for TensorBoard histograms and gradi
callbacks=[tf_callback, nan_callback],
verbose=0,
)
model.name = new_name.split('.')[0]
model.save(os.path.join(conf['MODEL_DIR'], new_name))
| [
"h5py.File",
"keras.callbacks.TerminateOnNaN",
"random.choices",
"numpy.zeros",
"os.path.join"
] | [((1188, 1204), 'keras.callbacks.TerminateOnNaN', 'TerminateOnNaN', ([], {}), '()\n', (1202, 1204), False, 'from keras.callbacks import TensorBoard, TerminateOnNaN\n'), ((1222, 1260), 'os.path.join', 'os.path.join', (['"""games"""', 'game_model_name'], {}), "('games', game_model_name)\n", (1234, 1260), False, 'import os\n'), ((2611, 2652), 'os.path.join', 'os.path.join', (["conf['MODEL_DIR']", 'new_name'], {}), "(conf['MODEL_DIR'], new_name)\n", (2623, 2652), False, 'import os\n'), ((475, 518), 'os.path.join', 'os.path.join', (['directory', "conf['MOVE_INDEX']"], {}), "(directory, conf['MOVE_INDEX'])\n", (487, 518), False, 'import os\n'), ((1016, 1055), 'os.path.join', 'os.path.join', (["conf['LOG_DIR']", 'new_name'], {}), "(conf['LOG_DIR'], new_name)\n", (1028, 1055), False, 'import os\n'), ((1460, 1499), 'random.choices', 'choices', (['indices', 'weights'], {'k': 'BATCH_SIZE'}), '(indices, weights, k=BATCH_SIZE)\n', (1467, 1499), False, 'from random import choices\n'), ((1519, 1557), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, SIZE, SIZE, 17)'], {}), '((BATCH_SIZE, SIZE, SIZE, 17))\n', (1527, 1557), True, 'import numpy as np\n'), ((1581, 1620), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, SIZE * SIZE + 1)'], {}), '((BATCH_SIZE, SIZE * SIZE + 1))\n', (1589, 1620), True, 'import numpy as np\n'), ((1641, 1666), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, 1)'], {}), '((BATCH_SIZE, 1))\n', (1649, 1666), True, 'import numpy as np\n'), ((1750, 1793), 'os.path.join', 'os.path.join', (['directory', '(GAME_FILE % game_n)'], {}), '(directory, GAME_FILE % game_n)\n', (1762, 1793), False, 'import os\n'), ((1815, 1839), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1824, 1839), False, 'import h5py\n')] |
import sys
graph_folder="./"
if sys.version_info.major < 3 or sys.version_info.minor < 4:
print("Please using python3.4 or greater!")
exit(1)
if len(sys.argv) > 1:
graph_folder = sys.argv[1]
import pyrealsense2 as rs
import numpy as np
import cv2
from mvnc import mvncapi as mvnc
from os import system
import io, time
from os.path import isfile, join
import re
LABELS = ('background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 2)
devices = mvnc.enumerate_devices()
if len(devices) == 0:
print("No devices found")
quit()
print(len(devices))
devHandle = []
graphHandle = []
with open(join(graph_folder, "graph"), mode="rb") as f:
graph_buffer = f.read()
graph = mvnc.Graph('MobileNet-SSD')
for devnum in range(len(devices)):
devHandle.append(mvnc.Device(devices[devnum]))
devHandle[devnum].open()
graphHandle.append(graph.allocate_with_fifos(devHandle[devnum], graph_buffer))
print("\nLoaded Graphs!!!")
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
try:
#freq = cv2.getTickFrequency()
while True:
t1 = time.perf_counter()
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
continue
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
#dnn
im = cv2.resize(color_image, (300, 300))
im = im - 127.5
im = im * 0.007843
#graphHandle[0][0]=input_fifo, graphHandle[0][1]=output_fifo
graph.queue_inference_with_fifo_elem(graphHandle[0][0], graphHandle[0][1], im.astype(np.float32), color_image)
out, input_image = graphHandle[0][1].read_elem()
# Show images
height = color_image.shape[0]
width = color_image.shape[1]
num_valid_boxes = int(out[0])
if num_valid_boxes > 0:
for box_index in range(num_valid_boxes):
base_index = 7+ box_index * 7
if (not np.isfinite(out[base_index]) or
not np.isfinite(out[base_index + 1]) or
not np.isfinite(out[base_index + 2]) or
not np.isfinite(out[base_index + 3]) or
not np.isfinite(out[base_index + 4]) or
not np.isfinite(out[base_index + 5]) or
not np.isfinite(out[base_index + 6])):
continue
x1 = max(0, int(out[base_index + 3] * height))
y1 = max(0, int(out[base_index + 4] * width))
x2 = min(height, int(out[base_index + 5] * height))
y2 = min(width, int(out[base_index + 6] * width))
object_info_overlay = out[base_index:base_index + 7]
min_score_percent = 60
source_image_width = width
source_image_height = height
base_index = 0
class_id = object_info_overlay[base_index + 1]
percentage = int(object_info_overlay[base_index + 2] * 100)
if (percentage <= min_score_percent):
continue
box_left = int(object_info_overlay[base_index + 3] * source_image_width)
box_top = int(object_info_overlay[base_index + 4] * source_image_height)
box_right = int(object_info_overlay[base_index + 5] * source_image_width)
box_bottom = int(object_info_overlay[base_index + 6] * source_image_height)
meters = depth_frame.as_depth_frame().get_distance(box_left+int((box_right-box_left)/2), box_top+int((box_bottom-box_top)/2))
label_text = LABELS[int(class_id)] + " (" + str(percentage) + "%)"+ " {:.2f}".format(meters) + " meters away"
box_color = (255, 128, 0)
box_thickness = 1
cv2.rectangle(color_image, (box_left, box_top), (box_right, box_bottom), box_color, box_thickness)
label_background_color = (125, 175, 75)
label_text_color = (255, 255, 255)
label_size = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
label_left = box_left
label_top = box_top - label_size[1]
if (label_top < 1):
label_top = 1
label_right = label_left + label_size[0]
label_bottom = label_top + label_size[1]
cv2.rectangle(color_image, (label_left - 1, label_top - 1), (label_right + 1, label_bottom + 1), label_background_color, -1)
cv2.putText(color_image, label_text, (label_left, label_bottom), cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', cv2.resize(color_image,(width, height)))
## Print FPS
t2 = time.perf_counter()
time1 = (t2-t1)#/freq
print(" {:.2f} FPS".format(1/time1))
if cv2.waitKey(1)&0xFF == ord('q'):
break
except:
import traceback
traceback.print_exc()
finally:
# Stop streaming
pipeline.stop()
for devnum in range(len(devices)):
graphHandle[devnum][0].destroy()
graphHandle[devnum][1].destroy()
graph.destroy()
devHandle[devnum].close()
devHandle[devnum].destroy()
print("\n\nFinished\n\n")
sys.exit()
| [
"cv2.resize",
"traceback.print_exc",
"cv2.putText",
"mvnc.mvncapi.Device",
"pyrealsense2.pipeline",
"cv2.waitKey",
"cv2.getTextSize",
"sys.exit",
"time.perf_counter",
"pyrealsense2.config",
"numpy.isfinite",
"cv2.namedWindow",
"cv2.rectangle",
"mvnc.mvncapi.global_set_option",
"os.path.j... | [((646, 703), 'mvnc.mvncapi.global_set_option', 'mvnc.global_set_option', (['mvnc.GlobalOption.RW_LOG_LEVEL', '(2)'], {}), '(mvnc.GlobalOption.RW_LOG_LEVEL, 2)\n', (668, 703), True, 'from mvnc import mvncapi as mvnc\n'), ((714, 738), 'mvnc.mvncapi.enumerate_devices', 'mvnc.enumerate_devices', ([], {}), '()\n', (736, 738), True, 'from mvnc import mvncapi as mvnc\n'), ((950, 977), 'mvnc.mvncapi.Graph', 'mvnc.Graph', (['"""MobileNet-SSD"""'], {}), "('MobileNet-SSD')\n", (960, 977), True, 'from mvnc import mvncapi as mvnc\n'), ((1255, 1268), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (1266, 1268), True, 'import pyrealsense2 as rs\n'), ((1278, 1289), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (1287, 1289), True, 'import pyrealsense2 as rs\n'), ((6029, 6039), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6037, 6039), False, 'import sys\n'), ((868, 895), 'os.path.join', 'join', (['graph_folder', '"""graph"""'], {}), "(graph_folder, 'graph')\n", (872, 895), False, 'from os.path import isfile, join\n'), ((1035, 1063), 'mvnc.mvncapi.Device', 'mvnc.Device', (['devices[devnum]'], {}), '(devices[devnum])\n', (1046, 1063), True, 'from mvnc import mvncapi as mvnc\n'), ((1538, 1557), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1555, 1557), False, 'import io, time\n'), ((2016, 2051), 'cv2.resize', 'cv2.resize', (['color_image', '(300, 300)'], {}), '(color_image, (300, 300))\n', (2026, 2051), False, 'import cv2\n'), ((5355, 5404), 'cv2.namedWindow', 'cv2.namedWindow', (['"""RealSense"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('RealSense', cv2.WINDOW_AUTOSIZE)\n", (5370, 5404), False, 'import cv2\n'), ((5513, 5532), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5530, 5532), False, 'import io, time\n'), ((5705, 5726), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5724, 5726), False, 'import traceback\n'), ((5437, 5477), 'cv2.resize', 'cv2.resize', (['color_image', '(width, height)'], {}), '(color_image, 
(width, height))\n', (5447, 5477), False, 'import cv2\n'), ((4496, 4598), 'cv2.rectangle', 'cv2.rectangle', (['color_image', '(box_left, box_top)', '(box_right, box_bottom)', 'box_color', 'box_thickness'], {}), '(color_image, (box_left, box_top), (box_right, box_bottom),\n box_color, box_thickness)\n', (4509, 4598), False, 'import cv2\n'), ((5088, 5217), 'cv2.rectangle', 'cv2.rectangle', (['color_image', '(label_left - 1, label_top - 1)', '(label_right + 1, label_bottom + 1)', 'label_background_color', '(-1)'], {}), '(color_image, (label_left - 1, label_top - 1), (label_right + \n 1, label_bottom + 1), label_background_color, -1)\n', (5101, 5217), False, 'import cv2\n'), ((5229, 5350), 'cv2.putText', 'cv2.putText', (['color_image', 'label_text', '(label_left, label_bottom)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'label_text_color', '(1)'], {}), '(color_image, label_text, (label_left, label_bottom), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, label_text_color, 1)\n', (5240, 5350), False, 'import cv2\n'), ((5620, 5634), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5631, 5634), False, 'import cv2\n'), ((4733, 4794), 'cv2.getTextSize', 'cv2.getTextSize', (['label_text', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(1)'], {}), '(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n', (4748, 4794), False, 'import cv2\n'), ((2642, 2670), 'numpy.isfinite', 'np.isfinite', (['out[base_index]'], {}), '(out[base_index])\n', (2653, 2670), True, 'import numpy as np\n'), ((2698, 2730), 'numpy.isfinite', 'np.isfinite', (['out[base_index + 1]'], {}), '(out[base_index + 1])\n', (2709, 2730), True, 'import numpy as np\n'), ((2758, 2790), 'numpy.isfinite', 'np.isfinite', (['out[base_index + 2]'], {}), '(out[base_index + 2])\n', (2769, 2790), True, 'import numpy as np\n'), ((2818, 2850), 'numpy.isfinite', 'np.isfinite', (['out[base_index + 3]'], {}), '(out[base_index + 3])\n', (2829, 2850), True, 'import numpy as np\n'), ((2878, 2910), 'numpy.isfinite', 'np.isfinite', (['out[base_index + 4]'], {}), 
'(out[base_index + 4])\n', (2889, 2910), True, 'import numpy as np\n'), ((2938, 2970), 'numpy.isfinite', 'np.isfinite', (['out[base_index + 5]'], {}), '(out[base_index + 5])\n', (2949, 2970), True, 'import numpy as np\n'), ((2998, 3030), 'numpy.isfinite', 'np.isfinite', (['out[base_index + 6]'], {}), '(out[base_index + 6])\n', (3009, 3030), True, 'import numpy as np\n')] |
if __name__ == "__main__":
import numpy as np
# generate the boundary
f = lambda x: (5 * x + 1)
bd_x = np.linspace(-1.0, 1, 200)
bd_y = f(bd_x)
# generate the training data
input_size = 400* 1024
x = np.random.uniform(-1, 1, input_size)
y = f(x) + 2 * np.random.randn(len(x))
# convert training data to 2d space
label = np.asarray([5 * a + 1 > b for (a, b) in zip(x, y)]).astype(np.int32)
data = np.array([[a, b] for (a, b) in zip(x, y)], dtype=np.float32)
print('data shape: {}'.format(data.shape))
print('label shape: {}'.format(label.shape))
print('x shape: {}'.format(x.shape))
print('y shape: {}'.format(y.shape))
| [
"numpy.random.uniform",
"numpy.linspace"
] | [((119, 144), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1)', '(200)'], {}), '(-1.0, 1, 200)\n', (130, 144), True, 'import numpy as np\n'), ((232, 268), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'input_size'], {}), '(-1, 1, input_size)\n', (249, 268), True, 'import numpy as np\n')] |
"""Classes and methods for Multi-output Gaussian process"""
import tqdm
import numpy as np
import pandas as pd
import torch
import gpytorch
from gpytorch.models import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution, LMCVariationalStrategy, VariationalStrategy
from gpytorch.distributions import MultivariateNormal
import botorch
from botorch.optim import optimize_acqf
from botorch.utils.transforms import unnormalize, normalize
from botorch.models.utils import add_output_dim
from botorch.sampling.samplers import SobolQMCNormalSampler
from botorch.posteriors.gpytorch import GPyTorchPosterior
from elfi.methods.posteriors import BolfiPosterior
import scipy.stats as ss
class MOGPProblem(torch.nn.Module):
"""Interface between elfi.DynamicProcess and BoTorch
Attributes
----------
process : elfi.DynamicProcess
num_task : int
ref_point : torch.Tensor or arraylike
reference point to compute Pareto solutions, should be slightly worse than the worse feasible solution
bounds : torch.Tensor
bounds for each input dimension of the process, shape 2 x param_dim
target_name : str
target node name of process
input_dim : int
dimension of input
last_step : int
keeps track of the last step number
tasks : ndarray
current step numbers
"""
def __init__(self, process, num_tasks, ref_point=None, bounds=None):
"""Constructor
Parameters
----------
process : elfi.DynamicProcess
num_tasks : int
ref_point : torch.Tensor or arraylike, optional
reference point, take bounds from process by default
"""
super(MOGPProblem, self).__init__()
if ref_point:
assert num_tasks == len(ref_point), 'ref_point length must be equal to num_tasks'
self.ref_point = ref_point or -5 * torch.ones(num_tasks)
if bounds:
assert torch.is_tensor(bounds), 'bounds must be torch.Tensor'
assert bounds.size == torch.Size([2, process.param_dim]), 'bounds must have size 2 x process.param_dim (' + str(process.param_dim) + ')'
self.bounds = bounds
else:
self.bounds = torch.tensor(process.bounds).T
self.y_bounds = None
self.process = process
self.target_name = self.process.target_name
self.input_dim = self.process.param_dim
self.num_tasks = num_tasks
self.last_step = 0
for i in range(self.num_tasks-1):
self.process.step()
self.last_step = self.last_step + 1
self.tasks = np.arange(0, num_tasks)
def _forward(self, x):
"""Return discrepancies given inputs.
"""
if torch.is_tensor(x):
x = x.cpu().numpy()
t = {}
num_evidence = x.shape[0]
for i in range(len(self.process.param_names)):
t[self.process.param_names[i]] = x[:,i]
for i in range(self.num_tasks):
observed = self.process.get_observed()[self.tasks[i]]
model = self.process.create_model(observed=observed)
net = model.generate(batch_size=num_evidence, with_values=t)
new_y = net[self.target_name]
new_sim = torch.tensor(net['Sim'])
if i == 0:
y = torch.tensor(new_y).view(-1,1)
else:
y = torch.cat((y, torch.tensor(new_y).view(-1,1)), dim=-1)
return y, new_sim
def forward(self, x):
y, _ = self._forward(x)
return
def step(self):
"""Advance 1 step.
"""
self.process.step()
self.last_step = self.last_step + 1
self.tasks = self.tasks + 1
def update_ref(self, train_y):
""" Update reference point when new data is available.
"""
train_y_min = torch.tensor([torch.min(train_y[:,i], dim = 0)[0] for i in range(train_y.size(-1))])
self.ref_point = train_y_min - 0.1 * torch.abs(train_y_min)
def get_evidence(self, num_evidence=None, training_points=None, predictions=None):
"""Generate num_evidence evident points from prior.
"""
train_t = {}
if training_points:
task_shift = self.num_tasks - 1
new_tasks = self.tasks[task_shift:] if len(self.tasks) > 1 else self.tasks
train_x = training_points[0]
train_y = training_points[1]
train_sim = training_points[2]
num_evidence = train_x.shape[0]
for i in range(self.process.param_dim):
param = self.process.param_names[i]
train_t[param] = train_x[:,i].detach().numpy()
train_t['Sim'] = train_sim.detach().numpy()
else:
new_tasks = self.tasks
for i in new_tasks:
observed = self.process.get_observed()[i]
model = self.process.create_model(observed=observed)
if i == 0 or (i == new_tasks[0] and len(new_tasks) != 1):
for param in self.process.param_names:
train_t[param] = model[param].generate(batch_size=num_evidence)
train_x = torch.stack([torch.tensor(train_t[param]) for param in self.process.param_names], dim=-1)
net = model.generate(batch_size=num_evidence, with_values=train_t)
train_y = torch.tensor(net[self.target_name]).view(-1,1)
train_t['Sim'] = net['Sim']
train_sim = torch.tensor(net['Sim'])
else:
net = model.generate(batch_size=num_evidence, with_values=train_t)
new_y = torch.tensor(net[self.target_name]).view(-1,1)
if train_y.shape[1] > 0:
train_y = torch.cat((train_y, new_y), dim=-1)
else:
train_y = new_y
if predictions is not None:
predicted_t = {}
num_evidence = predictions.shape[0]
for i in range(self.process.param_dim):
param = self.process.param_names[i]
predicted_t[param] = predictions[:,i].detach().numpy()
predicted_sim = model.generate(batch_size=num_evidence, with_values=predicted_t)['Sim']
predicted_t['Sim'] = predicted_sim
for i in self.tasks:
observed = self.process.get_observed()[i]
model = self.process.create_model(observed=observed)
if i == self.tasks[0]:
predicted_y = torch.tensor(model.generate(batch_size=num_evidence, with_values=predicted_t)[self.target_name]).view(-1,1)
else:
new_predicted_y = torch.tensor(model.generate(batch_size=num_evidence, with_values=predicted_t)[self.target_name]).view(-1,1)
predicted_y = torch.cat((predicted_y, new_predicted_y), dim=-1)
train_x = torch.cat((train_x, predictions), dim=0)
train_y = torch.cat((train_y, predicted_y), dim=0)
train_sim = torch.cat((train_sim, torch.tensor(predicted_sim)), dim=0)
min_y = torch.zeros(train_y.shape[1])
max_y, _ = torch.max(train_y, -2)
self.y_bounds = torch.vstack((min_y, max_y))
return train_x, train_y, train_sim
class LMC(ApproximateGP):
    """Class for LMC (linear model of coregionalization) multi-output GP.

    Attributes
    ----------
    num_latents : int
        number of latent GPs
    num_tasks : int
        number of tasks or time steps
    _num_outputs : int
        number of tasks, for posterior function
    _input_batch_shape : torch.Size
        required for posterior function
    learn_inducing_locations : bool
        set to False to keep inducing location constant
    variational_strategy : gpytorch.variational.VariationalStrategy
        variational strategy for LMC
    mean_module : gpytorch.means.Mean
        mean module for LMC
    covar_module : gpytorch.kernels.Kernel
        kernel module for LMC
    """
    def __init__(self, inducing_points, num_latents, num_tasks, bounds, y_bounds=None, learn_inducing_locations=True):
        """Constructor

        Parameters
        ----------
        inducing_points : torch.Tensor, required
            tensor of inducing points with shape num_latent x num_inducing_points x input_dim
        num_latents : int, required
            number of latent GPs
        num_tasks : int, required
            number of tasks or time steps
        bounds : Tensor, required
            bounds for each input dimension, used for unnormalizing
        y_bounds : Tensor, optional
            output bounds used for unnormalizing predictions; recomputed by train_lmc
        learn_inducing_locations : bool
            set to False to keep inducing location constant
        """
        # We have to mark the CholeskyVariationalDistribution as batch so that we
        # learn a variational distribution for each task
        self.num_latents = num_latents
        self.num_tasks = num_tasks
        self.bounds = bounds
        self.y_bounds = y_bounds
        # _num_outputs and _input_batch_shape are required by BoTorch's posterior API
        self._num_outputs = self.num_tasks
        self._input_batch_shape = torch.Size([])
        self.learn_inducing_locations = learn_inducing_locations
        variational_distribution = CholeskyVariationalDistribution(inducing_points.size(-2), batch_shape=torch.Size([num_latents]))
        # We have to wrap the VariationalStrategy in a MultitaskVariationalStrategy
        # so that the output will be a MultitaskMultivariateNormal rather than a batch output
        variational_strategy = LMCVariationalStrategy(VariationalStrategy(self, inducing_points, variational_distribution, learn_inducing_locations=learn_inducing_locations),
                                                      num_tasks=num_tasks, num_latents=num_latents, latent_dim=-1)
        ApproximateGP.__init__(self, variational_strategy)
        self.input_dim = inducing_points.size(-1)
        # The mean and covariance modules should be marked as batch so we learn a
        # different set of hyperparameters for each latent GP
        self.mean_module = gpytorch.means.LinearMean(self.input_dim, batch_shape=torch.Size([num_latents]))
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel(ard_num_dims = self.input_dim, batch_shape=torch.Size([num_latents])),
                                                      batch_shape=torch.Size([num_latents]))
        # A full-rank inter-task noise model only makes sense for more than one task
        rank = num_tasks if num_tasks > 1 else 0
        self.likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=self.num_tasks, rank=rank)

    def forward(self, x):
        # The forward function should be written as if we were dealing with each
        # output dimension in batch
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return MultivariateNormal(mean_x, covar_x)

    def predict(self, x):
        """Return unnormalized predictive mean and variance as numpy arrays.

        Parameters
        ----------
        x : numpy.ndarray
            query points in the original (unnormalized) input space
        """
        x = torch.from_numpy(x).float()
        x = normalize(x, bounds=self.bounds).float()
        y = self(x)
        predictions = self.likelihood(y)
        # Map normalized outputs back to the original output scale.
        # NOTE(review): unnormalize also shifts by the lower bound; for a variance a
        # pure rescaling may be what is intended — confirm against usage.
        mean_y = unnormalize(predictions.mean, bounds=self.y_bounds).float()
        var_y = unnormalize(predictions.variance, bounds=self.y_bounds).float()
        return mean_y.detach().numpy(), var_y.detach().numpy()

    def posterior(self, X, output_indices=None, **kwargs):
        """Return MultitaskMultivariateNormal posterior for acquisition process.
        """
        self.eval()  # make sure model is in eval mode
        with botorch.models.utils.gpt_posterior_settings():
            # insert a dimension for the output dimension
            if self.num_tasks >= 1:
                X, output_dim_idx = add_output_dim(
                    X=X, original_batch_shape=torch.Size([])
                )
            mvn = self(X.float())
            posterior = GPyTorchPosterior(mvn=mvn)
        return posterior

    def linear_grad_only(self):
        """Disable gradient for all parameters except for LMC linear coefficients.
        """
        for name, param in self.named_parameters():
            if name != 'variational_strategy.lmc_coefficients':
                param.requires_grad = False

    def full_grad(self):
        """Enable gradient for all parameters.

        Inducing-point locations are only re-enabled when the model was
        constructed with learn_inducing_locations=True.
        """
        for name, param in self.named_parameters():
            if name == 'variational_strategy.base_variational_strategy.inducing_points':
                if self.learn_inducing_locations:
                    param.requires_grad = True
                else:
                    param.requires_grad = False
            else:
                param.requires_grad = True

    def modify_parameters(self, parameter_dict={}):
        """Replace parameter tensors with those specified in the dictionary.

        NOTE(review): mutable default argument; harmless here because the dict
        is only read, but passing None and defaulting inside would be safer.
        """
        state_dict = self.state_dict()
        for name, tensor in parameter_dict.items():
            state_dict[name] = tensor
        self.load_state_dict(state_dict)

    def reset_parameters(self):
        """Reset parameters to default state.

        LMC coefficients are re-drawn from a standard normal, inducing points
        uniformly in [0, 1), variational covariances to identity, everything
        else to zero.
        """
        state_dict = self.state_dict()
        for name, tensor in state_dict.items():
            if name == 'variational_strategy.lmc_coefficients':
                state_dict[name] = torch.randn(state_dict[name].size())
            elif name == 'variational_strategy.base_variational_strategy.inducing_points':
                state_dict[name] = torch.rand(state_dict[name].size())
            elif name == 'variational_strategy.base_variational_strategy._variational_distribution.chol_variational_covar':
                state_dict[name] = torch.stack([torch.eye(state_dict[name].size(-1)) for _ in range(state_dict[name].size(0))])
            else:
                state_dict[name] = torch.zeros(state_dict[name].size())
        self.load_state_dict(state_dict)

    def non_likelihood_parameters(self):
        """Return all model parameters that do not belong to the likelihood."""
        params = []
        for name, param in self.named_parameters():
            if 'likelihood' not in name:
                params.append(param)
        return params

    def train_lmc(self, train_x, train_y, num_epochs_max=100, training_batch_size=None, verbose=False, **tkwargs):
        """Main function for training LMC

        Parameters
        ----------
        train_x : torch.Tensor
            training input, shape batch_size x input_dim
        train_y : torch.Tensor
            training objective, shape batch_size x num_tasks
        num_epochs_max: int, optional
            maximum number of training epochs
        training_batch_size : int, optional
            minibatch size; defaults to the full data set
        verbose : bool, optional
            show a tqdm progress bar over epochs
        tkwargs : dict
            torch keyword arguments
        """
        self.train()
        self.likelihood.train()
        if not torch.is_tensor(train_x):
            train_x = torch.tensor(train_x, **tkwargs)
        if not torch.is_tensor(train_y):
            train_y = torch.tensor(train_y, **tkwargs)
        # Output bounds are [0, column max]; used to normalize targets to [0, 1]
        min_y = torch.zeros(train_y.shape[1])
        max_y, max_ind = torch.max(train_y, -2)
        self.y_bounds = torch.vstack((min_y, max_y))
        train_x = normalize(train_x, bounds=self.bounds).float()
        train_y = normalize(train_y, bounds=self.y_bounds).float()
        training_batch_size = training_batch_size or train_x.size(0) # int(np.max([1, train_x.size(0) / 10.]))
        train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(train_x, train_y), shuffle=True, batch_size=training_batch_size)
        # Only optimize parameters whose gradients are enabled (see *_grad_only helpers)
        optimizer = torch.optim.Adam([{'params': filter(lambda p: p.requires_grad, self.parameters())}], lr=0.01)
        mll = gpytorch.mlls.VariationalELBO(self.likelihood, self, num_data=train_y.size(0))
        # NOTE(review): this criterion is constructed but never consulted below, so
        # training always runs for num_epochs_max epochs — confirm whether early
        # stopping was intended here.
        stopping_criterion = botorch.optim.stopping.ExpMAStoppingCriterion(rel_tol=1e-6)
        if verbose:
            epochs_iter = tqdm.notebook.tqdm(range(num_epochs_max), desc="Epoch", leave=False)
        else:
            epochs_iter = range(num_epochs_max)
        for _ in epochs_iter:
            # Within each iteration, we will go over each minibatch of data
            minibatch_iter = train_loader
            # loss_trajectory is accumulated but currently unused (debugging aid)
            loss_trajectory = torch.Tensor([])
            loss_sum = 0
            for x_batch, y_batch in minibatch_iter:
                x_batch, y_batch = x_batch.to(**tkwargs), y_batch.to(**tkwargs)
                optimizer.zero_grad()
                output = self(x_batch.float(), prior=False)
                # VariationalELBO is maximized, so minimize its negative
                loss = -mll(output, y_batch)
                loss.backward()
                optimizer.step()
                loss_sum = loss_sum + loss.item()
                if loss_trajectory.size(0) == 0:
                    loss_trajectory = torch.Tensor([loss])
                else:
                    loss_trajectory = torch.cat((loss_trajectory, torch.Tensor([loss])))
        self.eval()
        self.likelihood.eval()
        # print('Loss (lmc): ', loss_sum)
        # raise ValueError
class LMCPosterior(BolfiPosterior):
    """BOLFI-style posterior whose surrogate is an LMC multi-output GP."""

    def __init__(self, model, task, **kwargs):
        """Remember which task this posterior targets; delegate the rest."""
        super().__init__(model, **kwargs)
        self.task = task

    def _unnormalized_loglikelihood(self, x):
        """Per-task unnormalized log-likelihoods at the points in ``x``.

        Each task's value is the normal log-CDF of the threshold under the
        surrogate's predictive mean/variance at ``x``.
        """
        points = np.asanyarray(x).reshape((-1, self.dim))
        mean, var = self.model.predict(points)
        if type(self.threshold) is np.ndarray:
            thr = self.threshold
        else:
            thr = self.threshold.detach().numpy()
        num_outputs = np.size(mean, 1)
        return [
            ss.norm.logcdf(thr[:, col], mean[:, col], np.sqrt(var[:, col])).squeeze()
            for col in range(num_outputs)
        ]

    def sample_lmc_posterior(self, cols, N=10000):
        """Importance sampling for posterior

        Draws ``N`` prior samples, weights them by the (jittered) likelihood of
        the configured task, and resamples with replacement.
        """
        theta = self.prior.rvs(size=N)
        if theta.ndim == 1:
            theta = theta.reshape(theta.shape[0], 1)
        likelihoods = self._unnormalized_likelihood(theta)
        # Tiny positive jitter keeps the weight vector strictly valid for np.random.choice
        weights = likelihoods + np.random.normal(loc=1e-6, scale=1e-9, size=likelihoods.shape)
        task_weights = weights[self.task]
        probs = task_weights / np.sum(task_weights)
        # importance weighted resampling
        chosen = np.random.choice(N, size=N, replace=True, p=probs)
        return pd.DataFrame.from_records(theta[chosen, :], columns=cols)
def optimize_qehvi_and_get_observation(lmc, problem, train_obj, mc_sample_size=128, batch_size=1, **tkwargs):
    """Optimizes the qEHVI acquisition function, and returns a new candidate and observation.

    The acquisition is optimized over the normalized unit cube and the winning
    candidates are mapped back to the problem's original input bounds.
    """
    from botorch.utils.multi_objective.box_decomposition import NondominatedPartitioning
    from botorch.acquisition.multi_objective.monte_carlo import qExpectedHypervolumeImprovement
    from botorch.utils.transforms import (concatenate_pending_points, t_batch_mode_transform)
    from torch import Tensor

    # Optimization domain: the [0, 1]^d unit cube
    unit_bounds = torch.ones(2, problem.input_dim).to(**tkwargs)
    unit_bounds[0] = 0
    qmc_sampler = SobolQMCNormalSampler(num_samples=mc_sample_size).to(**tkwargs)
    # partition non-dominated space into disjoint rectangles
    box_partitioning = NondominatedPartitioning(num_outcomes=problem.num_tasks, Y=train_obj).to(**tkwargs)

    class _NegatedQEHVI(qExpectedHypervolumeImprovement):
        """qEHVI with its sign flipped before being handed to optimize_acqf."""

        def __init__(self, model, ref_point, partitioning,
                     sampler, objective=None, constraints=None, X_pending=None, eta=1e-3):
            super().__init__(model, ref_point, partitioning,
                             sampler, objective, constraints, X_pending, eta)

        @concatenate_pending_points
        @t_batch_mode_transform()
        def forward(self, X: Tensor) -> Tensor:
            post = self.model.posterior(X)
            draws = self.sampler(post)
            return -self._compute_qehvi(samples=draws)

    acquisition = _NegatedQEHVI(
        model=lmc,
        ref_point=problem.ref_point.tolist(),  # use known reference point
        partitioning=box_partitioning,
        sampler=qmc_sampler,
    ).to(**tkwargs)
    # optimize
    candidates, _ = optimize_acqf(
        acq_function=acquisition,
        bounds=unit_bounds,
        q=batch_size,
        num_restarts=20,
        raw_samples=1024,  # used for intialization heuristic
        options={"batch_limit": 10, "maxiter": 200, "nonnegative": True},
        sequential=True,
    )
    # observe new values in the original (unnormalized) input space
    new_x = unnormalize(candidates.detach(), bounds=problem.bounds)
    return new_x
| [
"gpytorch.variational.VariationalStrategy",
"botorch.utils.transforms.t_batch_mode_transform",
"numpy.sum",
"botorch.posteriors.gpytorch.GPyTorchPosterior",
"torch.cat",
"torch.vstack",
"numpy.arange",
"botorch.optim.stopping.ExpMAStoppingCriterion",
"torch.utils.data.TensorDataset",
"numpy.random... | [((20217, 20408), 'botorch.optim.optimize_acqf', 'optimize_acqf', ([], {'acq_function': 'acq_func', 'bounds': 'standard_bounds', 'q': 'q', 'num_restarts': '(20)', 'raw_samples': '(1024)', 'options': "{'batch_limit': 10, 'maxiter': 200, 'nonnegative': True}", 'sequential': '(True)'}), "(acq_function=acq_func, bounds=standard_bounds, q=q,\n num_restarts=20, raw_samples=1024, options={'batch_limit': 10,\n 'maxiter': 200, 'nonnegative': True}, sequential=True)\n", (20230, 20408), False, 'from botorch.optim import optimize_acqf\n'), ((2653, 2676), 'numpy.arange', 'np.arange', (['(0)', 'num_tasks'], {}), '(0, num_tasks)\n', (2662, 2676), True, 'import numpy as np\n'), ((2776, 2794), 'torch.is_tensor', 'torch.is_tensor', (['x'], {}), '(x)\n', (2791, 2794), False, 'import torch\n'), ((7162, 7191), 'torch.zeros', 'torch.zeros', (['train_y.shape[1]'], {}), '(train_y.shape[1])\n', (7173, 7191), False, 'import torch\n'), ((7211, 7233), 'torch.max', 'torch.max', (['train_y', '(-2)'], {}), '(train_y, -2)\n', (7220, 7233), False, 'import torch\n'), ((7258, 7286), 'torch.vstack', 'torch.vstack', (['(min_y, max_y)'], {}), '((min_y, max_y))\n', (7270, 7286), False, 'import torch\n'), ((9057, 9071), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (9067, 9071), False, 'import torch\n'), ((9752, 9802), 'gpytorch.models.ApproximateGP.__init__', 'ApproximateGP.__init__', (['self', 'variational_strategy'], {}), '(self, variational_strategy)\n', (9774, 9802), False, 'from gpytorch.models import ApproximateGP\n'), ((10407, 10496), 'gpytorch.likelihoods.MultitaskGaussianLikelihood', 'gpytorch.likelihoods.MultitaskGaussianLikelihood', ([], {'num_tasks': 'self.num_tasks', 'rank': 'rank'}), '(num_tasks=self.num_tasks,\n rank=rank)\n', (10455, 10496), False, 'import gpytorch\n'), ((10732, 10767), 'gpytorch.distributions.MultivariateNormal', 'MultivariateNormal', (['mean_x', 'covar_x'], {}), '(mean_x, covar_x)\n', (10750, 10767), False, 'from gpytorch.distributions import 
MultivariateNormal\n'), ((11780, 11806), 'botorch.posteriors.gpytorch.GPyTorchPosterior', 'GPyTorchPosterior', ([], {'mvn': 'mvn'}), '(mvn=mvn)\n', (11797, 11806), False, 'from botorch.posteriors.gpytorch import GPyTorchPosterior\n'), ((15020, 15049), 'torch.zeros', 'torch.zeros', (['train_y.shape[1]'], {}), '(train_y.shape[1])\n', (15031, 15049), False, 'import torch\n'), ((15075, 15097), 'torch.max', 'torch.max', (['train_y', '(-2)'], {}), '(train_y, -2)\n', (15084, 15097), False, 'import torch\n'), ((15122, 15150), 'torch.vstack', 'torch.vstack', (['(min_y, max_y)'], {}), '((min_y, max_y))\n', (15134, 15150), False, 'import torch\n'), ((15779, 15839), 'botorch.optim.stopping.ExpMAStoppingCriterion', 'botorch.optim.stopping.ExpMAStoppingCriterion', ([], {'rel_tol': '(1e-06)'}), '(rel_tol=1e-06)\n', (15824, 15839), False, 'import botorch\n'), ((17235, 17251), 'numpy.asanyarray', 'np.asanyarray', (['x'], {}), '(x)\n', (17248, 17251), True, 'import numpy as np\n'), ((18279, 18333), 'numpy.random.choice', 'np.random.choice', (['N'], {'size': 'N', 'replace': '(True)', 'p': 'n_weights'}), '(N, size=N, replace=True, p=n_weights)\n', (18295, 18333), True, 'import numpy as np\n'), ((18403, 18459), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['theta_resampled'], {'columns': 'cols'}), '(theta_resampled, columns=cols)\n', (18428, 18459), True, 'import pandas as pd\n'), ((19752, 19776), 'botorch.utils.transforms.t_batch_mode_transform', 't_batch_mode_transform', ([], {}), '()\n', (19774, 19776), False, 'from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform\n'), ((1978, 2001), 'torch.is_tensor', 'torch.is_tensor', (['bounds'], {}), '(bounds)\n', (1993, 2001), False, 'import torch\n'), ((3304, 3328), 'torch.tensor', 'torch.tensor', (["net['Sim']"], {}), "(net['Sim'])\n", (3316, 3328), False, 'import torch\n'), ((6958, 6998), 'torch.cat', 'torch.cat', (['(train_x, predictions)'], {'dim': '(0)'}), '((train_x, predictions), 
dim=0)\n', (6967, 6998), False, 'import torch\n'), ((7021, 7061), 'torch.cat', 'torch.cat', (['(train_y, predicted_y)'], {'dim': '(0)'}), '((train_y, predicted_y), dim=0)\n', (7030, 7061), False, 'import torch\n'), ((9504, 9627), 'gpytorch.variational.VariationalStrategy', 'VariationalStrategy', (['self', 'inducing_points', 'variational_distribution'], {'learn_inducing_locations': 'learn_inducing_locations'}), '(self, inducing_points, variational_distribution,\n learn_inducing_locations=learn_inducing_locations)\n', (9523, 9627), False, 'from gpytorch.variational import CholeskyVariationalDistribution, LMCVariationalStrategy, VariationalStrategy\n'), ((11394, 11439), 'botorch.models.utils.gpt_posterior_settings', 'botorch.models.utils.gpt_posterior_settings', ([], {}), '()\n', (11437, 11439), False, 'import botorch\n'), ((14826, 14850), 'torch.is_tensor', 'torch.is_tensor', (['train_x'], {}), '(train_x)\n', (14841, 14850), False, 'import torch\n'), ((14874, 14906), 'torch.tensor', 'torch.tensor', (['train_x'], {}), '(train_x, **tkwargs)\n', (14886, 14906), False, 'import torch\n'), ((14922, 14946), 'torch.is_tensor', 'torch.is_tensor', (['train_y'], {}), '(train_y)\n', (14937, 14946), False, 'import torch\n'), ((14970, 15002), 'torch.tensor', 'torch.tensor', (['train_y'], {}), '(train_y, **tkwargs)\n', (14982, 15002), False, 'import torch\n'), ((15447, 15495), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (15477, 15495), False, 'import torch\n'), ((16196, 16212), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (16208, 16212), False, 'import torch\n'), ((17571, 17587), 'numpy.size', 'np.size', (['mean', '(1)'], {}), '(mean, 1)\n', (17578, 17587), True, 'import numpy as np\n'), ((18077, 18146), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(1e-06)', 'scale': '(1e-09)', 'size': 'predicted_values.shape'}), '(loc=1e-06, scale=1e-09, size=predicted_values.shape)\n', (18093, 
18146), True, 'import numpy as np\n'), ((18185, 18211), 'numpy.sum', 'np.sum', (['weights[self.task]'], {}), '(weights[self.task])\n', (18191, 18211), True, 'import numpy as np\n'), ((18993, 19025), 'torch.ones', 'torch.ones', (['(2)', 'problem.input_dim'], {}), '(2, problem.input_dim)\n', (19003, 19025), False, 'import torch\n'), ((19081, 19130), 'botorch.sampling.samplers.SobolQMCNormalSampler', 'SobolQMCNormalSampler', ([], {'num_samples': 'mc_sample_size'}), '(num_samples=mc_sample_size)\n', (19102, 19130), False, 'from botorch.sampling.samplers import SobolQMCNormalSampler\n'), ((19164, 19233), 'botorch.utils.multi_objective.box_decomposition.NondominatedPartitioning', 'NondominatedPartitioning', ([], {'num_outcomes': 'problem.num_tasks', 'Y': 'train_obj'}), '(num_outcomes=problem.num_tasks, Y=train_obj)\n', (19188, 19233), False, 'from botorch.utils.multi_objective.box_decomposition import NondominatedPartitioning\n'), ((1917, 1938), 'torch.ones', 'torch.ones', (['num_tasks'], {}), '(num_tasks)\n', (1927, 1938), False, 'import torch\n'), ((2067, 2101), 'torch.Size', 'torch.Size', (['[2, process.param_dim]'], {}), '([2, process.param_dim])\n', (2077, 2101), False, 'import torch\n'), ((2255, 2283), 'torch.tensor', 'torch.tensor', (['process.bounds'], {}), '(process.bounds)\n', (2267, 2283), False, 'import torch\n'), ((4030, 4052), 'torch.abs', 'torch.abs', (['train_y_min'], {}), '(train_y_min)\n', (4039, 4052), False, 'import torch\n'), ((5539, 5563), 'torch.tensor', 'torch.tensor', (["net['Sim']"], {}), "(net['Sim'])\n", (5551, 5563), False, 'import torch\n'), ((9243, 9268), 'torch.Size', 'torch.Size', (['[num_latents]'], {}), '([num_latents])\n', (9253, 9268), False, 'import torch\n'), ((10050, 10075), 'torch.Size', 'torch.Size', (['[num_latents]'], {}), '([num_latents])\n', (10060, 10075), False, 'import torch\n'), ((10304, 10329), 'torch.Size', 'torch.Size', (['[num_latents]'], {}), '([num_latents])\n', (10314, 10329), False, 'import torch\n'), ((10808, 
10827), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (10824, 10827), False, 'import torch\n'), ((10849, 10881), 'botorch.utils.transforms.normalize', 'normalize', (['x'], {'bounds': 'self.bounds'}), '(x, bounds=self.bounds)\n', (10858, 10881), False, 'from botorch.utils.transforms import unnormalize, normalize\n'), ((10969, 11020), 'botorch.utils.transforms.unnormalize', 'unnormalize', (['predictions.mean'], {'bounds': 'self.y_bounds'}), '(predictions.mean, bounds=self.y_bounds)\n', (10980, 11020), False, 'from botorch.utils.transforms import unnormalize, normalize\n'), ((11045, 11100), 'botorch.utils.transforms.unnormalize', 'unnormalize', (['predictions.variance'], {'bounds': 'self.y_bounds'}), '(predictions.variance, bounds=self.y_bounds)\n', (11056, 11100), False, 'from botorch.utils.transforms import unnormalize, normalize\n'), ((15170, 15208), 'botorch.utils.transforms.normalize', 'normalize', (['train_x'], {'bounds': 'self.bounds'}), '(train_x, bounds=self.bounds)\n', (15179, 15208), False, 'from botorch.utils.transforms import unnormalize, normalize\n'), ((15235, 15275), 'botorch.utils.transforms.normalize', 'normalize', (['train_y'], {'bounds': 'self.y_bounds'}), '(train_y, bounds=self.y_bounds)\n', (15244, 15275), False, 'from botorch.utils.transforms import unnormalize, normalize\n'), ((3914, 3945), 'torch.min', 'torch.min', (['train_y[:, i]'], {'dim': '(0)'}), '(train_y[:, i], dim=0)\n', (3923, 3945), False, 'import torch\n'), ((5808, 5843), 'torch.cat', 'torch.cat', (['(train_y, new_y)'], {'dim': '(-1)'}), '((train_y, new_y), dim=-1)\n', (5817, 5843), False, 'import torch\n'), ((6885, 6934), 'torch.cat', 'torch.cat', (['(predicted_y, new_predicted_y)'], {'dim': '(-1)'}), '((predicted_y, new_predicted_y), dim=-1)\n', (6894, 6934), False, 'import torch\n'), ((7108, 7135), 'torch.tensor', 'torch.tensor', (['predicted_sim'], {}), '(predicted_sim)\n', (7120, 7135), False, 'import torch\n'), ((10204, 10229), 'torch.Size', 'torch.Size', 
(['[num_latents]'], {}), '([num_latents])\n', (10214, 10229), False, 'import torch\n'), ((16749, 16769), 'torch.Tensor', 'torch.Tensor', (['[loss]'], {}), '([loss])\n', (16761, 16769), False, 'import torch\n'), ((3373, 3392), 'torch.tensor', 'torch.tensor', (['new_y'], {}), '(new_y)\n', (3385, 3392), False, 'import torch\n'), ((5234, 5262), 'torch.tensor', 'torch.tensor', (['train_t[param]'], {}), '(train_t[param])\n', (5246, 5262), False, 'import torch\n'), ((5420, 5455), 'torch.tensor', 'torch.tensor', (['net[self.target_name]'], {}), '(net[self.target_name])\n', (5432, 5455), False, 'import torch\n'), ((5689, 5724), 'torch.tensor', 'torch.tensor', (['net[self.target_name]'], {}), '(net[self.target_name])\n', (5701, 5724), False, 'import torch\n'), ((11633, 11647), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (11643, 11647), False, 'import torch\n'), ((17649, 17667), 'numpy.sqrt', 'np.sqrt', (['var[:, i]'], {}), '(var[:, i])\n', (17656, 17667), True, 'import numpy as np\n'), ((16858, 16878), 'torch.Tensor', 'torch.Tensor', (['[loss]'], {}), '([loss])\n', (16870, 16878), False, 'import torch\n'), ((3456, 3475), 'torch.tensor', 'torch.tensor', (['new_y'], {}), '(new_y)\n', (3468, 3475), False, 'import torch\n')] |
"""
Utility functions for configuring ebtel++ simulations
"""
import os
import subprocess
import warnings
from collections import OrderedDict
import tempfile
import xml.etree.ElementTree as ET
import xml.dom.minidom as xdm
import numpy as np
__all__ = ['run_ebtel', 'read_xml', 'write_xml']
class EbtelPlusPlusError(Exception):
    """Signals a failure reported by the ebtel++ executable."""
def run_ebtel(config, ebtel_dir):
    """
    Run an ebtel++ simulation

    Parameters
    ----------
    config: `dict`
        Dictionary of configuration options. The caller's dictionary is NOT
        modified; the temporary output filename is injected into a shallow copy.
    ebtel_dir: `str`
        Path to directory containing ebtel++ source code.

    Returns
    -------
    `dict`
        Time-dependent quantities keyed by name; if ``calculate_dem`` is set,
        also the DEM arrays and their temperature bins.

    Raises
    ------
    EbtelPlusPlusError
        If the ebtel++ binary writes anything to stderr.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        config_filename = os.path.join(tmpdir, 'ebtelplusplus.tmp.xml')
        results_filename = os.path.join(tmpdir, 'ebtelplusplus.tmp')
        # Work on a copy so the caller's dict is not mutated as a side effect
        # (the original code wrote 'output_filename' into the caller's dict).
        config = {**config, 'output_filename': results_filename}
        write_xml(config, config_filename)
        cmd = subprocess.run(
            [os.path.join(ebtel_dir, 'bin/ebtel++.run'), '-c', config_filename],
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if cmd.stderr:
            raise EbtelPlusPlusError(f"{cmd.stderr.decode('utf-8')}")
        data = np.loadtxt(results_filename)
        # Columns of the main results file, in output order
        columns = ('time', 'electron_temperature', 'ion_temperature', 'density',
                   'electron_pressure', 'ion_pressure', 'velocity', 'heat')
        results = {name: data[:, i] for i, name in enumerate(columns)}
        results_dem = {}
        if config['calculate_dem']:
            results_dem['dem_tr'] = np.loadtxt(
                config['output_filename'] + '.dem_tr')
            results_dem['dem_corona'] = np.loadtxt(
                config['output_filename'] + '.dem_corona')
            # The first row of both is the temperature bins
            results_dem['dem_temperature'] = results_dem['dem_tr'][0, :]
            results_dem['dem_tr'] = results_dem['dem_tr'][1:, :]
            results_dem['dem_corona'] = results_dem['dem_corona'][1:, :]
        return {**results, **results_dem}
def read_xml(input_filename):
    """
    Parse an ebtel++ XML configuration file into a dictionary.

    Parameters
    ----------
    input_filename : `str`
    """
    root = ET.parse(input_filename).getroot()
    input_dict = {}
    for child in root:
        var = child.tag
        node = root.find(var)
        if node is None:
            warnings.warn(f'No value found for input {var}. Returning None.')
            input_dict[var] = None
        else:
            input_dict[node.tag] = read_node(node)
    return input_dict
def read_node(node):
    """
    Read in node values for different configurations.

    Leaf nodes are coerced via `type_checker`; nodes whose children all have
    unique tags become an `OrderedDict`; repeated child tags become a list of
    single-entry dicts.
    """
    # BUG FIX: Element.getchildren() was deprecated and removed in Python 3.9;
    # iterating/listing the element directly is the supported equivalent.
    children = list(node)
    if children:
        _child_tags = [child.tag for child in children]
        if len(_child_tags) != len(set(_child_tags)):
            # Repeated tags: keep every occurrence as its own {tag: value} dict
            tmp = []
            for child in children:
                tmp.append({child.tag: read_node(child)})
        else:
            tmp = OrderedDict()
            for child in children:
                tmp[child.tag] = read_node(child)
        return tmp
    else:
        if node.text:
            return type_checker(node.text)
        elif node.attrib:
            return {key: type_checker(node.attrib[key]) for key in node.attrib}
        else:
            warnings.warn(f'Unrecognized node format for {node.tag}. Returning None.')
            return None
def bool_filter(val):
    """
    Convert true/false string to Python bool. Otherwise, return string.
    """
    if val in ('True', 'TRUE', 'true', 'yes', 'Yes'):
        return True
    if val in ('False', 'FALSE', 'false', 'no', 'No'):
        return False
    return val
def type_checker(val):
    """
    Convert to int or float if possible
    """
    # Try the narrowest numeric type first, then fall back to bool/string.
    for cast in (int, float):
        try:
            return cast(val)
        except ValueError:
            pass
    return bool_filter(val)
def write_xml(output_dict, output_filename):
    """
    Print dictionary to XML file

    Parameters
    ----------
    output_dict : `dict`
        structure to print to file
    output_filename : `str`
        filename to print to
    """
    root = ET.Element('root')
    for key, value in output_dict.items():
        set_element_recursive(root, value, key)
    with open(output_filename, 'w') as f:
        f.write(pretty_print_xml(root))
def set_element_recursive(root, node, keyname):
    """
    Attach ``node`` under ``root`` as a subelement named ``keyname``.

    Lists and OrderedDicts recurse into child elements; plain dicts become
    attributes; anything else is stringified into the element text.
    """
    element = ET.SubElement(root, keyname)
    if type(node) is list:
        # Each list entry is a single-key dict; recurse on that entry
        for entry in node:
            inner_key = list(entry)[0]
            set_element_recursive(element, entry[inner_key], inner_key)
    elif type(node).__name__ == 'OrderedDict':
        for inner_key in node:
            set_element_recursive(element, node[inner_key], inner_key)
    elif type(node) is dict:
        for attr_name in node:
            element.set(attr_name, str(node[attr_name]))
    else:
        element.text = str(node)
def pretty_print_xml(element):
    """
    Formatted XML output for writing to file.
    """
    raw = ET.tostring(element)
    return xdm.parseString(raw).toprettyxml(indent=" ")
| [
"xml.etree.ElementTree.parse",
"tempfile.TemporaryDirectory",
"xml.dom.minidom.parseString",
"xml.etree.ElementTree.Element",
"xml.etree.ElementTree.SubElement",
"numpy.loadtxt",
"xml.etree.ElementTree.tostring",
"collections.OrderedDict",
"warnings.warn",
"os.path.join"
] | [((2441, 2465), 'xml.etree.ElementTree.parse', 'ET.parse', (['input_filename'], {}), '(input_filename)\n', (2449, 2465), True, 'import xml.etree.ElementTree as ET\n'), ((4583, 4601), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""root"""'], {}), "('root')\n", (4593, 4601), True, 'import xml.etree.ElementTree as ET\n'), ((4925, 4953), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', 'keyname'], {}), '(root, keyname)\n', (4938, 4953), True, 'import xml.etree.ElementTree as ET\n'), ((5515, 5535), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['element'], {}), '(element)\n', (5526, 5535), True, 'import xml.etree.ElementTree as ET\n'), ((5551, 5579), 'xml.dom.minidom.parseString', 'xdm.parseString', (['unformatted'], {}), '(unformatted)\n', (5566, 5579), True, 'import xml.dom.minidom as xdm\n'), ((675, 704), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (702, 704), False, 'import tempfile\n'), ((742, 787), 'os.path.join', 'os.path.join', (['tmpdir', '"""ebtelplusplus.tmp.xml"""'], {}), "(tmpdir, 'ebtelplusplus.tmp.xml')\n", (754, 787), False, 'import os\n'), ((815, 856), 'os.path.join', 'os.path.join', (['tmpdir', '"""ebtelplusplus.tmp"""'], {}), "(tmpdir, 'ebtelplusplus.tmp')\n", (827, 856), False, 'import os\n'), ((1279, 1307), 'numpy.loadtxt', 'np.loadtxt', (['results_filename'], {}), '(results_filename)\n', (1289, 1307), True, 'import numpy as np\n'), ((1748, 1797), 'numpy.loadtxt', 'np.loadtxt', (["(config['output_filename'] + '.dem_tr')"], {}), "(config['output_filename'] + '.dem_tr')\n", (1758, 1797), True, 'import numpy as np\n'), ((1855, 1908), 'numpy.loadtxt', 'np.loadtxt', (["(config['output_filename'] + '.dem_corona')"], {}), "(config['output_filename'] + '.dem_corona')\n", (1865, 1908), True, 'import numpy as np\n'), ((2694, 2759), 'warnings.warn', 'warnings.warn', (['f"""No value found for input {var}. Returning None."""'], {}), "(f'No value found for input {var}. 
Returning None.')\n", (2707, 2759), False, 'import warnings\n'), ((3278, 3291), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3289, 3291), False, 'from collections import OrderedDict\n'), ((996, 1038), 'os.path.join', 'os.path.join', (['ebtel_dir', '"""bin/ebtel++.run"""'], {}), "(ebtel_dir, 'bin/ebtel++.run')\n", (1008, 1038), False, 'import os\n'), ((3613, 3687), 'warnings.warn', 'warnings.warn', (['f"""Unrecognized node format for {node.tag}. Returning None."""'], {}), "(f'Unrecognized node format for {node.tag}. Returning None.')\n", (3626, 3687), False, 'import warnings\n')] |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import os, numpy as np
import journal
# Debug logging channel for this module
debug = journal.debug('mccomponents.sample.diffraction')
# NOTE(review): not referenced anywhere in this file chunk — confirm external use
nsampling = 100
class ComputationEngineRendererExtension:
    """Handlers that render diffraction-kernel descriptions into C++ engine
    instances via ``self.factory`` (mixed into the computing-engine renderer)."""

    def onSingleCrystalDiffractionKernel(self, kernel):
        '''handler to create c++ instance of a single crystal diffraction kernel.
        '''
        # kernel object is created in the parser components
        # get unit cell
        scatterer = kernel.scatterer_origin
        try:
            unitcell = scatterer.phase.unitcell
        except AttributeError as err:
            # chain the original error so the root cause is preserved
            raise RuntimeError("Cannot obtain unitcell from scatterer %s, %s" % (
                scatterer.__class__.__name__, scatterer.name)) from err
        # NOTE: assert is stripped under -O; kept for backward-compatible behavior
        assert np.allclose(unitcell.lattice.base, kernel.basis_vectors, atol=1e-3), (
            "basis vectors mismatch. From crystal data: {}; from diffraction data: {}".format(
                unitcell.lattice.base, kernel.basis_vectors))
        # hkllist is a list of mccomponents.sample.diffraction.singlecrystal.HKL
        # instances; flatten to (h, k, l, F^2) tuples
        hkllist2 = []
        for hkl in kernel.hkllist:
            h, k, l = hkl.hkl
            hkllist2.append((h, k, l, hkl.F_squared))
        from sampleassembly import cross_sections
        abs_xs, inc_xs, coh_xs = cross_sections(scatterer, include_density=False)
        abs_xs /= units.area.barn
        mosaic = kernel.mosaic / units.angle.radian
        delta_d_d = kernel.Dd_over_d
        return self.factory.singlecrystaldiffractionkernel(
            kernel.basis_vectors, hkllist2, mosaic, delta_d_d, abs_xs)

    def onSimplePowderDiffractionKernel(self, kernel):
        '''handler to create c++ instance of a simple powder diffraction kernel.
        '''
        from .SimplePowderDiffractionKernel import Data
        data = Data()
        data.Dd_over_d = kernel.Dd_over_d
        data.unitcell_volume = kernel.unitcell_volume
        debug.log('unitcell volume: %s' % data.unitcell_volume)
        # !!!!!
        # number_of_atoms is not really used in the kernel implementation
        # needs double check
        data.number_of_atoms = 0
        # !!!!!
        # atomic_weight is not really used in the kernel implementation
        # needs double check
        data.atomic_weight = 0
        # !!!!!
        # density is not really used in the kernel implementation
        # needs double check
        data.density = 0
        # !!!!!
        # debye waller factor probably should be computed by default
        # needs improvement
        data.DebyeWaller_factor = kernel.DebyeWaller_factor
        # The kernel specification provides the cross sections directly; the
        # old path (sampleassembly.cross_sections on the scatterer) could
        # disagree with the unit cell used by the peaks file.
        xs = kernel.cross_sections
        # renamed from abs/inc/coh: 'abs' shadowed the builtin of the same name
        abs_xs, inc_xs, coh_xs = xs.abs, xs.inc, xs.coh
        debug.log('cross sections: abs: %s, inc: %s, coh: %s' % (abs_xs, inc_xs, coh_xs))
        data.absorption_cross_section = abs_xs  # /units.area.barn
        data.incoherent_cross_section = inc_xs  # /units.area.barn
        data.coherent_cross_section = coh_xs  # /units.area.barn
        data.peaks = kernel.peaks
        return self.factory.simplepowderdiffractionkernel(data)
def register(type, renderer_handler_method, override=False):
    '''register computing engine constructor method for a new type

    :param type: the class whose instances the handler renders;
                 its __name__ determines the handler method name ('on<Name>')
    :param renderer_handler_method: callable installed on the renderer class
    :param override: when True, silently replace an existing handler
    :raises ValueError: a handler for this type name already exists and
                        override is False
    '''
    Renderer = ComputationEngineRendererExtension
    global _registry
    name = type.__name__
    methodname = 'on%s' % name
    if hasattr(Renderer, methodname) and not override:
        # Fix: the two string fragments were concatenated without a
        # separator, producing a jammed message like "type Xon X already...".
        raise ValueError(
            "Cannot register handler for type %s: "
            "%s already registered as handler for type %s" % (
                type, methodname, _registry[name]))
    setattr(Renderer, methodname, renderer_handler_method)
    _registry[name] = type
    return
_registry = {}  # type name -> registered type; populated by register() above
from mccomponents.homogeneous_scatterer import registerRendererExtension
# Install this module's renderer extension into the framework's registry.
registerRendererExtension( ComputationEngineRendererExtension )
from . import units
# version
__id__ = "$Id$"
# End of file
| [
"sampleassembly.cross_sections",
"numpy.allclose",
"mccomponents.homogeneous_scatterer.registerRendererExtension",
"journal.debug"
] | [((414, 462), 'journal.debug', 'journal.debug', (['"""mccomponents.sample.diffraction"""'], {}), "('mccomponents.sample.diffraction')\n", (427, 462), False, 'import journal\n'), ((5172, 5233), 'mccomponents.homogeneous_scatterer.registerRendererExtension', 'registerRendererExtension', (['ComputationEngineRendererExtension'], {}), '(ComputationEngineRendererExtension)\n', (5197, 5233), False, 'from mccomponents.homogeneous_scatterer import registerRendererExtension\n'), ((1063, 1131), 'numpy.allclose', 'np.allclose', (['unitcell.lattice.base', 'kernel.basis_vectors'], {'atol': '(0.001)'}), '(unitcell.lattice.base, kernel.basis_vectors, atol=0.001)\n', (1074, 1131), True, 'import os, numpy as np\n'), ((1689, 1737), 'sampleassembly.cross_sections', 'cross_sections', (['scatterer'], {'include_density': '(False)'}), '(scatterer, include_density=False)\n', (1703, 1737), False, 'from sampleassembly import cross_sections\n')] |
import os
import csv
import ipdb
import time
import yaml
import random
import argparse
from collections import OrderedDict
import numpy as np
# Local imports
from train_baseline import cnn_val_loss
if __name__ == '__main__':
    # Random-search driver: samples hyperparameter combinations described by a
    # YAML config and trains a CNN for each, streaming metrics to CSV files.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str,
                        help='A YAML file specifying the grid search/random search configuration.')
    parser.add_argument('--exp_name', type=str, default=None,
                        help='Set the name of the experiment.')
    parser.add_argument('--seed', type=int, default=11,
                        help='Set random seed')
    args = parser.parse_args()

    # torch.manual_seed(args.seed)
    # torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Load the YAML configuration file.
    # Fix: yaml.load() without an explicit Loader is deprecated and raises a
    # TypeError in PyYAML >= 6; SafeLoader also avoids executing arbitrary
    # python object tags from the config file.
    with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)

    def random_generator(min_val, max_val):
        """Return a zero-arg sampler for Uniform(min_val, max_val)."""
        return lambda: random.uniform(min_val, max_val)

    def loguniform(min_val, max_val):
        """Return a zero-arg sampler, uniform in log-space over [min_val, max_val]."""
        return lambda: float(np.exp(np.random.uniform(np.log(min_val), np.log(max_val))))

    fixed_hparam_dict = OrderedDict()
    tune_hparam_dict = OrderedDict()  # hparam name -> zero-arg sampler
    search_over = []                  # names of the hyperparameters being tuned
    for hparam in config['fixed_hparams']:
        fixed_hparam_dict[hparam] = config['fixed_hparams'][hparam]
    for hparam in config['tune_hparams']:
        if 'sampling' in config['tune_hparams'][hparam]:
            if config['tune_hparams'][hparam]['sampling'] == 'random':
                min_val = config['tune_hparams'][hparam]['min_val']
                max_val = config['tune_hparams'][hparam]['max_val']
                print('{} {} {}'.format(hparam, min_val, max_val))
                search_over.append('{}:random'.format(hparam))
                if 'scale' in config['tune_hparams'][hparam] and config['tune_hparams'][hparam]['scale'] == 'log':
                    tune_hparam_dict[hparam] = loguniform(float(min_val), float(max_val))
                else:
                    tune_hparam_dict[hparam] = random_generator(min_val, max_val)

    # Derive the experiment directory name and create it if necessary.
    if args.exp_name is None:
        args.exp_name = 'cnn-{}'.format('-'.join(search_over))
    args.exp_name = '{}_seed_{}'.format(args.exp_name, args.seed)
    if not os.path.exists(args.exp_name):
        os.makedirs(args.exp_name)
    fixed_hparam_dict['save_dir'] = args.exp_name

    # Per-epoch metrics are appended to callback.csv by the callback below.
    callback_file = open(os.path.join(args.exp_name, 'callback.csv'), 'w')
    callback_writer = csv.DictWriter(callback_file, fieldnames=['elapsed_time', 'epoch', 'train_loss', 'train_acc', 'val_loss', 'val_acc'] + list(tune_hparam_dict.keys()))
    callback_writer.writeheader()

    def callback(epoch, avg_xentropy, train_acc, val_loss, val_acc, config):
        """Per-epoch hook given to cnn_val_loss; appends one row to callback.csv."""
        global curr_hparam_dict
        elapsed_time = time.time() - start_time
        result_dict = {'elapsed_time': elapsed_time, 'epoch': epoch, 'train_loss': avg_xentropy, 'train_acc': train_acc,
                       'val_loss': val_loss, 'val_acc': val_acc}
        result_dict.update(curr_hparam_dict)
        callback_writer.writerow(result_dict)
        callback_file.flush()

    # Save the final val and test performance to a results CSV file
    result_file = open(os.path.join(args.exp_name, 'progress.csv'), 'w')
    result_writer = csv.DictWriter(result_file, fieldnames=['elapsed_time', 'train_loss', 'train_acc', 'val_loss', 'val_acc', 'test_loss', 'test_acc'] + list(tune_hparam_dict.keys()))
    result_writer.writeheader()

    start_time = time.time()
    try:
        for i in range(400):  # Try up to 400 random hyperparameter combinations
            curr_hparam_dict = OrderedDict()
            for hparam in tune_hparam_dict:
                curr_hparam_dict[hparam] = tune_hparam_dict[hparam]()  # draw one sample
            params = {**fixed_hparam_dict, **curr_hparam_dict}
            train_loss, train_acc, val_loss, val_acc, test_loss, test_acc = cnn_val_loss(params, callback=callback, return_all=True)
            elapsed_time = time.time() - start_time
            result_dict = {'elapsed_time': elapsed_time,
                           'train_loss': train_loss, 'train_acc': train_acc,
                           'val_loss': val_loss, 'val_acc': val_acc,
                           'test_loss': test_loss, 'test_acc': test_acc}
            result_dict.update(curr_hparam_dict)
            result_writer.writerow(result_dict)
            result_file.flush()
    except KeyboardInterrupt:
        print('Exiting out of random search')
| [
"yaml.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"os.makedirs",
"random.uniform",
"numpy.log",
"os.path.exists",
"time.time",
"train_baseline.cnn_val_loss",
"collections.OrderedDict",
"os.path.join"
] | [((243, 268), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (266, 268), False, 'import argparse\n'), ((756, 781), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (770, 781), True, 'import numpy as np\n'), ((1240, 1253), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1251, 1253), False, 'from collections import OrderedDict\n'), ((1277, 1290), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1288, 1290), False, 'from collections import OrderedDict\n'), ((3615, 3626), 'time.time', 'time.time', ([], {}), '()\n', (3624, 3626), False, 'import time\n'), ((878, 890), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (887, 890), False, 'import yaml\n'), ((2371, 2400), 'os.path.exists', 'os.path.exists', (['args.exp_name'], {}), '(args.exp_name)\n', (2385, 2400), False, 'import os\n'), ((2410, 2436), 'os.makedirs', 'os.makedirs', (['args.exp_name'], {}), '(args.exp_name)\n', (2421, 2436), False, 'import os\n'), ((2515, 2558), 'os.path.join', 'os.path.join', (['args.exp_name', '"""callback.csv"""'], {}), "(args.exp_name, 'callback.csv')\n", (2527, 2558), False, 'import os\n'), ((3331, 3374), 'os.path.join', 'os.path.join', (['args.exp_name', '"""progress.csv"""'], {}), "(args.exp_name, 'progress.csv')\n", (3343, 3374), False, 'import os\n'), ((960, 992), 'random.uniform', 'random.uniform', (['min_val', 'max_val'], {}), '(min_val, max_val)\n', (974, 992), False, 'import random\n'), ((2904, 2915), 'time.time', 'time.time', ([], {}), '()\n', (2913, 2915), False, 'import time\n'), ((3748, 3761), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3759, 3761), False, 'from collections import OrderedDict\n'), ((4071, 4127), 'train_baseline.cnn_val_loss', 'cnn_val_loss', (['params'], {'callback': 'callback', 'return_all': '(True)'}), '(params, callback=callback, return_all=True)\n', (4083, 4127), False, 'from train_baseline import cnn_val_loss\n'), ((4156, 4167), 'time.time', 'time.time', 
([], {}), '()\n', (4165, 4167), False, 'import time\n'), ((1178, 1193), 'numpy.log', 'np.log', (['min_val'], {}), '(min_val)\n', (1184, 1193), True, 'import numpy as np\n'), ((1195, 1210), 'numpy.log', 'np.log', (['max_val'], {}), '(max_val)\n', (1201, 1210), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
import argparse
import math
import tensorsTools
from tensorsTools import Tensor
from tensorsTools import VectorField
from tensorsTools import Bar
def get_args(sigma1, sigma2, p1, p2, n, epsilon, L, coeff, output):
    """Parse command-line options and return the (possibly overridden) settings.

    Each parameter is the default value to use when the corresponding flag is
    not supplied on the command line.

    Returns
    -------
    (image_path, sigma1, sigma2, p1, p2, n, epsilon, L, coeff, output)

    Exits the program when no input image is given or when the power
    constraint p1 >= p2 >= 0 is violated.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--image", help="input image")
    parser.add_argument("-s1", "--sigma1", help="sigma 1")
    parser.add_argument("-s2", "--sigma2", help="sigma 2")
    parser.add_argument("-p1", "--power1", help="power 1")
    parser.add_argument("-p2", "--power2", help="power 2")
    parser.add_argument("-n", "--number", help="number of strokes")
    parser.add_argument("-e", "--epsilon", help="epsilon to draw strokes")
    parser.add_argument("-l", "--length", help="Length of strokes")
    parser.add_argument("-c", "--coefficient", help="coefficient to reduce the image")
    parser.add_argument("-o", "--output", help="custom output under ./output/tensors directory")
    args = parser.parse_args()

    if args.image is not None:
        print("Image Loaded : " + str(args.image))
    else:
        # Fix: typo "argurment" -> "argument" in the user-facing message.
        print("No image loaded used -i argument")
        quit()

    # Override the defaults with any value supplied on the command line
    # ('is not None' replaces the non-idiomatic '!= None' checks).
    if args.sigma1 is not None:
        sigma1 = float(args.sigma1)
    if args.sigma2 is not None:
        sigma2 = float(args.sigma2)
    if args.power1 is not None:
        p1 = float(args.power1)
    if args.power2 is not None:
        p2 = float(args.power2)
    if args.number is not None:
        n = int(args.number)
    if args.epsilon is not None:
        epsilon = float(args.epsilon)
    if args.length is not None:
        L = float(args.length)
    if args.coefficient is not None:
        coeff = float(args.coefficient)
    if args.output is not None:
        output = str(args.output)

    if p1 < p2 or p1 < 0:
        print("You should have p1>=p2>=0")
        quit()

    return args.image, sigma1, sigma2, p1, p2, n, epsilon, L, coeff, output
def initialization(img,sigma):
img = cv.GaussianBlur(img,(15,15),sigma)
img_lab = cv.cvtColor(np.uint8(img), cv.COLOR_BGR2LAB)
# Estimate the smoothed structure tensor field
# sobel x
img_sobel_x_lab = cv.Sobel(img_lab, cv.CV_64F, 1, 0, ksize=1)
# sobel y
img_sobel_y_lab = cv.Sobel(img_lab, cv.CV_64F, 0, 1, ksize=1)
return img_sobel_x_lab,img_sobel_y_lab
def computeEigen(img_sobel_x_lab, img_sobel_y_lab, sigma):
    """Compute the smoothed structure-tensor component fields.

    A = <Ix, Ix>, B = <Iy, Iy>, C = <Ix, Iy>, where <.,.> sums the per-channel
    (L, a, b) products, followed by a Gaussian blur on each field.

    :return: (A, B, C) float64 arrays
    """
    def channel_dot(u, v):
        # Per-pixel sum of channel products; same left-to-right evaluation
        # order as the original explicit a + b + c expression.
        return (u[:, :, 0] * v[:, :, 0]
                + u[:, :, 1] * v[:, :, 1]
                + u[:, :, 2] * v[:, :, 2])

    A = channel_dot(img_sobel_x_lab, img_sobel_x_lab)
    B = channel_dot(img_sobel_y_lab, img_sobel_y_lab)
    C = channel_dot(img_sobel_x_lab, img_sobel_y_lab)
    # Smooth each component field
    A = cv.GaussianBlur(A, (15, 15), sigma)
    B = cv.GaussianBlur(B, (15, 15), sigma)
    C = cv.GaussianBlur(C, (15, 15), sigma)
    # Ensure CV_64FC1 (float64) for the eigen-decomposition downstream
    return np.float64(A), np.float64(B), np.float64(C)
def computeTensors(A,B,C,p1,p2):
    """Build one tensorsTools.Tensor per pixel from the smoothed structure
    tensor fields A, B, C produced by computeEigen.

    :param A,B,C: 2-D float64 arrays of identical shape
    :param p1,p2: exponents forwarded to Tensor; caller guarantees p1 >= p2 >= 0
    :return: array of shape A.shape holding Tensor objects
    """
    bar=tensorsTools.Bar("Compute Tensors",A.shape[0]*A.shape[1])
    # NOTE(review): the Tensor class is passed as the dtype argument --
    # presumably this yields an object array; confirm against tensorsTools.
    T =np.zeros(A.shape,Tensor)
    for i in range(A.shape[0]) :
        for j in range (A.shape[1]) :
            # create symetric matrix 2x2 [[A,C][C,B]]
            tmp=np.zeros((2,2),np.float64)
            tmp[0,0]=A[i,j]
            tmp[0,1]=C[i,j]
            tmp[1,0]=C[i,j]
            tmp[1,1]=B[i,j]
            # extract eigenValues and eigenVectors to compute tensor
            T[i,j]=Tensor(cv.eigen(tmp),p1,p2)
            bar.next()
    return T
def computeVectorField(T):
    """Build one VectorField per pixel and per orientation angle.

    :param T: 2-D array of Tensor objects (from computeTensors)
    :return: array w with w[p, i, j] = VectorField(T[i, j], gamma[p])
    """
    # Orientation angles: 0, 45, 90 and 135 degrees.
    gamma=np.array([0,math.pi/4,math.pi/2,3*math.pi/4])
    #gamma = np.array([0, math.pi / 2])
    w = []
    bar=tensorsTools.Bar("Compute VectorField",T.shape[0]*T.shape[1]*len(gamma))
    # One placeholder object array per angle, stacked into shape
    # (len(gamma),) + T.shape below.
    for i in range(len(gamma)):
        w.append(np.zeros(T.shape, VectorField))
    w = np.array(w)
    for i in range(T.shape[0]) :
        for j in range (T.shape[1]) :
            for p in range(len(gamma)) :
                w[p,i,j]=VectorField(T[i,j],gamma[p])
                bar.next()
    return w
def main():
    """Run the full tensor-painting pipeline on the CLI-selected image."""
    timer = tensorsTools.Timer()
    # Default parameters, may be overridden from the command line.
    sigma1 = 0.5
    sigma2 = 1.2
    p1 = 1.2  # constraint enforced by get_args: p1 >= p2 >= 0
    p2 = 0.5
    n = 100000
    epsilon = 1
    L = 80
    coeff = 1
    output = ""
    (img_path, sigma1, sigma2, p1, p2, n,
     epsilon, L, coeff, output) = get_args(sigma1, sigma2, p1, p2, n,
                                           epsilon, L, coeff, output)
    # Load (BGR) and downscale the input image by 'coeff'.
    img = cv.imread(str(img_path), cv.IMREAD_COLOR)
    new_size = (int(img.shape[1] / coeff), int(img.shape[0] / coeff))
    img = cv.resize(img, new_size)
    # Pipeline: gradients -> structure tensor -> tensors -> vector field -> strokes
    grad_x, grad_y = initialization(img, sigma1)
    A, B, C = computeEigen(grad_x, grad_y, sigma2)
    T = computeTensors(A, B, C, p1, p2)
    tensorsTools.draw_ellipses_G(img, T, output=output)  # structure
    tensorsTools.draw_ellipses_T(img, T, output=output)  # strokes
    w = computeVectorField(T)
    tensorsTools.draw_strokes(img, w, T, n, epsilon, L, output=output)
    print('Time : ' + str(timer))


if __name__ == '__main__':
    main()
"cv2.GaussianBlur",
"numpy.uint8",
"argparse.ArgumentParser",
"tensorsTools.Timer",
"tensorsTools.draw_ellipses_G",
"tensorsTools.Bar",
"numpy.zeros",
"tensorsTools.draw_strokes",
"tensorsTools.VectorField",
"tensorsTools.draw_ellipses_T",
"numpy.array",
"cv2.eigen",
"numpy.float64",
"cv2.... | [((256, 281), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (279, 281), False, 'import argparse\n'), ((1902, 1939), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img', '(15, 15)', 'sigma'], {}), '(img, (15, 15), sigma)\n', (1917, 1939), True, 'import cv2 as cv\n'), ((2083, 2126), 'cv2.Sobel', 'cv.Sobel', (['img_lab', 'cv.CV_64F', '(1)', '(0)'], {'ksize': '(1)'}), '(img_lab, cv.CV_64F, 1, 0, ksize=1)\n', (2091, 2126), True, 'import cv2 as cv\n'), ((2163, 2206), 'cv2.Sobel', 'cv.Sobel', (['img_lab', 'cv.CV_64F', '(0)', '(1)'], {'ksize': '(1)'}), '(img_lab, cv.CV_64F, 0, 1, ksize=1)\n', (2171, 2206), True, 'import cv2 as cv\n'), ((2781, 2816), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['A', '(15, 15)', 'sigma'], {}), '(A, (15, 15), sigma)\n', (2796, 2816), True, 'import cv2 as cv\n'), ((2820, 2855), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['B', '(15, 15)', 'sigma'], {}), '(B, (15, 15), sigma)\n', (2835, 2855), True, 'import cv2 as cv\n'), ((2859, 2894), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['C', '(15, 15)', 'sigma'], {}), '(C, (15, 15), sigma)\n', (2874, 2894), True, 'import cv2 as cv\n'), ((2927, 2940), 'numpy.float64', 'np.float64', (['A'], {}), '(A)\n', (2937, 2940), True, 'import numpy as np\n'), ((2947, 2960), 'numpy.float64', 'np.float64', (['B'], {}), '(B)\n', (2957, 2960), True, 'import numpy as np\n'), ((2967, 2980), 'numpy.float64', 'np.float64', (['C'], {}), '(C)\n', (2977, 2980), True, 'import numpy as np\n'), ((3041, 3101), 'tensorsTools.Bar', 'tensorsTools.Bar', (['"""Compute Tensors"""', '(A.shape[0] * A.shape[1])'], {}), "('Compute Tensors', A.shape[0] * A.shape[1])\n", (3057, 3101), False, 'import tensorsTools\n'), ((3106, 3131), 'numpy.zeros', 'np.zeros', (['A.shape', 'Tensor'], {}), '(A.shape, Tensor)\n', (3114, 3131), True, 'import numpy as np\n'), ((3601, 3657), 'numpy.array', 'np.array', (['[0, math.pi / 4, math.pi / 2, 3 * math.pi / 4]'], {}), '([0, math.pi / 4, math.pi / 2, 3 * math.pi / 4])\n', (3609, 3657), 
True, 'import numpy as np\n'), ((3868, 3879), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (3876, 3879), True, 'import numpy as np\n'), ((4129, 4149), 'tensorsTools.Timer', 'tensorsTools.Timer', ([], {}), '()\n', (4147, 4149), False, 'import tensorsTools\n'), ((4550, 4570), 'cv2.resize', 'cv.resize', (['img', 'size'], {}), '(img, size)\n', (4559, 4570), True, 'import cv2 as cv\n'), ((4748, 4799), 'tensorsTools.draw_ellipses_G', 'tensorsTools.draw_ellipses_G', (['img', 'T'], {'output': 'output'}), '(img, T, output=output)\n', (4776, 4799), False, 'import tensorsTools\n'), ((4814, 4865), 'tensorsTools.draw_ellipses_T', 'tensorsTools.draw_ellipses_T', (['img', 'T'], {'output': 'output'}), '(img, T, output=output)\n', (4842, 4865), False, 'import tensorsTools\n'), ((4904, 4970), 'tensorsTools.draw_strokes', 'tensorsTools.draw_strokes', (['img', 'w', 'T', 'n', 'epsilon', 'L'], {'output': 'output'}), '(img, w, T, n, epsilon, L, output=output)\n', (4929, 4970), False, 'import tensorsTools\n'), ((1963, 1976), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (1971, 1976), True, 'import numpy as np\n'), ((3272, 3300), 'numpy.zeros', 'np.zeros', (['(2, 2)', 'np.float64'], {}), '((2, 2), np.float64)\n', (3280, 3300), True, 'import numpy as np\n'), ((3828, 3858), 'numpy.zeros', 'np.zeros', (['T.shape', 'VectorField'], {}), '(T.shape, VectorField)\n', (3836, 3858), True, 'import numpy as np\n'), ((3506, 3519), 'cv2.eigen', 'cv.eigen', (['tmp'], {}), '(tmp)\n', (3514, 3519), True, 'import cv2 as cv\n'), ((4018, 4048), 'tensorsTools.VectorField', 'VectorField', (['T[i, j]', 'gamma[p]'], {}), '(T[i, j], gamma[p])\n', (4029, 4048), False, 'from tensorsTools import VectorField\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020 - <NAME>"
__license__ = "MIT"
__version__ = "1.0"
# =============================================================================
import numpy as np
import logging
import random
import click
import h5py
import os
from os.path import join, isfile, splitext
from collections import namedtuple
from tqdm import tqdm
from aertb.core.types import Sample, EvSample, event_dtype
from aertb.core.const import SUPPORTED_EXT
from aertb.core.loaders import get_loader
# =============================================================================
class HDF5FileIterator:
    """ Returns an iterator over an HDF5 file, suggested usage is:

            iterator = HDF5FileIterator(..)
            for elem in iterator:
                # do something ...
    """

    def __init__(self, file, samples):
        """
        Params
        ------
        :param file: an open h5py file object
        :param samples: the samples that will be included in the iteration
        """
        self.file = file
        self.samples = samples
        self.index = 0

    def __iter__(self):
        return self

    def __next__(self):
        # (The original 'while' could run at most once since the body always
        # returns, so a plain 'if' is equivalent.)
        if self.index < len(self.samples):
            current = self.samples[self.index]
            raw = self.file[current.group][current.name]
            events = np.array(raw)
            self.index += 1
            return EvSample(current.group, current.name, events)
        # Exhausted: rewind so the iterator can be reused, then stop.
        self.index = 0
        raise StopIteration

    def __getitem__(self, x):
        # NOTE(review): only slices are supported; any other index falls
        # through and returns None (preserved behaviour) -- confirm whether
        # integer indexing should be added.
        if isinstance(x, slice):
            subset = self.samples[x.start:x.stop:x.step]
            return HDF5FileIterator(self.file, subset)

    def reset(self):
        """ Resets the iterator"""
        self.index = 0

    def __len__(self):
        return len(self.samples)
# =============================================================================
class HDF5File:
    """
    A wrapper offering useful methods over an HDF5 file; to access
    the underlying h5py file use the .file attribute.
    """

    # ------------------------------------------------------------------------
    def __init__(self, filename, groups='all'):
        """
        Params
        ------
        :param filename: the name of the HDF5 file
        :param groups: the groups in the HDF5 that will be considered,
                       by default all groups
        """
        self.file = h5py.File(filename, 'r')
        self.groups = groups
        self.file_stats = self.get_file_stats()

    # ------------------------------------------------------------------------
    def _selected_groups(self):
        """Groups to operate on: every group in the file, or the user subset."""
        return list(self.file.keys()) if self.groups == 'all' else self.groups

    # ------------------------------------------------------------------------
    def get_file_stats(self):
        """
        Returns a dictionary with key: group and value: sample count
        """
        file_stats = {}
        for group in list(self.file.keys()):
            file_stats[group] = len(list(self.file[group].keys()))
        return file_stats

    # ------------------------------------------------------------------------
    def load_events(self, group, name):
        """
        Load one sample as a structured numpy array of events.

        :param group: the group/label of the sample to load
        :param name: the name of the sample to load
        """
        return np.array(self.file[group][name])

    # ------------------------------------------------------------------------
    def get_sample_names(self, n_samples_group='all', rand=-1):
        """
        Returns the Sample(group, name) tuples contained in the file.

        :param n_samples_group: samples to take per group ('all' for every one)
        :param rand: seed for random selection; negative or None keeps
                     the sequential order
        """
        # Fix: rand=None used to raise TypeError on 'rand < 0'
        sequential = rand is None or rand < 0
        samples = []
        for group in self._selected_groups():
            group_samples = list(self.file[group].keys())
            if n_samples_group == 'all':
                to_sample = len(group_samples)
            elif len(group_samples) < n_samples_group:
                err_msg = f'There are insufficient samples in group {group}'
                click.secho(err_msg, bg='yellow')
                to_sample = len(group_samples)
            else:
                to_sample = n_samples_group
            # Within group selection
            if sequential:
                indices = range(0, to_sample)
            else:
                random.seed(rand)
                indices = random.sample(range(0, len(group_samples)), to_sample)
            for i in indices:
                samples.append(Sample(group, group_samples[i]))
        # Shuffle between groups
        if not sequential and rand > 0:
            random.Random(rand).shuffle(samples)
        return samples

    # ------------------------------------------------------------------------
    def iterator(self, n_samples_group='all', rand=23):
        """Return an HDF5FileIterator over the selected samples.

        :param n_samples_group: the samples to consider for each label group
        :param rand: a seed for shuffling, by default 23
        """
        samples = self.get_sample_names(n_samples_group, rand)
        return HDF5FileIterator(self.file, samples)

    # ------------------------------------------------------------------------
    def train_test_split(self, test_percentage, stratify=True, rand=23):
        """
        Creates a train/test split from a single HDF5 file.

        :param test_percentage: fraction [0.0, 1.0) of samples for the test set
        :param stratify: if True the split is computed per group so the test
                         set keeps the class distribution; otherwise test
                         samples are drawn regardless of class
        :param rand: random seed for shuffling; negative or None keeps the
                     sequential order
        """
        sequential = rand is None or rand < 0
        train_samples = []
        test_samples = []
        if stratify:
            for group in self._selected_groups():
                group_samples = list(self.file[group].keys())
                n_test_samples = round(len(group_samples) * test_percentage)
                n_train_samples = len(group_samples) - n_test_samples
                if sequential:
                    indices = list(range(0, len(group_samples)))
                else:
                    random.seed(rand)
                    indices = random.sample(range(0, len(group_samples)),
                                            len(group_samples))
                # Fix: the old slice [-n_test_samples:-1] dropped the last
                # sample, and selected almost the whole group whenever
                # n_test_samples == 0 ([-0:-1] == [0:-1]).
                train_indices = indices[0:n_train_samples]
                test_indices = indices[n_train_samples:]
                for i in train_indices:
                    train_samples.append(Sample(group, group_samples[i]))
                for j in test_indices:
                    test_samples.append(Sample(group, group_samples[j]))
            # Shuffle between groups
            if not sequential and rand > 0:
                random.Random(rand).shuffle(train_samples)
                random.Random(rand).shuffle(test_samples)
        else:
            # Fix: rand was passed positionally, landing in n_samples_group.
            all_samples = self.get_sample_names(rand=rand)
            n_test_samples = round(len(all_samples) * test_percentage)
            n_train_samples = len(all_samples) - n_test_samples
            train_samples = all_samples[0:n_train_samples]
            test_samples = all_samples[n_train_samples:]
        return (HDF5FileIterator(self.file, train_samples),
                HDF5FileIterator(self.file, test_samples))

    # ------------------------------------------------------------------------
    def fixed_train_test_split(self, n_train, n_test, rand=23):
        """
        :param n_train: number of train samples per group
        :param n_test: number of test samples per group
        :param rand: seed for shuffling; None or non-positive keeps order
        """
        train_samples = []
        test_samples = []
        for group in self._selected_groups():
            group_samples = list(self.file[group].keys())
            # First n_train samples go to train, the next n_test to test.
            for i, sample in enumerate(group_samples[:n_train + n_test]):
                if i < n_train:
                    train_samples.append(Sample(group, group_samples[i]))
                else:
                    test_samples.append(Sample(group, group_samples[i]))
        if rand is not None and rand > 0:
            random.Random(rand).shuffle(train_samples)
            random.Random(rand + 1).shuffle(test_samples)
        return HDF5FileIterator(self.file, train_samples), HDF5FileIterator(self.file, test_samples)
# =============================================================================
# Conversion code
# =============================================================================
def create_hdf5_dataset(dataset_name, file_or_dir, ext, polarities=None,
                        to_secs=True):
    """
    Creates an HDF5 file with the specified name, for a single event file or
    a parent directory containing event files. A different group is created
    for each subdirectory (direct files go into a 'root' group).

    Params
    ------
    :param dataset_name: the name of the HDF5 file with file extension
    :param file_or_dir: path to one event file or to the parent directory
    :param ext: the event-file extension to look for (e.g. 'dat')
    :param polarities: indicates the polarity encoding for the data,
                       [0, 1] (the default) or [-1, 1]
    :param to_secs: forwarded to the loader (timestamps in seconds)
    """
    # Fix: mutable default argument replaced by a None sentinel.
    if polarities is None:
        polarities = [0, 1]
    with h5py.File(dataset_name, 'w') as fp:
        # if we are dealing with only one file
        if isfile(file_or_dir):
            fname = os.path.split(file_or_dir)[1].split('.')[0]
            g = fp.create_group('root')
            loader = get_loader(ext)
            events = loader.load_events(file_or_dir, polarities, to_secs)
            g.create_dataset(f'{fname}', data=events, compression=8)
        # else we are dealing with directories
        else:
            _add_all_files(fp, file_or_dir, 'root', polarities, to_secs, ext)
            # Navigate subdirectories
            sub_dirs = [f.name for f in os.scandir(file_or_dir) if f.is_dir()]
            if '.Ds_Store' in sub_dirs: sub_dirs.remove('.Ds_Store')
            logging.info(f'Processing directories: {sub_dirs} ')
            # for each subdirectory add all_files
            for folder in sub_dirs:
                _add_all_files(fp, join(file_or_dir, folder), folder, polarities, to_secs, ext)
# -------------------------------------------------------------------------
def _add_all_files(fp, dir_path, dir_name, polarities, to_secs, ext):
    """
    Supporting function for creating a dataset: stores every '.<ext>' file
    directly inside dir_path as a dataset in a new HDF5 group dir_name.
    """
    logging.info(f'Processing {dir_path}')
    # Collect the names of files matching the requested extension.
    entries = list(os.scandir(dir_path))
    valid_files = [entry.name for entry in entries
                   if splitext(entry)[1] == f'.{ext}']
    logging.info(f'Files: {valid_files}')
    if not valid_files:
        return
    group = fp.create_group(dir_name)
    logging.info(f'Found the following valid files {valid_files} in {dir_path}')
    for file in tqdm(valid_files, desc=f'Dir: {dir_name}', unit='file'):
        loader = get_loader(ext)
        events = loader.load_events(join(dir_path, file), polarities, to_secs)
        group.create_dataset(f"{file.split('.')[0]}", data=events, compression=8)
# =============================================================================
| [
"h5py.File",
"tqdm.tqdm",
"aertb.core.types.EvSample",
"random.Random",
"logging.info",
"os.path.isfile",
"numpy.array",
"random.seed",
"os.path.splitext",
"aertb.core.types.Sample",
"click.secho",
"os.path.join",
"aertb.core.loaders.get_loader",
"os.scandir",
"os.path.split"
] | [((11934, 11972), 'logging.info', 'logging.info', (['f"""Processing {dir_path}"""'], {}), "(f'Processing {dir_path}')\n", (11946, 11972), False, 'import logging\n'), ((12131, 12168), 'logging.info', 'logging.info', (['f"""Files: {valid_files}"""'], {}), "(f'Files: {valid_files}')\n", (12143, 12168), False, 'import logging\n'), ((2825, 2849), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2834, 2849), False, 'import h5py\n'), ((3824, 3838), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3832, 3838), True, 'import numpy as np\n'), ((10740, 10768), 'h5py.File', 'h5py.File', (['dataset_name', '"""w"""'], {}), "(dataset_name, 'w')\n", (10749, 10768), False, 'import h5py\n'), ((10835, 10854), 'os.path.isfile', 'isfile', (['file_or_dir'], {}), '(file_or_dir)\n', (10841, 10854), False, 'from os.path import join, isfile, splitext\n'), ((12250, 12326), 'logging.info', 'logging.info', (['f"""Found the following valid files {valid_files} in {dir_path}"""'], {}), "(f'Found the following valid files {valid_files} in {dir_path}')\n", (12262, 12326), False, 'import logging\n'), ((12348, 12403), 'tqdm.tqdm', 'tqdm', (['valid_files'], {'desc': 'f"""Dir: {dir_name}"""', 'unit': '"""file"""'}), "(valid_files, desc=f'Dir: {dir_name}', unit='file')\n", (12352, 12403), False, 'from tqdm import tqdm\n'), ((1503, 1517), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1511, 1517), True, 'import numpy as np\n'), ((1566, 1612), 'aertb.core.types.EvSample', 'EvSample', (['sample.group', 'sample.name', 'events_np'], {}), '(sample.group, sample.name, events_np)\n', (1574, 1612), False, 'from aertb.core.types import Sample, EvSample, event_dtype\n'), ((10981, 10996), 'aertb.core.loaders.get_loader', 'get_loader', (['ext'], {}), '(ext)\n', (10991, 10996), False, 'from aertb.core.loaders import get_loader\n'), ((11480, 11532), 'logging.info', 'logging.info', (['f"""Processing directories: {sub_dirs} """'], {}), "(f'Processing directories: 
{sub_dirs} ')\n", (11492, 11532), False, 'import logging\n'), ((12027, 12047), 'os.scandir', 'os.scandir', (['dir_path'], {}), '(dir_path)\n', (12037, 12047), False, 'import os\n'), ((12439, 12454), 'aertb.core.loaders.get_loader', 'get_loader', (['ext'], {}), '(ext)\n', (12449, 12454), False, 'from aertb.core.loaders import get_loader\n'), ((4969, 4986), 'random.seed', 'random.seed', (['rand'], {}), '(rand)\n', (4980, 4986), False, 'import random\n'), ((12495, 12515), 'os.path.join', 'join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (12499, 12515), False, 'from os.path import join, isfile, splitext\n'), ((4682, 4715), 'click.secho', 'click.secho', (['err_msg'], {'bg': '"""yellow"""'}), "(err_msg, bg='yellow')\n", (4693, 4715), False, 'import click\n'), ((5169, 5200), 'aertb.core.types.Sample', 'Sample', (['group', 'group_samples[i]'], {}), '(group, group_samples[i])\n', (5175, 5200), False, 'from aertb.core.types import Sample, EvSample, event_dtype\n'), ((5269, 5288), 'random.Random', 'random.Random', (['rand'], {}), '(rand)\n', (5282, 5288), False, 'import random\n'), ((7744, 7761), 'random.seed', 'random.seed', (['rand'], {}), '(rand)\n', (7755, 7761), False, 'import random\n'), ((9688, 9707), 'random.Random', 'random.Random', (['rand'], {}), '(rand)\n', (9701, 9707), False, 'import random\n'), ((9743, 9766), 'random.Random', 'random.Random', (['(rand + 1)'], {}), '(rand + 1)\n', (9756, 9766), False, 'import random\n'), ((11359, 11382), 'os.scandir', 'os.scandir', (['file_or_dir'], {}), '(file_or_dir)\n', (11369, 11382), False, 'import os\n'), ((11654, 11679), 'os.path.join', 'join', (['file_or_dir', 'folder'], {}), '(file_or_dir, folder)\n', (11658, 11679), False, 'from os.path import join, isfile, splitext\n'), ((12097, 12108), 'os.path.splitext', 'splitext', (['f'], {}), '(f)\n', (12105, 12108), False, 'from os.path import join, isfile, splitext\n'), ((8057, 8088), 'aertb.core.types.Sample', 'Sample', (['group', 'group_samples[i]'], {}), '(group, 
group_samples[i])\n', (8063, 8088), False, 'from aertb.core.types import Sample, EvSample, event_dtype\n'), ((8170, 8201), 'aertb.core.types.Sample', 'Sample', (['group', 'group_samples[j]'], {}), '(group, group_samples[j])\n', (8176, 8201), False, 'from aertb.core.types import Sample, EvSample, event_dtype\n'), ((8282, 8301), 'random.Random', 'random.Random', (['rand'], {}), '(rand)\n', (8295, 8301), False, 'import random\n'), ((8341, 8360), 'random.Random', 'random.Random', (['rand'], {}), '(rand)\n', (8354, 8360), False, 'import random\n'), ((9526, 9557), 'aertb.core.types.Sample', 'Sample', (['group', 'group_samples[i]'], {}), '(group, group_samples[i])\n', (9532, 9557), False, 'from aertb.core.types import Sample, EvSample, event_dtype\n'), ((9621, 9652), 'aertb.core.types.Sample', 'Sample', (['group', 'group_samples[i]'], {}), '(group, group_samples[i])\n', (9627, 9652), False, 'from aertb.core.types import Sample, EvSample, event_dtype\n'), ((10876, 10902), 'os.path.split', 'os.path.split', (['file_or_dir'], {}), '(file_or_dir)\n', (10889, 10902), False, 'import os\n')] |
import numpy as np

# One-letter code per prover, used to encode whole rankings compactly as strings.
PROVERMAP = {'Z3-4.4.1': 'Z', 'Z3-4.3.2':'z', 'CVC4':'4', 'CVC3':'3',
          'Yices':'y', 'veriT':'v', 'Alt-Ergo-0.95.2':'a', 'Alt-Ergo-1.01':'A'}
# BUGFIX: dict.iteritems() is Python 2 only (removed in Python 3); .items()
# behaves identically here and works on both interpreter lines.
REV_MAP = {val:key for key, val in PROVERMAP.items()}
PROVERS = ['Z3-4.4.1', 'Z3-4.3.2','CVC3','CVC4','Yices','veriT','Alt-Ergo-1.01', 'Alt-Ergo-0.95.2']
# Penalty attached to each solver outcome when scoring a run.
RESULT_MAP = {'Valid':0, 'Invalid':0, 'Unknown':10, 'Timeout':15}
# anything else: 20
# this one is for (str -> int) classification used by make_val
CLASS_MAP = {'Valid':0, 'Invalid':5, 'Unknown':10, 'Timeout':15}
# BUGFIX: same Python-2 iteritems() fix as REV_MAP above.
CLASS_MAP_REV = {val:key for key, val in CLASS_MAP.items()}
# anything else: 20
OUTPUT = list(RESULT_MAP.keys()) + ['Failure']
LEVELS = ['file','theory', 'goal']
# Per-prover column names used in the evaluation dataframes.
TIMES = [p+' time' for p in PROVERS]
RESULTS = [p+' result' for p in PROVERS]
IGNORE = LEVELS+TIMES+RESULTS
Y_COLUMNS = TIMES+RESULTS
# Empirical lower bound for NDCG@8, used by scale_ndcg_8 to rescale to [0, 1].
WORST_NDCG = 0.43936240961058232
def euc_distance(v, w):
    """Euclidean distance between two equal-length numeric vectors."""
    diff = np.array(v) - np.array(w)
    return np.linalg.norm(diff)
def score_func_single(result, time):
    """Score one prover run from its outcome and runtime.

    The outcome is mapped to a penalty via RESULT_MAP (20 for any
    unlisted outcome) and paired with the unscaled time in seconds; the
    score is the Euclidean distance of that point from the origin (0, 0).
    """
    penalty = RESULT_MAP.get(result, 20)
    return euc_distance([penalty, time], [0, 0])
def score_func(results, times):
    """Apply score_func_single element-wise over paired results and times."""
    return [score_func_single(res, sec) for res, sec in zip(results, times)]
def new_score_func_single(result, time, delta):
    """Score one run: Unknown pays time + delta, definitive answers pay
    the raw time, and anything else pays the Euclidean norm of
    (time, delta)."""
    if result == 'Unknown':
        return time + delta
    if result in ('Valid', 'Invalid', 'Unknown'):
        return time
    # hypot(time, delta) == Euclidean distance of (time, delta) from (0, 0).
    return np.hypot(time, delta)
def twice_delta_score_func(result, time, delta):
    """Score one run, penalising non-answers by a flat 2*delta.

    Definitive answers cost the raw time, Unknown costs time + delta,
    and every other outcome costs time + 2*delta.
    """
    if result in ('Valid', 'Invalid'):
        return time
    if result == 'Unknown':
        return time + delta
    return time + 2 * delta
def new_score_func(results, times, delta):
    """Apply new_score_func_single element-wise over paired results/times."""
    return [new_score_func_single(res, sec, delta)
            for res, sec in zip(results, times)]
def get_best(ser):
    """One-letter code of the prover ranked first in the sorted Series.

    NOTE: sorts `ser` in place, mutating the caller's Series.
    """
    ser.sort_values(inplace=True)
    top_prover = ser.index[0]
    return PROVERMAP[top_prover]
def get_strings(ser):
    """Full prover ranking as a string of one-letter codes.

    NOTE: sorts `ser` in place, mutating the caller's Series.
    """
    ser.sort_values(inplace=True)
    letters = [PROVERMAP[prover] for prover in ser.index]
    return ''.join(letters)
def get_string_threshold(ser, thresh):
    """Ranking string like get_strings, keeping only provers whose value
    is at most `thresh`.

    NOTE: sorts `ser` in place, mutating the caller's Series.
    """
    ser.sort_values(inplace=True)
    kept = [PROVERMAP[prover] for prover in ser.index if ser[prover] <= thresh]
    return ''.join(kept)
def same_or_not(x, y):
    """Return 1 when x equals y, else 0 -- handy for counting matches
    inside comprehensions."""
    return 1 if x == y else 0
def random_rank():
    """A uniformly random ordering of the one-letter prover codes."""
    letters = list(PROVERMAP.values())
    np.random.shuffle(letters)
    return ''.join(letters)
def random_prover():
    """One-letter code of a prover drawn uniformly at random."""
    chosen = np.random.choice(PROVERS)
    return PROVERMAP[chosen]
def random_result():
    """A solver outcome drawn uniformly at random from OUTPUT."""
    outcome = np.random.choice(OUTPUT)
    return outcome
def provable(ser):
    """Return 1 if any prover gave a definitive answer for this row, else 0."""
    outcomes = [ser[prover + ' result'] for prover in PROVERS]
    if any(res in ('Valid', 'Invalid') for res in outcomes):
        return 1
    return 0
# Indicator predicates used to count each kind of result in evaluations.
def is_valid(res):
    """1 if `res` starts with 'Valid', else 0."""
    return 1 if res.startswith('Valid') else 0
def is_invalid(res):
    """1 if `res` starts with 'Invalid', else 0."""
    return 1 if res.startswith('Invalid') else 0
def is_unknown(res):
    """1 if `res` starts with 'Unknown', else 0."""
    return 1 if res.startswith('Unknown') else 0
def is_timeout(res):
    """1 if `res` starts with 'Timeout', else 0."""
    return 1 if res.startswith('Timeout') else 0
def is_error(res):
    """1 when `res` matches none of the four known outcomes, else 0.

    At most one of the is_* predicates can fire (no outcome name is a
    prefix of another's start), so their sum is 0 or 1; abs(sum - 1)
    inverts that indicator.
    """
    known = is_valid(res) + is_invalid(res) + is_unknown(res) + is_timeout(res)
    return abs(known - 1)
def make_val(v):
    """Map an outcome name to its CLASS_MAP penalty; unlisted names get 20."""
    return CLASS_MAP.get(v, 20)
def find_position(s, c):
    """Index of the first occurrence of `c` in `s`, or -1 when absent
    (sentinel prevents weird downstream results)."""
    return next((i for i, x in enumerate(s) if x == c), -1)
def relevance(y, c):
    """Graded relevance of prover code `c` within the true ranking `y`.

    The top-ranked prover scores len(y) + 1, decreasing by one per
    position; a prover absent from `y` scores 0.
    """
    pos = find_position(y, c)
    return 0 if pos == -1 else len(y) + 1 - pos
def dcg2(y, y_pred, k=6):
    """Discounted cumulative gain of `y_pred` against the true ranking `y`,
    truncated to the top `k` positions.

    Uses the standard (2**relevance - 1) gain with a log2(i + 2)
    positional discount.
    """
    # Generator expression instead of a throwaway list inside sum() (ruff C419).
    return sum((2 ** relevance(y, c) - 1) / np.log2(i + 2)
               for i, c in enumerate(y_pred[:k]))
def ndcg(y, y_pred, k=6):
    """Normalised DCG: the prediction's dcg2 divided by the ideal dcg2."""
    ideal = dcg2(y, y, k=k)
    return dcg2(y, y_pred, k=k) / ideal
def scale_ndcg_8(x):
    """Rescale an NDCG@8 score so the empirical worst (WORST_NDCG) maps
    to 0 and a perfect score maps to 1."""
    span = 1.0 - WORST_NDCG
    return (x - WORST_NDCG) / span
def ndcg_k(y, y_pred, k):
    """NDCG at cutoff `k`; the k == 8 case is additionally rescaled via
    scale_ndcg_8."""
    score = ndcg(y, y_pred, k=k)
    return scale_ndcg_8(score) if k == 8 else score
def mae(y, y_pred):
    """Mean absolute positional error between two ranks (encoded as strings)."""
    errors = [abs(pos - find_position(y, letter))
              for pos, letter in enumerate(y_pred)]
    return np.mean(errors)
def sum_score_diff(y, y_pred, scores):
    """Total absolute score gap between predicted and true rankings.

    Compares, position by position, the score of the prover the model
    predicted with the score of the prover actually ranked there.
    """
    total = 0
    for pos, letter in enumerate(y_pred):
        predicted = REV_MAP[letter]
        actual = REV_MAP[y[pos]]
        total += abs(scores[predicted] - scores[actual])
    return total
def avg_result(results):
    """Most common value in `results` (a pandas Series); ties between
    modes are broken uniformly at random."""
    modes = results.mode()
    shuffled = np.random.permutation(modes)
    return shuffled[0]
def best_result_from_rank(y_pred, results):
    """First definitive result ('Valid'/'Invalid') found walking the
    predicted ranking; otherwise the top-ranked prover's result, or
    'Unknown' for an empty ranking."""
    if not y_pred:
        return 'Unknown'
    for letter in y_pred:
        res = results[REV_MAP[letter] + ' result']
        if res in ('Valid', 'Invalid'):
            return res
    # No prover was definitive: fall back to the top prover's outcome.
    return results[REV_MAP[y_pred[0]] + ' result']
def best_result_from_rank_ae(y_pred, results):
    """Like best_result_from_rank, but a fast (<= 1s) definitive
    Alt-Ergo-1.01 answer is trusted before consulting the ranking."""
    ae_res = results['Alt-Ergo-1.01 result']
    ae_time = results['Alt-Ergo-1.01 time']
    if ae_res in ('Valid', 'Invalid') and ae_time <= 1.0:
        return ae_res
    return best_result_from_rank(y_pred, results)
def time_to_valid(y_pred, results):
    """Cumulative runtime of provers tried in predicted order until one
    returns a definitive answer (or all have been tried)."""
    elapsed = 0
    for letter in y_pred:
        prover = REV_MAP[letter]
        elapsed += results[prover + ' time']
        if results[prover + ' result'] in ('Valid', 'Invalid'):
            break
    return elapsed
def time_to_valid_ae(y_pred, results):
    """Time to a definitive answer when Alt-Ergo-1.01 is always tried
    first with a 1-second budget."""
    ae_res = results['Alt-Ergo-1.01 result']
    ae_time = results['Alt-Ergo-1.01 time']
    if ae_res in ('Valid', 'Invalid') and ae_time <= 1.0:
        # Fast definitive Alt-Ergo answer: its own runtime is the total.
        return ae_time
    if ae_time > 1.0:
        # Alt-Ergo exceeded the 1s budget: charge 1s plus the ranked fallback.
        return 1.0 + time_to_valid(y_pred, results)
    # Alt-Ergo finished quickly but inconclusively: charge its time plus fallback.
    return ae_time + time_to_valid(y_pred, results)
"numpy.log2",
"numpy.mean",
"numpy.array",
"numpy.random.choice",
"numpy.random.permutation",
"numpy.random.shuffle"
] | [((2438, 2458), 'numpy.random.shuffle', 'np.random.shuffle', (['l'], {}), '(l)\n', (2455, 2458), True, 'import numpy as np\n'), ((2575, 2599), 'numpy.random.choice', 'np.random.choice', (['OUTPUT'], {}), '(OUTPUT)\n', (2591, 2599), True, 'import numpy as np\n'), ((4040, 4055), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (4047, 4055), True, 'import numpy as np\n'), ((2518, 2543), 'numpy.random.choice', 'np.random.choice', (['PROVERS'], {}), '(PROVERS)\n', (2534, 2543), True, 'import numpy as np\n'), ((4294, 4318), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (4315, 4318), True, 'import numpy as np\n'), ((922, 933), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (930, 933), True, 'import numpy as np\n'), ((934, 945), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (942, 945), True, 'import numpy as np\n'), ((3561, 3575), 'numpy.log2', 'np.log2', (['(i + 2)'], {}), '(i + 2)\n', (3568, 3575), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.