| code | apis | extract_api |
|---|---|---|
from collections import Counter
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
class Solution(object):
    def dfs(self, cur):
        # Walk the tree while maintaining the running root-to-node prefix sum;
        # for each node, yield how many earlier prefix sums would complete a
        # downward path ending here with the required total.
        if cur:
            self.subsum += cur.val
            yield self.subsum_counter[self.subsum - self.needsum]
            self.subsum_counter[self.subsum] += 1
            for x in self.dfs(cur.left):
                yield x
            for x in self.dfs(cur.right):
                yield x
            # Backtrack: drop this prefix sum before leaving the subtree.
            self.subsum_counter[self.subsum] -= 1
            self.subsum -= cur.val

    def pathSum(self, root, needsum):
        """
        :type root: TreeNode
        :type needsum: int
        :rtype: int
        """
        self.subsum = 0
        self.needsum = needsum
        self.subsum_counter = Counter()
        self.subsum_counter[0] += 1
        return sum(self.dfs(root))
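# --- Hedged usage sketch (an addition, not part of the original cell): the
# three-node tree and target below are illustrative, and TreeNode mirrors the
# commented-out definition above ---
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(10)
root.left = TreeNode(5)
root.right = TreeNode(-3)
print(Solution().pathSum(root, 15))  # expected: 1 (the downward path 10 -> 5)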
|
[
"collections.Counter"
] |
[((855, 864), 'collections.Counter', 'Counter', ([], {}), '()\n', (862, 864), False, 'from collections import Counter\n')]
|
# coding: utf-8

# In[1]:
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import matplotlib as mpimg
import numpy as np
from IPython.display import HTML
import os, sys
import glob
import moviepy
from moviepy.editor import VideoFileClip
from moviepy.editor import *
from IPython import display
from IPython.core.display import display
from IPython.display import Image
import pylab
import scipy.misc

# In[2]:
def region_of_interest(img):
    mask = np.zeros(img.shape, dtype=np.uint8)  # mask image
    roi_corners = np.array([[(200, 675), (1200, 675), (700, 430), (500, 430)]],
                           dtype=np.int32)  # vertices set to form a trapezoidal scene
    channel_count = 1  # img.shape[2]  # image channels
    ignore_mask_color = (255,) * channel_count
    cv2.fillPoly(mask, roi_corners, ignore_mask_color)
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image

# In[3]:
def ColorThreshold(img):  # Threshold yellow and white colors from the RGB, HSV and HLS color spaces
    HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # For yellow
    yellow = cv2.inRange(HSV, (20, 100, 100), (50, 255, 255))
    # For white
    sensitivity_1 = 68
    white = cv2.inRange(HSV, (0, 0, 255-sensitivity_1), (255, 20, 255))
    sensitivity_2 = 60
    HSL = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    white_2 = cv2.inRange(HSL, (0, 255-sensitivity_2, 0), (255, 255, sensitivity_2))
    white_3 = cv2.inRange(img, (200, 200, 200), (255, 255, 255))
    bit_layer = yellow | white | white_2 | white_3
    return bit_layer

# In[4]:
from skimage import morphology

def SobelThr(img):  # Sobel edge detection extraction
    gray = img
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=15)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=15)
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    scaled_sobelx = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    scaled_sobely = np.uint8(255*abs_sobely/np.max(abs_sobely))
    binary_outputabsx = np.zeros_like(scaled_sobelx)
    binary_outputabsx[(scaled_sobelx >= 70) & (scaled_sobelx <= 255)] = 1
    binary_outputabsy = np.zeros_like(scaled_sobely)
    binary_outputabsy[(scaled_sobely >= 100) & (scaled_sobely <= 150)] = 1
    mag_thresh = (100, 200)
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    scale_factor = np.max(gradmag)/255
    gradmag = (gradmag/scale_factor).astype(np.uint8)
    binary_outputmag = np.zeros_like(gradmag)
    binary_outputmag[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    combinedS = np.zeros_like(binary_outputabsx)
    combinedS[(((binary_outputabsx == 1) | (binary_outputabsy == 1)) | (binary_outputmag == 1))] = 1
    return combinedS

# In[5]:
def combinI(b1, b2):  # Combine color threshold + Sobel edge detection
    combined = np.zeros_like(b1)
    combined[((b1 == 1) | (b2 == 255))] = 1
    return combined

# In[6]:
def prespectI(img):  # Calculate the perspective transform and warp the image to a bird's-eye view
    src = np.float32([[728, 475],
                      [1058, 690],
                      [242, 690],
                      [565, 475]])
    dst = np.float32([[1058, 20],
                      [1058, 700],
                      [242, 700],
                      [242, 20]])
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, (1280, 720), flags=cv2.INTER_LINEAR)
    return (warped, M)
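# Note (an addition, not in the original cell): unwrappedframe() further down
# expects an inverse matrix Minv, which prespectI() does not return; one way
# to obtain it is to swap the two point sets above:
# Minv = cv2.getPerspectiveTransform(dst, src)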
# In[7]:
def undistorT(imgorg):  # Calculate undistortion coefficients
    nx = 9
    ny = 6
    objpoints = []
    imgpoints = []
    objp = np.zeros((6*9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:6, 0:9].T.reshape(-1, 2)
    images = glob.glob('./camera_cal/calibration*.jpg')
    for fname in images:  # find corner points and make a list of calibration images
        img = cv2.imread(fname)
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (6, 9), None)
        # If found, draw corners
        if ret == True:
            imgpoints.append(corners)
            objpoints.append(objp)
            # Draw and display the corners
            # cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
    return cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

# In[8]:
def undistresult(img, mtx, dist):  # undistort frame
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    return undist
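# Hedged usage sketch (an addition; `frame` is an assumed input image, and note
# that undistorT ignores its argument and returns the full cv2.calibrateCamera
# tuple, so the camera matrix and distortion coefficients unpack as follows):
# ret, mtx, dist, rvecs, tvecs = undistorT(frame)
# undistorted = undistresult(frame, mtx, dist)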
# In[9]:
def LineFitting(wimgun):  # Fit lane lines
    # Set minimum number of pixels found to recenter window
    minpix = 20
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    histogram = np.sum(wimgun[350:, :], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((wimgun, wimgun, wimgun))
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    # (np.int is deprecated in newer NumPy; int is the modern spelling)
    midpoint = np.int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9
    # Set height of windows
    window_height = np.int(wimgun.shape[0]/nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = wimgun.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Set the width of the windows +/- margin
    margin = 80
    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = wimgun.shape[0] - (window+1)*window_height
        win_y_high = wimgun.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    ploty = np.linspace(0, wimgun.shape[0]-1, wimgun.shape[0])
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Create an image to draw on and an image to show the selection window
    # out_img = np.dstack((wimgun, wimgun, wimgun))*255
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    # plt.plot(left_fitx, ploty, color='yellow')
    # plt.plot(right_fitx, ploty, color='yellow')
    # plt.xlim(0, 1280)
    # plt.ylim(720, 0)
    # plt.imshow(out_img)
    # plt.savefig("./output_images/Window Image"+str(n)+".png")
    # plt.show()
    # Generate a polygon to illustrate the search window area
    # And recast the x and y points into usable format for cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    # plt.title("r")
    # plt.plot(left_fitx, ploty, color='yellow')
    # plt.plot(right_fitx, ploty, color='yellow')
    # plt.xlim(0, 1280)
    # plt.ylim(720, 0)
    # plt.imshow(result)
    # plt.savefig("./output_images/Line Image"+str(n)+".png")
    # plt.show()
    # Define y-value where we want radius of curvature
    # I'll choose the maximum y-value, corresponding to the bottom of the image
    y_eval = np.max(ploty)
    left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
    right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
    # print(left_curverad, right_curverad)
    ym_per_pix = 30/720  # meters per pixel in y dimension
    xm_per_pix = 3.7/700  # meters per pixel in x dimension
    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
    # y_eval = np.max(ploty)
    # Calculate the new radii of curvature
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    # left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
    # right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
    # x-center of the frame; the original used shape[0] (the height), which
    # looks like a bug for a 1280x720 image
    camera_center = wimgun.shape[1]/2
    # lane_center = (right_fitx[719] + left_fitx[719])/2
    car_position = (camera_center - (left_fitx[-1]+right_fitx[-1])/2)*xm_per_pix
    # print(left_curverad1, right_curverad1, lane_offset)
    return (left_fit, ploty, right_fit, left_curverad, right_curverad, car_position)

# Create an image to draw the lines on
def unwrappedframe(img, pm, Minv, left_fit, ploty, right_fit):
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    warp_zero = np.zeros_like(pm).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
    # Combine the result with the original image
    return cv2.addWeighted(img, 1, newwarp, 0.3, 0)
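# --- Hedged end-to-end sketch (an addition: the notebook's driver cells tying
# these helpers together are not shown; the grayscale conversion and the use
# of np.linalg.inv(M) for the inverse warp are illustrative choices) ---
def process_frame(frame, mtx, dist):
    undist = undistresult(frame, mtx, dist)
    edges = SobelThr(cv2.cvtColor(undist, cv2.COLOR_RGB2GRAY))  # binary 0/1 edge map
    colors = ColorThreshold(undist)                            # bitmask 0/255 color map
    combined = region_of_interest(combinI(edges, colors))
    warped, M = prespectI(combined)
    Minv = np.linalg.inv(M)  # inverse perspective matrix for unwrappedframe()
    left_fit, ploty, right_fit, lcur, rcur, pos = LineFitting(warped)
    return unwrappedframe(undist, warped, Minv, left_fit, ploty, right_fit)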
|
[
"numpy.absolute",
"numpy.sum",
"cv2.bitwise_and",
"numpy.argmax",
"cv2.getPerspectiveTransform",
"numpy.polyfit",
"cv2.fillPoly",
"numpy.mean",
"glob.glob",
"cv2.rectangle",
"cv2.inRange",
"cv2.undistort",
"cv2.warpPerspective",
"numpy.zeros_like",
"numpy.int_",
"cv2.cvtColor",
"numpy.max",
"numpy.int",
"numpy.linspace",
"numpy.dstack",
"numpy.hstack",
"cv2.addWeighted",
"cv2.calibrateCamera",
"cv2.Sobel",
"numpy.concatenate",
"numpy.vstack",
"cv2.findChessboardCorners",
"numpy.float32",
"numpy.zeros",
"cv2.imread",
"numpy.array",
"numpy.sqrt"
] |
[((487, 522), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': 'np.uint8'}), '(img.shape, dtype=np.uint8)\n', (495, 522), True, 'import numpy as np\n'), ((553, 630), 'numpy.array', 'np.array', (['[[(200, 675), (1200, 675), (700, 430), (500, 430)]]'], {'dtype': 'np.int32'}), '([[(200, 675), (1200, 675), (700, 430), (500, 430)]], dtype=np.int32)\n', (561, 630), True, 'import numpy as np\n'), ((800, 850), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'roi_corners', 'ignore_mask_color'], {}), '(mask, roi_corners, ignore_mask_color)\n', (812, 850), False, 'import cv2\n'), ((873, 899), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'mask'], {}), '(img, mask)\n', (888, 899), False, 'import cv2\n'), ((1060, 1096), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HSV'], {}), '(img, cv2.COLOR_RGB2HSV)\n', (1072, 1096), False, 'import cv2\n'), ((1128, 1176), 'cv2.inRange', 'cv2.inRange', (['HSV', '(20, 100, 100)', '(50, 255, 255)'], {}), '(HSV, (20, 100, 100), (50, 255, 255))\n', (1139, 1176), False, 'import cv2\n'), ((1229, 1290), 'cv2.inRange', 'cv2.inRange', (['HSV', '(0, 0, 255 - sensitivity_1)', '(255, 20, 255)'], {}), '(HSV, (0, 0, 255 - sensitivity_1), (255, 20, 255))\n', (1240, 1290), False, 'import cv2\n'), ((1319, 1355), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (1331, 1355), False, 'import cv2\n'), ((1370, 1442), 'cv2.inRange', 'cv2.inRange', (['HSL', '(0, 255 - sensitivity_2, 0)', '(255, 255, sensitivity_2)'], {}), '(HSL, (0, 255 - sensitivity_2, 0), (255, 255, sensitivity_2))\n', (1381, 1442), False, 'import cv2\n'), ((1451, 1501), 'cv2.inRange', 'cv2.inRange', (['img', '(200, 200, 200)', '(255, 255, 255)'], {}), '(img, (200, 200, 200), (255, 255, 255))\n', (1462, 1501), False, 'import cv2\n'), ((1731, 1774), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(15)'}), '(gray, cv2.CV_64F, 1, 0, ksize=15)\n', (1740, 1774), False, 'import cv2\n'), ((1787, 1830), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': '(15)'}), '(gray, cv2.CV_64F, 0, 1, ksize=15)\n', (1796, 1830), False, 'import cv2\n'), ((1852, 1871), 'numpy.absolute', 'np.absolute', (['sobelx'], {}), '(sobelx)\n', (1863, 1871), True, 'import numpy as np\n'), ((1889, 1908), 'numpy.absolute', 'np.absolute', (['sobely'], {}), '(sobely)\n', (1900, 1908), True, 'import numpy as np\n'), ((2070, 2098), 'numpy.zeros_like', 'np.zeros_like', (['scaled_sobelx'], {}), '(scaled_sobelx)\n', (2083, 2098), True, 'import numpy as np\n'), ((2211, 2239), 'numpy.zeros_like', 'np.zeros_like', (['scaled_sobely'], {}), '(scaled_sobely)\n', (2224, 2239), True, 'import numpy as np\n'), ((2361, 2395), 'numpy.sqrt', 'np.sqrt', (['(sobelx ** 2 + sobely ** 2)'], {}), '(sobelx ** 2 + sobely ** 2)\n', (2368, 2395), True, 'import numpy as np\n'), ((2514, 2536), 'numpy.zeros_like', 'np.zeros_like', (['gradmag'], {}), '(gradmag)\n', (2527, 2536), True, 'import numpy as np\n'), ((2635, 2667), 'numpy.zeros_like', 'np.zeros_like', (['binary_outputabsx'], {}), '(binary_outputabsx)\n', (2648, 2667), True, 'import numpy as np\n'), ((2893, 2910), 'numpy.zeros_like', 'np.zeros_like', (['b1'], {}), '(b1)\n', (2906, 2910), True, 'import numpy as np\n'), ((3121, 3182), 'numpy.float32', 'np.float32', (['[[728, 475], [1058, 690], [242, 690], [565, 475]]'], {}), '([[728, 475], [1058, 690], [242, 690], [565, 475]])\n', (3131, 3182), True, 'import numpy as np\n'), ((3246, 3306), 'numpy.float32', 'np.float32', (['[[1058, 20], [1058, 700], [242, 700], [242, 20]]'], {}), '([[1058, 20], [1058, 700], [242, 700], [242, 20]])\n', (3256, 3306), True, 'import numpy as np\n'), ((3365, 3402), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (3392, 3402), False, 'import cv2\n'), ((3416, 3480), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(1280, 720)'], {'flags': 'cv2.INTER_LINEAR'}), '(img, M, (1280, 720), flags=cv2.INTER_LINEAR)\n', (3435, 3480), False, 'import cv2\n'), ((3664, 3696), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (3672, 3696), True, 'import numpy as np\n'), ((3755, 3797), 'glob.glob', 'glob.glob', (['"""./camera_cal/calibration*.jpg"""'], {}), "('./camera_cal/calibration*.jpg')\n", (3764, 3797), False, 'import glob\n'), ((4422, 4493), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, gray.shape[::-1], None, None)\n', (4441, 4493), False, 'import cv2\n'), ((4579, 4619), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (4592, 4619), False, 'import cv2\n'), ((4936, 4967), 'numpy.sum', 'np.sum', (['wimgun[350:, :]'], {'axis': '(0)'}), '(wimgun[350:, :], axis=0)\n', (4942, 4967), True, 'import numpy as np\n'), ((5047, 5082), 'numpy.dstack', 'np.dstack', (['(wimgun, wimgun, wimgun)'], {}), '((wimgun, wimgun, wimgun))\n', (5056, 5082), True, 'import numpy as np\n'), ((5234, 5264), 'numpy.int', 'np.int', (['(histogram.shape[0] / 2)'], {}), '(histogram.shape[0] / 2)\n', (5240, 5264), True, 'import numpy as np\n'), ((5280, 5311), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (5289, 5311), True, 'import numpy as np\n'), ((5440, 5474), 'numpy.int', 'np.int', (['(wimgun.shape[0] / nwindows)'], {}), '(wimgun.shape[0] / nwindows)\n', (5446, 5474), True, 'import numpy as np\n'), ((5591, 5611), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (5599, 5611), True, 'import numpy as np\n'), ((5627, 5647), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (5635, 5647), True, 'import numpy as np\n'), ((7437, 7467), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (7451, 7467), True, 'import numpy as np\n'), ((7490, 7521), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (7504, 7521), True, 'import numpy as np\n'), ((7793, 7820), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (7803, 7820), True, 'import numpy as np\n'), ((7837, 7866), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (7847, 7866), True, 'import numpy as np\n'), ((7922, 7974), 'numpy.linspace', 'np.linspace', (['(0)', '(wimgun.shape[0] - 1)', 'wimgun.shape[0]'], {}), '(0, wimgun.shape[0] - 1, wimgun.shape[0])\n', (7933, 7974), True, 'import numpy as np\n'), ((8269, 8291), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (8282, 8291), True, 'import numpy as np\n'), ((9099, 9148), 'numpy.hstack', 'np.hstack', (['(left_line_window1, left_line_window2)'], {}), '((left_line_window1, left_line_window2))\n', (9108, 9148), True, 'import numpy as np\n'), ((9359, 9410), 'numpy.hstack', 'np.hstack', (['(right_line_window1, right_line_window2)'], {}), '((right_line_window1, right_line_window2))\n', (9368, 9410), True, 'import numpy as np\n'), ((9608, 9655), 'cv2.addWeighted', 'cv2.addWeighted', (['out_img', '(1)', 'window_img', '(0.3)', '(0)'], {}), '(out_img, 1, window_img, 0.3, 0)\n', (9623, 9655), False, 'import cv2\n'), ((10080, 10093), 'numpy.max', 'np.max', (['ploty'], {}), '(ploty)\n', (10086, 10093), True, 'import numpy as np\n'), ((10529, 10586), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(left_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)\n', (10539, 10586), True, 'import numpy as np\n'), ((10602, 10660), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(right_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)\n', (10612, 10660), True, 'import numpy as np\n'), ((11814, 11834), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (11822, 11834), True, 'import numpy as np\n'), ((11850, 11870), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (11858, 11870), True, 'import numpy as np\n'), ((11949, 11993), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (11958, 11993), True, 'import numpy as np\n'), ((12230, 12262), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (12239, 12262), True, 'import numpy as np\n'), ((12474, 12541), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'Minv', '(img.shape[1], img.shape[0])'], {}), '(color_warp, Minv, (img.shape[1], img.shape[0]))\n', (12493, 12541), False, 'import cv2\n'), ((12609, 12649), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(1)', 'newwarp', '(0.3)', '(0)'], {}), '(img, 1, newwarp, 0.3, 0)\n', (12624, 12649), False, 'import cv2\n'), ((2416, 2431), 'numpy.max', 'np.max', (['gradmag'], {}), '(gradmag)\n', (2422, 2431), True, 'import numpy as np\n'), ((3916, 3933), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (3926, 3933), False, 'import cv2\n'), ((3988, 4025), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4000, 4025), False, 'import cv2\n'), ((4095, 4140), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(6, 9)', 'None'], {}), '(gray, (6, 9), None)\n', (4120, 4140), False, 'import cv2\n'), ((5331, 5362), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (5340, 5362), True, 'import numpy as np\n'), ((6356, 6456), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xleft_low, win_y_low), (win_xleft_high,\n win_y_high), (0, 255, 0), 2)\n', (6369, 6456), False, 'import cv2\n'), ((6455, 6557), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xright_low, win_y_low), (win_xright_high,\n win_y_high), (0, 255, 0), 2)\n', (6468, 6557), False, 'import cv2\n'), ((9489, 9513), 'numpy.int_', 'np.int_', (['[left_line_pts]'], {}), '([left_line_pts])\n', (9496, 9513), True, 'import numpy as np\n'), ((9556, 9581), 'numpy.int_', 'np.int_', (['[right_line_pts]'], {}), '([right_line_pts])\n', (9563, 9581), True, 'import numpy as np\n'), ((10169, 10197), 'numpy.absolute', 'np.absolute', (['(2 * left_fit[0])'], {}), '(2 * left_fit[0])\n', (10180, 10197), True, 'import numpy as np\n'), ((10274, 10303), 'numpy.absolute', 'np.absolute', (['(2 * right_fit[0])'], {}), '(2 * right_fit[0])\n', (10285, 10303), True, 'import numpy as np\n'), ((10825, 10856), 'numpy.absolute', 'np.absolute', (['(2 * left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (10836, 10856), True, 'import numpy as np\n'), ((10950, 10982), 'numpy.absolute', 'np.absolute', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (10961, 10982), True, 'import numpy as np\n'), ((12341, 12355), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (12348, 12355), True, 'import numpy as np\n'), ((1953, 1971), 'numpy.max', 'np.max', (['abs_sobelx'], {}), '(abs_sobelx)\n', (1959, 1971), True, 'import numpy as np\n'), ((2017, 2035), 'numpy.max', 'np.max', (['abs_sobely'], {}), '(abs_sobely)\n', (2023, 2035), True, 'import numpy as np\n'), ((11897, 11914), 'numpy.zeros_like', 'np.zeros_like', (['pm'], {}), '(pm)\n', (11910, 11914), True, 'import numpy as np\n'), ((7217, 7250), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (7224, 7250), True, 'import numpy as np\n'), ((7338, 7372), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (7345, 7372), True, 'import numpy as np\n'), ((8941, 8979), 'numpy.vstack', 'np.vstack', (['[left_fitx - margin, ploty]'], {}), '([left_fitx - margin, ploty])\n', (8950, 8979), True, 'import numpy as np\n'), ((9197, 9236), 'numpy.vstack', 'np.vstack', (['[right_fitx - margin, ploty]'], {}), '([right_fitx - margin, ploty])\n', (9206, 9236), True, 'import numpy as np\n'), ((12103, 12132), 'numpy.vstack', 'np.vstack', (['[left_fitx, ploty]'], {}), '([left_fitx, ploty])\n', (12112, 12132), True, 'import numpy as np\n'), ((9038, 9076), 'numpy.vstack', 'np.vstack', (['[left_fitx + margin, ploty]'], {}), '([left_fitx + margin, ploty])\n', (9047, 9076), True, 'import numpy as np\n'), ((9296, 9335), 'numpy.vstack', 'np.vstack', (['[right_fitx + margin, ploty]'], {}), '([right_fitx + margin, ploty])\n', (9305, 9335), True, 'import numpy as np\n'), ((12185, 12215), 'numpy.vstack', 'np.vstack', (['[right_fitx, ploty]'], {}), '([right_fitx, ploty])\n', (12194, 12215), True, 'import numpy as np\n')]
|
from blspy import AugSchemeMPL

from src.types.coin_solution import CoinSolution
from src.types.spend_bundle import SpendBundle
from src.wallet.puzzles import p2_delegated_puzzle
from src.wallet.puzzles.puzzle_utils import make_create_coin_condition
from tests.util.key_tool import KeyTool
from src.util.ints import uint32
from src.wallet.derive_keys import master_sk_to_wallet_sk

MASTER_KEY = AugSchemeMPL.key_gen(bytes([1] * 32))

def puzzle_program_for_index(index: uint32):
    return p2_delegated_puzzle.puzzle_for_pk(
        bytes(master_sk_to_wallet_sk(MASTER_KEY, index).get_g1())
    )

def puzzle_hash_for_index(index: uint32):
    return puzzle_program_for_index(index).get_hash()

def conditions_for_payment(puzzle_hash_amount_pairs):
    conditions = [
        make_create_coin_condition(ph, amount)
        for ph, amount in puzzle_hash_amount_pairs
    ]
    return conditions

def make_default_keyUtil():
    keychain = KeyTool()
    private_keys = [master_sk_to_wallet_sk(MASTER_KEY, uint32(i)) for i in range(10)]
    secret_exponents = [int.from_bytes(bytes(_), "big") for _ in private_keys]
    keychain.add_secret_exponents(secret_exponents)
    return keychain

DEFAULT_KEYTOOL = make_default_keyUtil()

def spend_coin(coin, conditions, index, keychain=DEFAULT_KEYTOOL):
    solution = p2_delegated_puzzle.solution_for_conditions(
        puzzle_program_for_index(index), conditions
    )
    return build_spend_bundle(coin, solution, keychain)

def build_spend_bundle(coin, solution, keychain=DEFAULT_KEYTOOL):
    coin_solution = CoinSolution(coin, solution)
    signature = keychain.signature_for_solution(solution, bytes(coin))
    return SpendBundle([coin_solution], signature)
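# Hedged usage sketch (an addition; the wallet index and amount below are
# illustrative, and the recipient hash is simply this wallet's own puzzle hash):
# ph = puzzle_hash_for_index(uint32(0))
# payment_conditions = conditions_for_payment([(ph, 1000)])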
|
[
"src.wallet.derive_keys.master_sk_to_wallet_sk",
"src.types.spend_bundle.SpendBundle",
"src.types.coin_solution.CoinSolution",
"src.wallet.puzzles.puzzle_utils.make_create_coin_condition",
"tests.util.key_tool.KeyTool",
"src.util.ints.uint32"
] |
[((943, 952), 'tests.util.key_tool.KeyTool', 'KeyTool', ([], {}), '()\n', (950, 952), False, 'from tests.util.key_tool import KeyTool\n'), ((1564, 1592), 'src.types.coin_solution.CoinSolution', 'CoinSolution', (['coin', 'solution'], {}), '(coin, solution)\n', (1576, 1592), False, 'from src.types.coin_solution import CoinSolution\n'), ((1675, 1714), 'src.types.spend_bundle.SpendBundle', 'SpendBundle', (['[coin_solution]', 'signature'], {}), '([coin_solution], signature)\n', (1686, 1714), False, 'from src.types.spend_bundle import SpendBundle\n'), ((780, 818), 'src.wallet.puzzles.puzzle_utils.make_create_coin_condition', 'make_create_coin_condition', (['ph', 'amount'], {}), '(ph, amount)\n', (806, 818), False, 'from src.wallet.puzzles.puzzle_utils import make_create_coin_condition\n'), ((1008, 1017), 'src.util.ints.uint32', 'uint32', (['i'], {}), '(i)\n', (1014, 1017), False, 'from src.util.ints import uint32\n'), ((541, 582), 'src.wallet.derive_keys.master_sk_to_wallet_sk', 'master_sk_to_wallet_sk', (['MASTER_KEY', 'index'], {}), '(MASTER_KEY, index)\n', (563, 582), False, 'from src.wallet.derive_keys import master_sk_to_wallet_sk\n')]
|
import json
from typing import TYPE_CHECKING, Any, Dict

import hathor
from hathor.conf import HathorSettings
from hathor.p2p.messages import ProtocolMessages
from hathor.p2p.states.base import BaseState
from hathor.p2p.utils import get_genesis_short_hash, get_settings_hello_dict

if TYPE_CHECKING:
    from hathor.p2p.protocol import HathorProtocol  # noqa: F401

settings = HathorSettings()

class HelloState(BaseState):
    def __init__(self, protocol: 'HathorProtocol') -> None:
        super().__init__(protocol)
        self.cmd_map.update({
            ProtocolMessages.HELLO: self.handle_hello,
        })

    def _app(self) -> str:
        return f'Hathor v{hathor.__version__}'

    def _get_hello_data(self) -> Dict[str, Any]:
        """ Returns a dict with information about this node that will
        be sent to a peer.
        """
        protocol = self.protocol
        remote = protocol.transport.getPeer()
        return {
            'app': self._app(),
            'network': protocol.network,
            'remote_address': '{}:{}'.format(remote.host, remote.port),
            'genesis_short_hash': get_genesis_short_hash(),
            'timestamp': protocol.node.reactor.seconds(),
            'settings_dict': get_settings_hello_dict(),
            'capabilities': [],
        }

    def on_enter(self) -> None:
        # After a connection is made, we just send a HELLO message.
        self.send_hello()

    def send_hello(self) -> None:
        """ Send a HELLO message, identifying the app and giving extra
        information about this node to the peer.
        """
        data = self._get_hello_data()
        self.send_message(ProtocolMessages.HELLO, json.dumps(data))

    def handle_hello(self, payload: str) -> None:
        """ Executed when a HELLO message is received. It basically
        checks the application compatibility.
        """
        protocol = self.protocol
        try:
            data = json.loads(payload)
        except ValueError:
            protocol.send_error_and_close_connection('Invalid payload.')
            return

        required_fields = {'app', 'network', 'remote_address', 'genesis_short_hash', 'timestamp', 'capabilities'}
        # settings_dict is optional
        if not set(data).issuperset(required_fields):
            # If data does not contain all required fields
            protocol.send_error_and_close_connection('Invalid payload.')
            return

        if data['app'] != self._app():
            self.log.warn('different versions', theirs=data['app'], ours=self._app())

        if data['network'] != protocol.network:
            protocol.send_error_and_close_connection('Wrong network.')
            return

        if data['genesis_short_hash'] != get_genesis_short_hash():
            protocol.send_error_and_close_connection('Different genesis.')
            return

        if abs(data['timestamp'] - protocol.node.reactor.seconds()) > settings.MAX_FUTURE_TIMESTAMP_ALLOWED / 2:
            protocol.send_error_and_close_connection('Nodes timestamps too far apart.')
            return

        settings_dict = get_settings_hello_dict()
        if 'settings_dict' in data and data['settings_dict'] != settings_dict:
            # If settings_dict is sent we must validate it
            protocol.send_error_and_close_connection(
                'Settings values are different. {}'.format(json.dumps(settings_dict))
            )
            return

        protocol.app_version = data['app']
        protocol.change_state(protocol.PeerState.PEER_ID)
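# For reference, a hypothetical HELLO payload that passes the required_fields
# check above (all values are illustrative, not taken from a real node):
# {"app": "Hathor v0.x.y", "network": "mainnet",
#  "remote_address": "203.0.113.7:40403", "genesis_short_hash": "0001e29b...",
#  "timestamp": 1589731200, "capabilities": []}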
|
[
"json.loads",
"hathor.p2p.utils.get_genesis_short_hash",
"json.dumps",
"hathor.p2p.utils.get_settings_hello_dict",
"hathor.conf.HathorSettings"
] |
[((377, 393), 'hathor.conf.HathorSettings', 'HathorSettings', ([], {}), '()\n', (391, 393), False, 'from hathor.conf import HathorSettings\n'), ((3115, 3140), 'hathor.p2p.utils.get_settings_hello_dict', 'get_settings_hello_dict', ([], {}), '()\n', (3138, 3140), False, 'from hathor.p2p.utils import get_genesis_short_hash, get_settings_hello_dict\n'), ((1125, 1149), 'hathor.p2p.utils.get_genesis_short_hash', 'get_genesis_short_hash', ([], {}), '()\n', (1147, 1149), False, 'from hathor.p2p.utils import get_genesis_short_hash, get_settings_hello_dict\n'), ((1238, 1263), 'hathor.p2p.utils.get_settings_hello_dict', 'get_settings_hello_dict', ([], {}), '()\n', (1261, 1263), False, 'from hathor.p2p.utils import get_genesis_short_hash, get_settings_hello_dict\n'), ((1689, 1705), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1699, 1705), False, 'import json\n'), ((1949, 1968), 'json.loads', 'json.loads', (['payload'], {}), '(payload)\n', (1959, 1968), False, 'import json\n'), ((2751, 2775), 'hathor.p2p.utils.get_genesis_short_hash', 'get_genesis_short_hash', ([], {}), '()\n', (2773, 2775), False, 'from hathor.p2p.utils import get_genesis_short_hash, get_settings_hello_dict\n'), ((3392, 3417), 'json.dumps', 'json.dumps', (['settings_dict'], {}), '(settings_dict)\n', (3402, 3417), False, 'import json\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from .instant_function import data_vars

def create_categorical_onehot(df, category_columns):
    category_dataframe = []
    for category_column in category_columns:
        category_dataframe.append(pd.get_dummies(df[category_column], prefix='col_' + category_column))
    category_dataframe_feature = pd.concat(category_dataframe, axis=1)
    return category_dataframe_feature

def create_norm_continuos_columns(df, continuos_columns):
    df_norm = df[continuos_columns].fillna(0)
    norm_continuos_columns = (df_norm[continuos_columns] - df_norm[continuos_columns].mean()) / (df_norm[continuos_columns].std())
    mean_dict = dict(df[continuos_columns].mean())
    std_dict = dict(df[continuos_columns].std())
    return norm_continuos_columns, mean_dict, std_dict

def combine_continus_norm_and_categorical_onehot_and_sep_target(df, continuos_columns, category_columns, target_columns):
    norm_continuos_columns, mean_dict, std_dict = create_norm_continuos_columns(df, continuos_columns)
    category_dataframe_feature = create_categorical_onehot(df, category_columns)
    target_df = df[target_columns]
    feature_df = pd.concat([norm_continuos_columns, category_dataframe_feature], axis=1)
    feature_columns = feature_df.columns
    return feature_df, target_df, mean_dict, std_dict, feature_columns

def get_IV(feature_df, target_df):
    final_iv, IV = data_vars(feature_df, target_df)
    ivs = np.zeros(len(feature_df.columns))
    for i, col in enumerate(feature_df.columns):
        ivs[i] = IV[IV['VAR_NAME'] == col]['IV'].values[0]
    return IV, ivs

def norm_mat(x):
    return x / (np.sqrt(np.sum(x**2, 1))).reshape(-1, 1)

def get_pos_feat(feature_df, target_df, ivs):
    pos_feat = feature_df.loc[target_df[target_df == 1].index].values * ivs
    pos_feat_norm = norm_mat(pos_feat)
    return pos_feat_norm

def process_test_data(df, continuos_columns, category_columns, mean_dict, std_dict, feature_columns):
    df_norm = df[continuos_columns].fillna(0)
    # Normalize the test set with the *training* statistics; the original line
    # indexed df with mean_dict itself and ignored the fillna result above.
    norm_continuos_columns = (df_norm[continuos_columns] - list(mean_dict.values())) / list(std_dict.values())
    category_dataframe = []
    for category_column in category_columns:
        category_dataframe.append(pd.get_dummies(df[category_column], prefix='col_' + category_column))
    category_dataframe_feature = pd.concat(category_dataframe, axis=1)
    feature_test = pd.concat([norm_continuos_columns, category_dataframe_feature], axis=1)
    non_in_test_columns = list(set(list(feature_columns)) - set(list(feature_test.columns)))
    for non_in_test_column in non_in_test_columns:
        feature_test[non_in_test_column] = 0
    feature_test = feature_test[feature_columns]
    return feature_test
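# Hedged usage sketch of the train/test flow above (an addition; the DataFrames
# and column names are illustrative assumptions, not part of the original module):
# feats, target, mean_d, std_d, cols = combine_continus_norm_and_categorical_onehot_and_sep_target(
#     train_df, ['age', 'income'], ['city'], 'label')
# test_feats = process_test_data(test_df, ['age', 'income'], ['city'], mean_d, std_d, cols)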
|
[
"pandas.get_dummies",
"numpy.sum",
"pandas.concat"
] |
[((377, 414), 'pandas.concat', 'pd.concat', (['category_dataframe'], {'axis': '(1)'}), '(category_dataframe, axis=1)\n', (386, 414), True, 'import pandas as pd\n'), ((1204, 1275), 'pandas.concat', 'pd.concat', (['[norm_continuos_columns, category_dataframe_feature]'], {'axis': '(1)'}), '([norm_continuos_columns, category_dataframe_feature], axis=1)\n', (1213, 1275), True, 'import pandas as pd\n'), ((2364, 2401), 'pandas.concat', 'pd.concat', (['category_dataframe'], {'axis': '(1)'}), '(category_dataframe, axis=1)\n', (2373, 2401), True, 'import pandas as pd\n'), ((2425, 2496), 'pandas.concat', 'pd.concat', (['[norm_continuos_columns, category_dataframe_feature]'], {'axis': '(1)'}), '([norm_continuos_columns, category_dataframe_feature], axis=1)\n', (2434, 2496), True, 'import pandas as pd\n'), ((272, 340), 'pandas.get_dummies', 'pd.get_dummies', (['df[category_column]'], {'prefix': "('col_' + category_column)"}), "(df[category_column], prefix='col_' + category_column)\n", (286, 340), True, 'import pandas as pd\n'), ((2259, 2327), 'pandas.get_dummies', 'pd.get_dummies', (['df[category_column]'], {'prefix': "('col_' + category_column)"}), "(df[category_column], prefix='col_' + category_column)\n", (2273, 2327), True, 'import pandas as pd\n'), ((1687, 1704), 'numpy.sum', 'np.sum', (['(x ** 2)', '(1)'], {}), '(x ** 2, 1)\n', (1693, 1704), True, 'import numpy as np\n')]
|
# Generated by Django 3.0.4 on 2020-05-18 10:18
from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ('character', '0004_auto_20200518_1215'),
    ]

    operations = [
        migrations.AlterField(
            model_name='character',
            name='notes',
            field=models.TextField(blank=True, default='', max_length=1000),
            preserve_default=False,
        ),
    ]
|
[
"django.db.models.TextField"
] |
[((340, 397), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(1000)'}), "(blank=True, default='', max_length=1000)\n", (356, 397), False, 'from django.db import migrations, models\n')]
|
# Required modules and libraries
from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters
import telegram
from telegram import InlineQueryResultArticle, ParseMode, \
    InputTextMessageContent
import requests
import wikipediaapi
import re
from uuid import uuid4

# Variables used
wiki_wiki = wikipediaapi.Wikipedia('fa')
message = "What do you wanna search for?"
aboutmsg = "Searching Wikipedia has never been easier! Just send a topic."

# Private chat with bot:
# uses the English Wikipedia for English inputs and the Persian one otherwise
def echo(update, context):
    output = re.search(r'^[a-zA-Z]+\Z', update.message.text)
    if output:
        wiki_wiki = wikipediaapi.Wikipedia('en')
    else:
        wiki_wiki = wikipediaapi.Wikipedia('fa')
    page_py = wiki_wiki.page(update.message.text)
    if page_py.exists():
        wikimsg = (page_py.fullurl)
        update.message.reply_text(wikimsg)
    else:
        update.message.reply_text("Your search query had no results.")

# In-line mode
def inlinequery(update, context):
    """Handle the inline query."""
    query = update.inline_query.query
    output = re.search(r'^[a-zA-Z]+\Z', query)
    if output:
        wiki_wiki = wikipediaapi.Wikipedia('en')
    else:
        wiki_wiki = wikipediaapi.Wikipedia('fa')
    page_py = wiki_wiki.page(query)
    if page_py.exists():
        wikimsg = (page_py.fullurl)
        pagetitle = page_py.title
        results = [InlineQueryResultArticle(
            description="Searching for" + " " + query + " " + "in Wikipedia",
            id=uuid4(),
            title=pagetitle,
            input_message_content=InputTextMessageContent(
                message_text=wikimsg))
        ]
        update.inline_query.answer(results)
    else:
        results = [
            InlineQueryResultArticle(
                id=uuid4(),
                title="No results",
                input_message_content=InputTextMessageContent(query)
            )]
        update.inline_query.answer(results)

def start(update, context):
    context.bot.send_message(chat_id=update.effective_chat.id, text=message)

def about(update, context):
    context.bot.send_message(chat_id=update.effective_chat.id, text=aboutmsg)

updater = Updater(token='TOKEN', use_context=True)
bot = telegram.Bot(token='TOKEN')
dispatcher = updater.dispatcher

def main():
    start_handler = CommandHandler('start', start)
    dispatcher.add_handler(start_handler)
    dispatcher.add_handler(CommandHandler('about', about))
    dispatcher.add_handler(MessageHandler(Filters.text, echo))
    dispatcher.add_handler(InlineQueryHandler(inlinequery))
    updater.start_polling()
    updater.idle()

if __name__ == '__main__':
    main()
|
[
"uuid.uuid4",
"telegram.InputTextMessageContent",
"telegram.ext.InlineQueryHandler",
"telegram.ext.Updater",
"telegram.Bot",
"telegram.ext.MessageHandler",
"wikipediaapi.Wikipedia",
"telegram.ext.CommandHandler",
"re.search"
] |
[((343, 371), 'wikipediaapi.Wikipedia', 'wikipediaapi.Wikipedia', (['"""fa"""'], {}), "('fa')\n", (365, 371), False, 'import wikipediaapi\n'), ((2332, 2372), 'telegram.ext.Updater', 'Updater', ([], {'token': '"""TOKEN"""', 'use_context': '(True)'}), "(token='TOKEN', use_context=True)\n", (2339, 2372), False, 'from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters\n'), ((2382, 2409), 'telegram.Bot', 'telegram.Bot', ([], {'token': '"""TOKEN"""'}), "(token='TOKEN')\n", (2394, 2409), False, 'import telegram\n'), ((631, 678), 're.search', 're.search', (['"""^[a-zA-Z]+\\\\Z"""', 'update.message.text'], {}), "('^[a-zA-Z]+\\\\Z', update.message.text)\n", (640, 678), False, 'import re\n'), ((1192, 1225), 're.search', 're.search', (['"""^[a-zA-Z]+\\\\Z"""', 'query'], {}), "('^[a-zA-Z]+\\\\Z', query)\n", (1201, 1225), False, 'import re\n'), ((2479, 2509), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start'], {}), "('start', start)\n", (2493, 2509), False, 'from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters\n'), ((716, 744), 'wikipediaapi.Wikipedia', 'wikipediaapi.Wikipedia', (['"""en"""'], {}), "('en')\n", (738, 744), False, 'import wikipediaapi\n'), ((777, 805), 'wikipediaapi.Wikipedia', 'wikipediaapi.Wikipedia', (['"""fa"""'], {}), "('fa')\n", (799, 805), False, 'import wikipediaapi\n'), ((1263, 1291), 'wikipediaapi.Wikipedia', 'wikipediaapi.Wikipedia', (['"""en"""'], {}), "('en')\n", (1285, 1291), False, 'import wikipediaapi\n'), ((1324, 1352), 'wikipediaapi.Wikipedia', 'wikipediaapi.Wikipedia', (['"""fa"""'], {}), "('fa')\n", (1346, 1352), False, 'import wikipediaapi\n'), ((2582, 2612), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""about"""', 'about'], {}), "('about', about)\n", (2596, 2612), False, 'from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters\n'), ((2641, 2675), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.text', 'echo'], {}), '(Filters.text, echo)\n', (2655, 2675), False, 'from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters\n'), ((2705, 2736), 'telegram.ext.InlineQueryHandler', 'InlineQueryHandler', (['inlinequery'], {}), '(inlinequery)\n', (2723, 2736), False, 'from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters\n'), ((1629, 1636), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1634, 1636), False, 'from uuid import uuid4\n'), ((1703, 1748), 'telegram.InputTextMessageContent', 'InputTextMessageContent', ([], {'message_text': 'wikimsg'}), '(message_text=wikimsg)\n', (1726, 1748), False, 'from telegram import InlineQueryResultArticle, ParseMode, InputTextMessageContent\n'), ((1917, 1924), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1922, 1924), False, 'from uuid import uuid4\n'), ((1994, 2024), 'telegram.InputTextMessageContent', 'InputTextMessageContent', (['query'], {}), '(query)\n', (2017, 2024), False, 'from telegram import InlineQueryResultArticle, ParseMode, InputTextMessageContent\n')]
|
from data import DataSeq
from bubblesort import BubbleSort
from bucketsort import BucketSort
from combsort import CombSort
from cyclesort import CycleSort
from heapsort import HeapSort
from insertionsort import InsertionSort
from mergesort import MergeSort
from monkeysort import MonkeySort
from quicksort import QuickSort
from radixsort import RadixSort
from selectionsort import SelectionSort
from shellsort import ShellSort
import argparse

parser = argparse.ArgumentParser(description="Sort Visulization")
parser.add_argument('-l', '--length', type=int, default=64)
parser.add_argument('-i', '--interval', type=int, default=1)
parser.add_argument('-t', '--sort-type', type=str, default='BubbleSort',
                    choices=["BubbleSort", "BucketSort", "CombSort",
                             "CycleSort", "HeapSort", "InsertionSort",
                             "MergeSort", "MonkeySort", "QuickSort",
                             "RadixSort", "SelectionSort", "ShellSort"])
parser.add_argument('-r', '--resample', action='store_true')
parser.add_argument('-s', '--sparse', action='store_true')
parser.add_argument('-n', '--no-record', action='store_true')
args = parser.parse_args()

if __name__ == "__main__":
    MAXLENGTH = 1000
    Length = args.length if args.length < MAXLENGTH else MAXLENGTH
    Interval = args.interval
    SortType = args.sort_type
    Resampling = args.resample
    Sparse = args.sparse
    NoRecord = args.no_record
    try:
        SortMethod = eval(SortType)
    except:
        print("Sort Type Not Found! Please Check if %s Exists or Not!" % SortType)
        exit()
    ds = DataSeq(Length, time_interval=Interval, sort_title=SortType, is_resampling=Resampling, is_sparse=Sparse, record=not NoRecord)
    ds.Visualize()
    ds.StartTimer()
    SortMethod(ds)
    ds.StopTimer()
    ds.SetTimeInterval(0)
    ds.Visualize()
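# Hedged usage note (an addition; the entry-point file name is an assumption,
# the script above could be saved as, e.g., visualize.py):
# python visualize.py --length 128 --interval 1 --sort-type QuickSort --no-record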
|
[
"data.DataSeq",
"argparse.ArgumentParser"
] |
[((451, 507), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sort Visulization"""'}), "(description='Sort Visulization')\n", (474, 507), False, 'import argparse\n'), ((1654, 1784), 'data.DataSeq', 'DataSeq', (['Length'], {'time_interval': 'Interval', 'sort_title': 'SortType', 'is_resampling': 'Resampling', 'is_sparse': 'Sparse', 'record': '(not NoRecord)'}), '(Length, time_interval=Interval, sort_title=SortType, is_resampling=\n Resampling, is_sparse=Sparse, record=not NoRecord)\n', (1661, 1784), False, 'from data import DataSeq\n')]
|
# -*- coding:utf-8 -*-
# @Time    : 2019-12-27 16:11
# @Author  : liuqiuxi
# @Email   : <EMAIL>
# @File    : stockfeedswinddatabase.py
# @Project : datafeeds
# @Software: PyCharm
# @Remark  : These are the classes of the stock market
import datetime
import copy

import pandas as pd
import numpy as np

from datafeeds.utils import BarFeedConfig
from datafeeds.winddatabasefeeds import BaseWindDataBase
from datafeeds import logger

class AShareCalendarWindDataBase(BaseWindDataBase):
    LOGGER_NAME = "AShareCalendarWindDataBase"

    def __init__(self):
        super(AShareCalendarWindDataBase, self).__init__()
        self.__table_name_dict = {"AShareCalendarWindDataBase": "AShareCalendar"}

    def get_calendar(self, begin_datetime, end_datetime):
        connect = self.connect()
        begin_datetime = begin_datetime.strftime("%Y%m%d")
        end_datetime = end_datetime.strftime("%Y%m%d")
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        sqlClause = ("select trade_days as dateTime from " + table_parameter + " where trade_days >= " +
                     "'" + begin_datetime + "' and trade_days <= '" + end_datetime + "' ")
        data = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        data.rename(columns={"datetime": "dateTime"}, inplace=True)
        data.drop_duplicates(subset=["dateTime"], inplace=True)
        data.sort_values(by="dateTime", inplace=True)
        data.reset_index(inplace=True, drop=True)
        data.loc[:, "dateTime"] = data.loc[:, "dateTime"].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        data = pd.DataFrame(data={"dateTime": data.loc[:, "dateTime"]})
        connect.close()
        return data
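# Hedged usage sketch (an addition; the date range is illustrative, and a
# reachable Wind/Oracle connection configured for BaseWindDataBase is assumed):
# cal = AShareCalendarWindDataBase()
# trading_days = cal.get_calendar(datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 31))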
class AShareQuotationWindDataBase(BaseWindDataBase):
    LOGGER_NAME = "AShareQuotationWindDataBase"

    def __init__(self):
        super(AShareQuotationWindDataBase, self).__init__()
        self.__need_adjust_columns = ["preClose", "open", "high", "low", "close", "volume", "avgPrice"]
        self.__table_name_dict = {"AShareQuotationWindDataBase": "AShareEODPrices"}

    def get_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted="F"):
        limit_numbers = BarFeedConfig.get_wind().get("LimitNumbers")
        if len(securityIds) < limit_numbers:
            data = self.__get_quotation(securityIds=securityIds, items=items, frequency=frequency,
                                        begin_datetime=begin_datetime, end_datetime=end_datetime, adjusted=adjusted)
        else:
            data = pd.DataFrame()
            for i in range(int(len(securityIds) / limit_numbers) + 1):
                data0 = self.__get_quotation(securityIds=securityIds[i*limit_numbers: i*limit_numbers + limit_numbers],
                                             items=items, frequency=frequency, begin_datetime=begin_datetime,
                                             end_datetime=end_datetime, adjusted=adjusted)
                data = pd.concat(objs=[data, data0], axis=0, join="outer")
        data.sort_values(by=["securityId", "dateTime"], axis=0, ascending=True, inplace=True)
        data.reset_index(inplace=True, drop=True)
        return data

    def __get_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted):
        connect = self.connect()
        begin_datetime = begin_datetime.strftime("%Y%m%d")
        end_datetime = end_datetime.strftime("%Y%m%d")
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        if frequency != 86400:
            raise BaseException("[%s] we can't supply frequency: %d " % (self.LOGGER_NAME, frequency))
        if len(securityIds) == 1:
            sqlClause = ("select * from " + table_parameter + " where trade_dt >= '" + begin_datetime + "' " +
                         "and trade_dt <= '" + end_datetime + "' and s_info_windcode = '" + securityIds[0] + "'")
        else:
            sqlClause = ("select * from " + table_parameter + " where trade_dt >= '" + begin_datetime + "' and " +
                         "trade_dt <= '" + end_datetime + "' and s_info_windcode in " + str(tuple(securityIds)) + "")
        data = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        rename_dict = BarFeedConfig.get_wind_database_items().get(self.LOGGER_NAME)
        data.rename(columns=rename_dict, inplace=True)
        # change some parameter values to normal values
        data.loc[:, 'dateTime'] = data.loc[:, 'dateTime'].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        data.loc[:, "Chg"] = data.loc[:, "Chg"] / 100
        data.loc[:, "amount"] = data.loc[:, "amount"] * 1000
        # use adjfactor to get adjusted prices
        if adjusted in ["F", "B"]:
            data = data.groupby(by="securityId").apply(lambda x: self.__get_adj_price(DataFrame=x, adjusted=adjusted))
            data.reset_index(inplace=True, drop=True)
        data.sort_values(by=["securityId", "dateTime"], axis=0, ascending=True, inplace=True)
        data.reset_index(inplace=True, drop=True)
        # choose requested items from data
        log = logger.get_logger(name=self.LOGGER_NAME)
        default_items = list(rename_dict.values())
        real_items = []
        for item in items:
            if item in ["securityId", "dateTime"]:
                log.info("There is no need to add item: %s to parameters items" % item)
            elif item in default_items:
                real_items.append(item)
            else:
                log.warning("item %s not in default items, so we remove this item from data" % item)
        data = data.loc[:, ["dateTime", "securityId"] + real_items].copy(deep=True)
        connect.close()
        return data

    def __get_adj_price(self, DataFrame, adjusted):
        data = DataFrame.copy(deep=True)
        data.sort_values(by="dateTime", axis=0, ascending=True, inplace=True)
        data.reset_index(inplace=True, drop=True)
        if adjusted == "F":
            adjfactor = data.loc[:, "adjfactor"][len(data) - 1]
        elif adjusted == "B":
            adjfactor = data.loc[:, "adjfactor"][0]
        else:
            raise ValueError("[%s] adjusted: %s isn't supported" % (self.LOGGER_NAME, adjusted))
        data.loc[:, "adjfactor"] = data.loc[:, "adjfactor"].apply(lambda x: x / adjfactor)
        columns = copy.deepcopy(self.__need_adjust_columns)
        for column in columns:
            data.loc[:, column] = data.loc[:, column] * data.loc[:, "adjfactor"]
        return data

class AShareIPOWindDataBase(BaseWindDataBase):
    LOGGER_NAME = "AShareIPOWindDataBase"

    def __init__(self):
        super(AShareIPOWindDataBase, self).__init__()
        self.__table_name_dict = {"AShareIPOWindDataBase": "AShareIPO"}

    def get_initial_public_offering(self, securityIds):
        connect = self.connect()
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        sqlClause = "select * from " + table_parameter + ""
        data = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        default_items = list(BarFeedConfig.get_wind_database_items().get(self.LOGGER_NAME))
        drop_items = list(set(data.columns) - set(default_items))
        data.drop(labels=drop_items, axis=1, inplace=True)
        rename_dict = BarFeedConfig.get_wind_database_items().get(self.LOGGER_NAME)
        data.rename(columns=rename_dict, inplace=True)
        data0 = pd.DataFrame({"securityId": securityIds})
        data = pd.merge(left=data, right=data0, on="securityId", how="right")
        # change parameter numbers
        data.loc[:, "amount"] = data.loc[:, "amount"] * 10000
        data.loc[:, "collection"] = data.loc[:, "collection"] * 10000
        # NOTE: the original guarded with isinstance(x, datetime.datetime), but the
        # raw columns hold "%Y%m%d" strings, so a string check is the usable parse guard.
        data.loc[:, "subDate"] = data.loc[:, "subDate"].apply(
            lambda x: datetime.datetime.strptime(x, "%Y%m%d") if isinstance(x, str) else None)
        data.loc[:, "listDate"] = data.loc[:, "listDate"].apply(
            lambda x: datetime.datetime.strptime(x, "%Y%m%d") if isinstance(x, str) else None)
        data.sort_values(by="securityId", axis=0, ascending=True, inplace=True)
        data.reset_index(inplace=True, drop=True)
        return data

class AShareDayVarsWindDataBase(BaseWindDataBase):
    LOGGER_NAME = "AShareDayVarsWindDataBase"

    def __init__(self):
        super(AShareDayVarsWindDataBase, self).__init__()
        self.__table_name_dict = {"AShareDayVarsWindDataBase": ["AShareEODDerivativeIndicator",
                                                                "AShareEODPrices",
                                                                "AShareST"]}

    def get_value(self, date_datetime):
        connect = self.connect()
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)[0]
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        date_datetime = date_datetime.strftime("%Y%m%d")
        sqlClause = "select * from " + table_parameter + " where trade_dt = " + date_datetime + ""
        data = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        default_items = list(BarFeedConfig.get_wind_database_items().get(self.LOGGER_NAME))
        drop_items = list(set(data.columns) - set(default_items))
        data.drop(labels=drop_items, axis=1, inplace=True)
        rename_dict = BarFeedConfig.get_wind_database_items().get(self.LOGGER_NAME)
        data.rename(columns=rename_dict, inplace=True)
        # change parameter numbers
        data.loc[:, "dateTime"] = data.loc[:, "dateTime"].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        data.loc[:, "upLimit"] = np.where(data.loc[:, "upOrdown"] == 1, True, False)
        data.loc[:, "downLimit"] = np.where(data.loc[:, "upOrdown"] == -1, True, False)
        data.loc[:, "turnover"] = data.loc[:, "turnover"] / 100
        data.loc[:, "turnover_free"] = data.loc[:, "turnover_free"] / 100
        data.loc[:, "totalValue"] = data.loc[:, "totalValue"] * 10000
        data.loc[:, "marketValue"] = data.loc[:, "marketValue"] * 10000
        data.drop(labels=["upOrdown"], axis=1, inplace=True)
        # find out whether each stock is suspended
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)[1]
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        sqlClause = ("select s_info_windcode, trade_dt, s_dq_tradestatus from " + table_parameter + " "
                     "where trade_dt = '" + date_datetime + "'")
        data0 = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        data0.rename(columns=rename_dict, inplace=True)
        data0.loc[:, "dateTime"] = data0.loc[:, "dateTime"].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        data0.loc[:, "isNotSuspended"] = np.where(data0.loc[:, "s_dq_tradestatus"] == "交易", True, False)
        data0 = data0.loc[:, ["securityId", "dateTime", "isNotSuspended"]].copy(deep=True)
        data = pd.merge(left=data, right=data0, how="outer", on=("dateTime", "securityId"))
        # find out whether each stock is ST
        table_name = self.__table_name_dict.get(self.LOGGER_NAME)[2]
        owner = self.get_oracle_owner(table_name=table_name)
        table_parameter = owner + table_name
        sqlClause = ("select s_info_windcode, entry_dt, remove_dt, s_type_st from " + table_parameter + " "
                     "where entry_dt <= '" + date_datetime + "'")
        data0 = self.get_data_with_sql(sqlClause=sqlClause, connect=connect)
        data0.rename(columns=rename_dict, inplace=True)
        data0.loc[:, "entry_dt"] = data0.loc[:, "entry_dt"].apply(lambda x: datetime.datetime.strptime(x, "%Y%m%d"))
        data0.loc[:, "remove_dt"] = data0.loc[:, "remove_dt"].apply(
            lambda x: np.nan if x is None else datetime.datetime.strptime(x, "%Y%m%d"))
        date_datetime = datetime.datetime.strptime(date_datetime, "%Y%m%d")
        data0.loc[:, "isST"] = np.where(pd.isnull(data0.loc[:, "remove_dt"]), True,
                                        np.where(data0.loc[:, "remove_dt"] > date_datetime, True, False))
        data0 = data0.loc[data0.loc[:, "isST"] == True, ["securityId", "isST"]].copy(deep=True)
        data = pd.merge(left=data, right=data0, how="left", on="securityId")
        data.loc[:, "isST"] = np.where(data.loc[:, "isST"] == True, True, False)
        return data
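# Hedged usage sketch (an addition; the security code, items and dates are
# illustrative, and a configured Wind/Oracle connection is assumed):
# feed = AShareQuotationWindDataBase()
# bars = feed.get_quotation(securityIds=["000001.SZ"], items=["close", "volume"],
#                           frequency=86400,
#                           begin_datetime=datetime.datetime(2019, 1, 1),
#                           end_datetime=datetime.datetime(2019, 12, 31),
#                           adjusted="F")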
|
[
"pandas.DataFrame",
"copy.deepcopy",
"datafeeds.utils.BarFeedConfig.get_wind_database_items",
"datafeeds.utils.BarFeedConfig.get_wind",
"pandas.merge",
"datafeeds.logger.get_logger",
"pandas.isnull",
"datetime.datetime.strptime",
"numpy.where",
"pandas.concat"
] |
[((1741, 1797), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'dateTime': data.loc[:, 'dateTime']}"}), "(data={'dateTime': data.loc[:, 'dateTime']})\n", (1753, 1797), True, 'import pandas as pd\n'), ((5383, 5423), 'datafeeds.logger.get_logger', 'logger.get_logger', ([], {'name': 'self.LOGGER_NAME'}), '(name=self.LOGGER_NAME)\n', (5400, 5423), False, 'from datafeeds import logger\n'), ((6626, 6667), 'copy.deepcopy', 'copy.deepcopy', (['self.__need_adjust_columns'], {}), '(self.__need_adjust_columns)\n', (6639, 6667), False, 'import copy\n'), ((7837, 7878), 'pandas.DataFrame', 'pd.DataFrame', (["{'securityId': securityIds}"], {}), "({'securityId': securityIds})\n", (7849, 7878), True, 'import pandas as pd\n'), ((7895, 7957), 'pandas.merge', 'pd.merge', ([], {'left': 'data', 'right': 'data0', 'on': '"""securityId"""', 'how': '"""right"""'}), "(left=data, right=data0, on='securityId', how='right')\n", (7903, 7957), True, 'import pandas as pd\n'), ((10116, 10167), 'numpy.where', 'np.where', (["(data.loc[:, 'upOrdown'] == 1)", '(True)', '(False)'], {}), "(data.loc[:, 'upOrdown'] == 1, True, False)\n", (10124, 10167), True, 'import numpy as np\n'), ((10204, 10256), 'numpy.where', 'np.where', (["(data.loc[:, 'upOrdown'] == -1)", '(True)', '(False)'], {}), "(data.loc[:, 'upOrdown'] == -1, True, False)\n", (10212, 10256), True, 'import numpy as np\n'), ((11284, 11347), 'numpy.where', 'np.where', (["(data0.loc[:, 's_dq_tradestatus'] == '交易')", '(True)', '(False)'], {}), "(data0.loc[:, 's_dq_tradestatus'] == '交易', True, False)\n", (11292, 11347), True, 'import numpy as np\n'), ((11456, 11532), 'pandas.merge', 'pd.merge', ([], {'left': 'data', 'right': 'data0', 'how': '"""outer"""', 'on': "('dateTime', 'securityId')"}), "(left=data, right=data0, how='outer', on=('dateTime', 'securityId'))\n", (11464, 11532), True, 'import pandas as pd\n'), ((12359, 12410), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date_datetime', '"""%Y%m%d"""'], {}), "(date_datetime, '%Y%m%d')\n", (12385, 12410), False, 'import datetime\n'), ((12716, 12777), 'pandas.merge', 'pd.merge', ([], {'left': 'data', 'right': 'data0', 'how': '"""left"""', 'on': '"""securityId"""'}), "(left=data, right=data0, how='left', on='securityId')\n", (12724, 12777), True, 'import pandas as pd\n'), ((12809, 12859), 'numpy.where', 'np.where', (["(data.loc[:, 'isST'] == True)", '(True)', '(False)'], {}), "(data.loc[:, 'isST'] == True, True, False)\n", (12817, 12859), True, 'import numpy as np\n'), ((2705, 2719), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2717, 2719), True, 'import pandas as pd\n'), ((12452, 12488), 'pandas.isnull', 'pd.isnull', (["data0.loc[:, 'remove_dt']"], {}), "(data0.loc[:, 'remove_dt'])\n", (12461, 12488), True, 'import pandas as pd\n'), ((12537, 12601), 'numpy.where', 'np.where', (["(data0.loc[:, 'remove_dt'] > date_datetime)", '(True)', '(False)'], {}), "(data0.loc[:, 'remove_dt'] > date_datetime, True, False)\n", (12545, 12601), True, 'import numpy as np\n'), ((1684, 1723), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y%m%d"""'], {}), "(x, '%Y%m%d')\n", (1710, 1723), False, 'import datetime\n'), ((2361, 2385), 'datafeeds.utils.BarFeedConfig.get_wind', 'BarFeedConfig.get_wind', ([], {}), '()\n', (2383, 2385), False, 'from datafeeds.utils import BarFeedConfig\n'), ((3140, 3191), 'pandas.concat', 'pd.concat', ([], {'objs': '[data, data0]', 'axis': '(0)', 'join': '"""outer"""'}), "(objs=[data, data0], axis=0, join='outer')\n", (3149, 3191), True, 'import pandas as 
pd\n'), ((4537, 4576), 'datafeeds.utils.BarFeedConfig.get_wind_database_items', 'BarFeedConfig.get_wind_database_items', ([], {}), '()\n', (4574, 4576), False, 'from datafeeds.utils import BarFeedConfig\n'), ((4786, 4825), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y%m%d"""'], {}), "(x, '%Y%m%d')\n", (4812, 4825), False, 'import datetime\n'), ((7702, 7741), 'datafeeds.utils.BarFeedConfig.get_wind_database_items', 'BarFeedConfig.get_wind_database_items', ([], {}), '()\n', (7739, 7741), False, 'from datafeeds.utils import BarFeedConfig\n'), ((9811, 9850), 'datafeeds.utils.BarFeedConfig.get_wind_database_items', 'BarFeedConfig.get_wind_database_items', ([], {}), '()\n', (9848, 9850), False, 'from datafeeds.utils import BarFeedConfig\n'), ((10041, 10080), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y%m%d"""'], {}), "(x, '%Y%m%d')\n", (10067, 10080), False, 'import datetime\n'), ((11201, 11240), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y%m%d"""'], {}), "(x, '%Y%m%d')\n", (11227, 11240), False, 'import datetime\n'), ((12134, 12173), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y%m%d"""'], {}), "(x, '%Y%m%d')\n", (12160, 12173), False, 'import datetime\n'), ((7489, 7528), 'datafeeds.utils.BarFeedConfig.get_wind_database_items', 'BarFeedConfig.get_wind_database_items', ([], {}), '()\n', (7526, 7528), False, 'from datafeeds.utils import BarFeedConfig\n'), ((8216, 8255), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y%m%d"""'], {}), "(x, '%Y%m%d')\n", (8242, 8255), False, 'import datetime\n'), ((8392, 8431), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y%m%d"""'], {}), "(x, '%Y%m%d')\n", (8418, 8431), False, 'import datetime\n'), ((9598, 9637), 'datafeeds.utils.BarFeedConfig.get_wind_database_items', 'BarFeedConfig.get_wind_database_items', ([], {}), '()\n', (9635, 9637), False, 'from datafeeds.utils import BarFeedConfig\n'), ((12293, 12332), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['x', '"""%Y%m%d"""'], {}), "(x, '%Y%m%d')\n", (12319, 12332), False, 'import datetime\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 Butiá Team <EMAIL>
# Butia is a free open platform for robotics projects
# www.fing.edu.uy/inco/proyectos/butia
# Universidad de la República del Uruguay
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import apiSumoUY
import math
from TurtleArt.tapalette import make_palette
from TurtleArt.taprimitive import Primitive, ArgSlot
from TurtleArt.tatype import TYPE_INT, TYPE_NUMBER
from gettext import gettext as _
from plugins.plugin import Plugin
class Sumtia(Plugin):
def __init__(self, parent):
Plugin.__init__(self)
self.tw = parent
self.vel = 10
self._inited = False
self.api = apiSumoUY.apiSumoUY()
def setup(self):
palette = make_palette('sumtia', ["#00FF00","#008000"], _('SumBot'), translation=_('sumtia'))
palette.add_block('updateState',
style='basic-style',
label=_('update information'),
prim_name='updateState',
help_string=_('update information from the server'))
self.tw.lc.def_prim('updateState', 0,
Primitive(self.updateState))
palette.add_block('sendVelocities',
style='basic-style-2arg',
label=_('speed SumBot'),
prim_name='sendVelocities',
default=[10,10],
help_string=_('submit the speed to the SumBot'))
self.tw.lc.def_prim('sendVelocities', 2,
Primitive(self.sendVelocities, arg_descs=[ArgSlot(TYPE_NUMBER), ArgSlot(TYPE_NUMBER)]))
palette.add_block('setVel',
style='basic-style-1arg',
label=_('speed SumBot'),
prim_name='setVel',
default=[10],
help_string=_('set the default speed for the movement commands'))
self.tw.lc.def_prim('setVel', 1,
Primitive(self.setVel, arg_descs=[ArgSlot(TYPE_NUMBER)]))
palette.add_block('forwardSumtia',
style='basic-style',
label=_('forward SumBot'),
prim_name='forwardSumtia',
help_string=_('move SumBot forward'))
self.tw.lc.def_prim('forwardSumtia', 0,
Primitive(self.forward))
palette.add_block('backwardSumtia',
style='basic-style',
label=_('backward SumBot'),
prim_name='backwardSumtia',
help_string=_('move SumBot backward'))
self.tw.lc.def_prim('backwardSumtia', 0,
Primitive(self.backward))
palette.add_block('stopSumtia',
style='basic-style',
label=_('stop SumBot'),
prim_name='stopSumtia',
help_string=_('stop the SumBot'))
self.tw.lc.def_prim('stopSumtia', 0,
Primitive(self.stop))
palette.add_block('leftSumtia',
style='basic-style',
label=_('left SumBot'),
prim_name='leftSumtia',
help_string=_('turn left the SumBot'))
self.tw.lc.def_prim('leftSumtia', 0,
Primitive(self.left))
palette.add_block('rightSumtia',
style='basic-style',
label=_('right SumBot'),
prim_name='rightSumtia',
help_string=_('turn right the SumBot'))
self.tw.lc.def_prim('rightSumtia', 0,
Primitive(self.right))
palette.add_block('angleToCenter',
style='box-style',
label=_('angle to center'),
prim_name='angleToCenter',
help_string=_('get the angle to the center of the dohyo'))
self.tw.lc.def_prim('angleToCenter', 0,
Primitive(self.angleToCenter, TYPE_INT))
palette.add_block('angleToOpponent',
style='box-style',
label=_('angle to Enemy'),
prim_name='angleToOpponent',
help_string=_('get the angle to the Enemy'))
self.tw.lc.def_prim('angleToOpponent', 0,
Primitive(self.angleToOpponent, TYPE_INT))
palette.add_block('getX',
style='box-style',
label=_('x coor. SumBot'),
prim_name='getX',
help_string=_('get the x coordinate of the SumBot'))
self.tw.lc.def_prim('getX', 0,
Primitive(self.getX, TYPE_INT))
palette.add_block('getY',
style='box-style',
label=_('y coor. SumBot'),
prim_name='getY',
help_string=_('get the y coordinate of the SumBot'))
self.tw.lc.def_prim('getY', 0,
Primitive(self.getY, TYPE_INT))
palette.add_block('getOpX',
style='box-style',
label=_('x coor. Enemy'),
prim_name='getOpX',
help_string=_('get the x coordinate of the Enemy'))
self.tw.lc.def_prim('getOpX', 0,
Primitive(self.getOpX, TYPE_INT))
palette.add_block('getOpY',
style='box-style',
label=_('y coor. Enemy'),
prim_name='getOpY',
help_string=_('get the y coordinate of the Enemy'))
self.tw.lc.def_prim('getOpY', 0,
Primitive(self.getOpY, TYPE_INT))
palette.add_block('getRot',
style='box-style',
label=_('rotation SumBot'),
prim_name='getRot',
help_string=_('get the rotation of the Sumbot'))
self.tw.lc.def_prim('getRot', 0,
Primitive(self.getRot, TYPE_INT))
palette.add_block('getOpRot',
style='box-style',
label=_('rotation Enemy'),
prim_name='getOpRot',
help_string=_('get the rotation of the Enemy'))
self.tw.lc.def_prim('getOpRot', 0,
Primitive(self.getOpRot, TYPE_INT))
palette.add_block('getDistCenter',
style='box-style',
label=_('distance to center'),
prim_name='getDistCenter',
help_string=_('get the distance to the center of the dohyo'))
self.tw.lc.def_prim('getDistCenter', 0,
Primitive(self.getDistCenter, TYPE_INT))
palette.add_block('getDistOp',
style='box-style',
label=_('distance to Enemy'),
prim_name='getDistOp',
help_string=_('get the distance to the Enemy'))
self.tw.lc.def_prim('getDistOp', 0,
Primitive(self.getDistOp, TYPE_INT))
############################### Turtle signals ############################
def stop(self):
if self._inited:
self.api.enviarVelocidades(0,0)
def quit(self):
if self._inited:
self.api.liberarRecursos()
###########################################################################
# Sumtia helper functions for apiSumoUY.py interaction
def sendVelocities(self,vel_izq = 0, vel_der = 0):
self.api.enviarVelocidades(vel_izq, vel_der)
def setVel(self,vel = 0):
self.vel = int(vel)
def forward(self):
self.api.enviarVelocidades(self.vel, self.vel)
def backward(self):
self.api.enviarVelocidades(-self.vel, -self.vel)
def left(self):
self.api.enviarVelocidades(-self.vel, self.vel)
def right(self):
self.api.enviarVelocidades(self.vel, -self.vel)
def getX(self):
return self.api.getCoorX()
def getY(self):
return self.api.getCoorY()
def getOpX(self):
return self.api.getCoorXOp()
def getOpY(self):
return self.api.getCoorYOp()
def getRot(self):
return self.api.getRot()
def getOpRot(self):
return self.api.getRotOp()
def angleToCenter(self):
        rot = math.degrees(math.atan2(self.api.getCoorY(), self.api.getCoorX())) + (180 - self.getRot())
        # normalize to (-180, 180] so the result is always the shortest turn
        rot = rot % 360
        return rot - 360 if rot > 180 else rot
def angleToOpponent(self):
x = self.getX() - self.getOpX()
y = self.getY() - self.getOpY()
        rot = math.degrees(math.atan2(y, x)) + (180 - self.getRot())
        rot = rot % 360
        return rot - 360 if rot > 180 else rot
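    # Worked example of the normalization above: a raw angle of 350 becomes
    # -10, and -350 becomes +10, so the turn is never more than half a circle.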
def getDistCenter(self):
return math.sqrt(math.pow(self.getX(), 2) + math.pow(self.getY(), 2))
def getDistOp(self):
return math.sqrt(math.pow(self.getX() - self.getOpX(), 2) +
math.pow(self.getY() - self.getOpY(), 2))
def updateState(self):
if not(self._inited):
self.api.setPuertos()
self.api.conectarse()
self._inited = True
self.api.getInformacion()
|
[
"plugins.plugin.Plugin.__init__",
"math.atan2",
"TurtleArt.taprimitive.ArgSlot",
"TurtleArt.taprimitive.Primitive",
"apiSumoUY.apiSumoUY",
"gettext.gettext"
] |
[((1186, 1207), 'plugins.plugin.Plugin.__init__', 'Plugin.__init__', (['self'], {}), '(self)\n', (1201, 1207), False, 'from plugins.plugin import Plugin\n'), ((1303, 1324), 'apiSumoUY.apiSumoUY', 'apiSumoUY.apiSumoUY', ([], {}), '()\n', (1322, 1324), False, 'import apiSumoUY\n'), ((1412, 1423), 'gettext.gettext', '_', (['"""SumBot"""'], {}), "('SumBot')\n", (1413, 1423), True, 'from gettext import gettext as _\n'), ((1744, 1771), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.updateState'], {}), '(self.updateState)\n', (1753, 1771), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((2852, 2875), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.forward'], {}), '(self.forward)\n', (2861, 2875), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((3163, 3187), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.backward'], {}), '(self.backward)\n', (3172, 3187), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((3462, 3482), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.stop'], {}), '(self.stop)\n', (3471, 3482), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((3754, 3774), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.left'], {}), '(self.left)\n', (3763, 3774), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((4051, 4072), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.right'], {}), '(self.right)\n', (4060, 4072), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((4375, 4414), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.angleToCenter', 'TYPE_INT'], {}), '(self.angleToCenter, TYPE_INT)\n', (4384, 4414), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((4708, 4749), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.angleToOpponent', 'TYPE_INT'], {}), '(self.angleToOpponent, TYPE_INT)\n', (4717, 4749), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((5026, 5056), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.getX', 'TYPE_INT'], {}), '(self.getX, TYPE_INT)\n', (5035, 5056), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((5333, 5363), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.getY', 'TYPE_INT'], {}), '(self.getY, TYPE_INT)\n', (5342, 5363), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((5644, 5676), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.getOpX', 'TYPE_INT'], {}), '(self.getOpX, TYPE_INT)\n', (5653, 5676), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((5957, 5989), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.getOpY', 'TYPE_INT'], {}), '(self.getOpY, TYPE_INT)\n', (5966, 5989), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((6269, 6301), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.getRot', 'TYPE_INT'], {}), '(self.getRot, TYPE_INT)\n', (6278, 6301), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((6585, 6619), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.getOpRot', 'TYPE_INT'], {}), '(self.getOpRot, TYPE_INT)\n', (6594, 6619), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((6936, 6975), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.getDistCenter', 'TYPE_INT'], {}), '(self.getDistCenter, TYPE_INT)\n', (6945, 6975), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((7265, 7300), 'TurtleArt.taprimitive.Primitive', 'Primitive', (['self.getDistOp', 'TYPE_INT'], {}), 
'(self.getDistOp, TYPE_INT)\n', (7274, 7300), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((1437, 1448), 'gettext.gettext', '_', (['"""sumtia"""'], {}), "('sumtia')\n", (1438, 1448), True, 'from gettext import gettext as _\n'), ((1551, 1574), 'gettext.gettext', '_', (['"""update information"""'], {}), "('update information')\n", (1552, 1574), True, 'from gettext import gettext as _\n'), ((1645, 1684), 'gettext.gettext', '_', (['"""update information from the server"""'], {}), "('update information from the server')\n", (1646, 1684), True, 'from gettext import gettext as _\n'), ((1882, 1899), 'gettext.gettext', '_', (['"""speed SumBot"""'], {}), "('speed SumBot')\n", (1883, 1899), True, 'from gettext import gettext as _\n'), ((2006, 2041), 'gettext.gettext', '_', (['"""submit the speed to the SumBot"""'], {}), "('submit the speed to the SumBot')\n", (2007, 2041), True, 'from gettext import gettext as _\n'), ((2293, 2310), 'gettext.gettext', '_', (['"""speed SumBot"""'], {}), "('speed SumBot')\n", (2294, 2310), True, 'from gettext import gettext as _\n'), ((2406, 2458), 'gettext.gettext', '_', (['"""set the default speed for the movement commands"""'], {}), "('set the default speed for the movement commands')\n", (2407, 2458), True, 'from gettext import gettext as _\n'), ((2674, 2693), 'gettext.gettext', '_', (['"""forward SumBot"""'], {}), "('forward SumBot')\n", (2675, 2693), True, 'from gettext import gettext as _\n'), ((2766, 2790), 'gettext.gettext', '_', (['"""move SumBot forward"""'], {}), "('move SumBot forward')\n", (2767, 2790), True, 'from gettext import gettext as _\n'), ((2981, 3001), 'gettext.gettext', '_', (['"""backward SumBot"""'], {}), "('backward SumBot')\n", (2982, 3001), True, 'from gettext import gettext as _\n'), ((3075, 3100), 'gettext.gettext', '_', (['"""move SumBot backward"""'], {}), "('move SumBot backward')\n", (3076, 3100), True, 'from gettext import gettext as _\n'), ((3297, 3313), 'gettext.gettext', '_', (['"""stop SumBot"""'], {}), "('stop SumBot')\n", (3298, 3313), True, 'from gettext import gettext as _\n'), ((3383, 3403), 'gettext.gettext', '_', (['"""stop the SumBot"""'], {}), "('stop the SumBot')\n", (3384, 3403), True, 'from gettext import gettext as _\n'), ((3584, 3600), 'gettext.gettext', '_', (['"""left SumBot"""'], {}), "('left SumBot')\n", (3585, 3600), True, 'from gettext import gettext as _\n'), ((3670, 3695), 'gettext.gettext', '_', (['"""turn left the SumBot"""'], {}), "('turn left the SumBot')\n", (3671, 3695), True, 'from gettext import gettext as _\n'), ((3877, 3894), 'gettext.gettext', '_', (['"""right SumBot"""'], {}), "('right SumBot')\n", (3878, 3894), True, 'from gettext import gettext as _\n'), ((3965, 3991), 'gettext.gettext', '_', (['"""turn right the SumBot"""'], {}), "('turn right the SumBot')\n", (3966, 3991), True, 'from gettext import gettext as _\n'), ((4175, 4195), 'gettext.gettext', '_', (['"""angle to center"""'], {}), "('angle to center')\n", (4176, 4195), True, 'from gettext import gettext as _\n'), ((4268, 4313), 'gettext.gettext', '_', (['"""get the angle to the center of the dohyo"""'], {}), "('get the angle to the center of the dohyo')\n", (4269, 4313), True, 'from gettext import gettext as _\n'), ((4519, 4538), 'gettext.gettext', '_', (['"""angle to Enemy"""'], {}), "('angle to Enemy')\n", (4520, 4538), True, 'from gettext import gettext as _\n'), ((4613, 4644), 'gettext.gettext', '_', (['"""get the angle to the Enemy"""'], {}), "('get the angle to the Enemy')\n", (4614, 4644), True, 'from 
gettext import gettext as _\n'), ((4851, 4870), 'gettext.gettext', '_', (['"""x coor. SumBot"""'], {}), "('x coor. SumBot')\n", (4852, 4870), True, 'from gettext import gettext as _\n'), ((4934, 4973), 'gettext.gettext', '_', (['"""get the x coordinate of the SumBot"""'], {}), "('get the x coordinate of the SumBot')\n", (4935, 4973), True, 'from gettext import gettext as _\n'), ((5158, 5177), 'gettext.gettext', '_', (['"""y coor. SumBot"""'], {}), "('y coor. SumBot')\n", (5159, 5177), True, 'from gettext import gettext as _\n'), ((5241, 5280), 'gettext.gettext', '_', (['"""get the y coordinate of the SumBot"""'], {}), "('get the y coordinate of the SumBot')\n", (5242, 5280), True, 'from gettext import gettext as _\n'), ((5467, 5485), 'gettext.gettext', '_', (['"""x coor. Enemy"""'], {}), "('x coor. Enemy')\n", (5468, 5485), True, 'from gettext import gettext as _\n'), ((5551, 5589), 'gettext.gettext', '_', (['"""get the x coordinate of the Enemy"""'], {}), "('get the x coordinate of the Enemy')\n", (5552, 5589), True, 'from gettext import gettext as _\n'), ((5780, 5798), 'gettext.gettext', '_', (['"""y coor. Enemy"""'], {}), "('y coor. Enemy')\n", (5781, 5798), True, 'from gettext import gettext as _\n'), ((5864, 5902), 'gettext.gettext', '_', (['"""get the y coordinate of the Enemy"""'], {}), "('get the y coordinate of the Enemy')\n", (5865, 5902), True, 'from gettext import gettext as _\n'), ((6093, 6113), 'gettext.gettext', '_', (['"""rotation SumBot"""'], {}), "('rotation SumBot')\n", (6094, 6113), True, 'from gettext import gettext as _\n'), ((6179, 6214), 'gettext.gettext', '_', (['"""get the rotation of the Sumbot"""'], {}), "('get the rotation of the Sumbot')\n", (6180, 6214), True, 'from gettext import gettext as _\n'), ((6407, 6426), 'gettext.gettext', '_', (['"""rotation Enemy"""'], {}), "('rotation Enemy')\n", (6408, 6426), True, 'from gettext import gettext as _\n'), ((6494, 6528), 'gettext.gettext', '_', (['"""get the rotation of the Enemy"""'], {}), "('get the rotation of the Enemy')\n", (6495, 6528), True, 'from gettext import gettext as _\n'), ((6730, 6753), 'gettext.gettext', '_', (['"""distance to center"""'], {}), "('distance to center')\n", (6731, 6753), True, 'from gettext import gettext as _\n'), ((6826, 6874), 'gettext.gettext', '_', (['"""get the distance to the center of the dohyo"""'], {}), "('get the distance to the center of the dohyo')\n", (6827, 6874), True, 'from gettext import gettext as _\n'), ((7082, 7104), 'gettext.gettext', '_', (['"""distance to Enemy"""'], {}), "('distance to Enemy')\n", (7083, 7104), True, 'from gettext import gettext as _\n'), ((7173, 7207), 'gettext.gettext', '_', (['"""get the distance to the Enemy"""'], {}), "('get the distance to the Enemy')\n", (7174, 7207), True, 'from gettext import gettext as _\n'), ((8896, 8912), 'math.atan2', 'math.atan2', (['y', 'x'], {}), '(y, x)\n', (8906, 8912), False, 'import math\n'), ((2146, 2166), 'TurtleArt.taprimitive.ArgSlot', 'ArgSlot', (['TYPE_NUMBER'], {}), '(TYPE_NUMBER)\n', (2153, 2166), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((2168, 2188), 'TurtleArt.taprimitive.ArgSlot', 'ArgSlot', (['TYPE_NUMBER'], {}), '(TYPE_NUMBER)\n', (2175, 2188), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n'), ((2547, 2567), 'TurtleArt.taprimitive.ArgSlot', 'ArgSlot', (['TYPE_NUMBER'], {}), '(TYPE_NUMBER)\n', (2554, 2567), False, 'from TurtleArt.taprimitive import Primitive, ArgSlot\n')]
|
import sys
import os
class SrtFormatter():
    def _secs_to_minutes_hours(self , time):
        add_formatting = lambda a: '0' + a if len(a) == 1 else a
        milli_secs = time - int(time)
        secs = add_formatting(str(int(time) % 60))
        mins = add_formatting(str((int(time) // 60) % 60))
        hours = add_formatting(str(int(time) // 3600))
        # SRT timestamps require exactly three millisecond digits
        return f"{hours}:{mins}:{secs},{int(milli_secs * 1000):03d}"
def _format(self, transcript_data):
prev_time = 0
final_srt = ''
for index,each in enumerate(transcript_data) :
start_time = each['start']
end_time = each['start'] + each['duration']
final_srt += f"{index+1}\n"
final_srt += f'{self._secs_to_minutes_hours(start_time)} --> {self._secs_to_minutes_hours(end_time)}\n'
final_srt += each['text'] + '\n\n'
return final_srt
    # NOTE: the os.getcwd() default is evaluated once, when the class body is executed
    def format_and_save(self , transcript_data , location = os.getcwd() , file_name = r'Transcript'):
        file_name += r'.srt'
path_list = location.split(os.sep)
final_path = os.sep.join(path_list) + "/" + file_name
        with open(final_path, 'w', encoding = "utf-8") as srt_file:
            final_srt = self._format(transcript_data)
            srt_file.write(final_srt)  # the with-block closes the file on exit
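# Minimal usage sketch, assuming transcript entries shaped like the YouTube
# transcript API output (dicts with 'text', 'start' and 'duration' keys);
# the sample data below is hypothetical.
if __name__ == '__main__':
    sample_transcript = [
        {'text': 'Hello world', 'start': 0.0, 'duration': 1.5},
        {'text': 'Second caption', 'start': 1.5, 'duration': 2.0},
    ]
    SrtFormatter().format_and_save(sample_transcript, file_name='demo')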
|
[
"os.getcwd",
"os.sep.join"
] |
[((971, 982), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (980, 982), False, 'import os\n'), ((1108, 1130), 'os.sep.join', 'os.sep.join', (['path_list'], {}), '(path_list)\n', (1119, 1130), False, 'import os\n')]
|
import cv2
import tensorflow as tf
import numpy as np
from keras.models import Model
from keras.models import load_model
from numpy import asarray
from PIL import Image, ImageOps
import azure_get_unet as azure_predict
# Since we are using the Azure API, there is no need to save the model to the local filesystem
# model = load_model("/static/model/trees-v1.h5")
# model prediction returns array of prediction
# input is a numpy array
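# NOTE: running predict_frame locally assumes a loaded `model`; either restore
# the load_model(...) call above or route the prediction through azure_predict.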
def predict_frame(image):
image = np.expand_dims(image, 0)
result = model.predict(image, batch_size=1)
result = np.squeeze(result, 0)
result = tf.argmax(result, -1)
return result
# for resizing the images after predicting the frames
def resize_frame(arr, shape):
result = Image.fromarray(arr)
result = result.resize(shape)
result = asarray(result)
return result
# change the alpha values of the segmentation masks for overlay
def convert_mask_alpha(image_arr):
img_transparent = Image.fromarray(image_arr)
imga = img_transparent.convert('RGBA')
imga_data = imga.getdata()
newData = []
for item in imga_data:
if (item[0] == 0):
newData.append((0,0,0,0))
else:
# orange transparent mask
newData.append((255,170,0,100))
img_transparent.close()
imga.putdata(newData)
imga = np.array(imga)
return imga
# generate the list for the segmentation frames based on video path
def get_segmentation_frames(video_path):
# Step 1: create the cv2 video capture object
vidObj = cv2.VideoCapture(video_path)
# Step 2: capture the video frames and predict segmentation,
# then append the segmented frames
mask_frames = []
count = 0
success = 1
while (True):
success, image = vidObj.read()
if (success == 0):
break
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Using PIL to get the proper coloration from the cv2 capture
image = Image.fromarray(image)
# 128x128 grayscale for UNet model processing
image = image.resize((128, 128))
image = ImageOps.grayscale(image)
image = asarray(image)
        # convert the prediction to a uint8 numpy array
        # and resize it to the 480x270 output resolution
append = predict_frame(image)
append = np.array(append)
append = append.astype('uint8')
append = resize_frame(append, (480, 270))
        # collect the 480x270 mask arrays
mask_frames.append(append)
# Step 3: convert the lists to numpy, and cast into usable
# black/ white array data for the video writer
mask_frames = np.array(mask_frames)
mask_frames = mask_frames * 255
# just a sanity check for the VideoWriter
mask_frames = mask_frames.astype('uint8')
# return usable arrays for video writing
return mask_frames
# This function will overlay the mask frames with the original video frames
def get_segmentation_frames_compiled(video_path):
# Step 1: retrieve the full sized segmentation frames
print('Generating segmentation frames...')
mask_frames_list = get_segmentation_frames(video_path)
print('Segmentation frames finished')
# Step 2: make a new cv2 video capture object for recycling the image files
vidObj = cv2.VideoCapture(video_path)
compiled_list = []
frame = 0
success = 1
# per frame, compile the values
while (True):
success, image = vidObj.read()
if (success == 0):
break
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = image.resize((480, 270))
image = image.convert('RGBA')
image = np.array(image)
mask = convert_mask_alpha(mask_frames_list[frame])
add_imgs = cv2.addWeighted(image, 1.0, mask, 0.4, 0.0)
add_imgs = Image.fromarray(add_imgs).convert('RGB')
add_imgs = asarray(add_imgs)
compiled_list.append(add_imgs)
frame += 1
# return the RGBA data list
compiled_list = np.array(compiled_list)
print('Frames are finished compiling')
return compiled_list
# expects a list of uint8 numpy arrays sized 480x270
def frames_to_video(imput_list, name, isRGB):
    out = cv2.VideoWriter(name + '.mp4', cv2.VideoWriter_fourcc(*'MP4V'), 24, (480, 270), isRGB)
    for i in range(len(imput_list)):
        out.write(imput_list[i])
    out.release()  # flush and close the writer so the .mp4 is playable
    print('finished')
# input will be a PIL image
def overlay_mask_to_img(original_image):
mask = original_image
mask = mask.resize((128,128))
mask = ImageOps.grayscale(mask)
mask = asarray(mask)
mask = predict_frame(mask)
mask = np.array(mask)
mask = mask.astype('uint8')
mask = convert_mask_alpha(mask)
mask = Image.fromarray(mask)
mask = mask.resize((1200, 600))
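    # NOTE: assumes the input image is 1200x600 so that cv2.addWeighted below
    # receives a mask and an image of identical shape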
original_image = original_image.convert('RGBA')
original_image = asarray(original_image)
original_image = original_image.astype('uint8')
mask = asarray(mask).astype('uint8')
print(original_image.shape)
add_imgs = cv2.addWeighted(original_image, 1.0, mask, 0.4, 0.0)
return add_imgs
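# Minimal end-to-end sketch (assumes a local clip named 'input.mp4' and a
# loaded `model` for predict_frame): build the overlay frames and write them
# out. The compiled frames are RGB while cv2.VideoWriter expects BGR, so reds
# and blues will appear swapped unless converted first.
if __name__ == '__main__':
    overlay_frames = get_segmentation_frames_compiled('input.mp4')
    frames_to_video(overlay_frames, 'segmented_output', True)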
|
[
"cv2.VideoWriter_fourcc",
"tensorflow.argmax",
"cv2.cvtColor",
"numpy.asarray",
"numpy.expand_dims",
"PIL.ImageOps.grayscale",
"cv2.addWeighted",
"PIL.Image.fromarray",
"cv2.VideoCapture",
"numpy.array",
"numpy.squeeze"
] |
[((474, 498), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (488, 498), True, 'import numpy as np\n'), ((556, 577), 'numpy.squeeze', 'np.squeeze', (['result', '(0)'], {}), '(result, 0)\n', (566, 577), True, 'import numpy as np\n'), ((589, 610), 'tensorflow.argmax', 'tf.argmax', (['result', '(-1)'], {}), '(result, -1)\n', (598, 610), True, 'import tensorflow as tf\n'), ((723, 743), 'PIL.Image.fromarray', 'Image.fromarray', (['arr'], {}), '(arr)\n', (738, 743), False, 'from PIL import Image, ImageOps\n'), ((787, 802), 'numpy.asarray', 'asarray', (['result'], {}), '(result)\n', (794, 802), False, 'from numpy import asarray\n'), ((939, 965), 'PIL.Image.fromarray', 'Image.fromarray', (['image_arr'], {}), '(image_arr)\n', (954, 965), False, 'from PIL import Image, ImageOps\n'), ((1273, 1287), 'numpy.array', 'np.array', (['imga'], {}), '(imga)\n', (1281, 1287), True, 'import numpy as np\n'), ((1476, 1504), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (1492, 1504), False, 'import cv2\n'), ((2595, 2616), 'numpy.array', 'np.array', (['mask_frames'], {}), '(mask_frames)\n', (2603, 2616), True, 'import numpy as np\n'), ((3241, 3269), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (3257, 3269), False, 'import cv2\n'), ((3997, 4020), 'numpy.array', 'np.array', (['compiled_list'], {}), '(compiled_list)\n', (4005, 4020), True, 'import numpy as np\n'), ((4498, 4522), 'PIL.ImageOps.grayscale', 'ImageOps.grayscale', (['mask'], {}), '(mask)\n', (4516, 4522), False, 'from PIL import Image, ImageOps\n'), ((4532, 4545), 'numpy.asarray', 'asarray', (['mask'], {}), '(mask)\n', (4539, 4545), False, 'from numpy import asarray\n'), ((4584, 4598), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (4592, 4598), True, 'import numpy as np\n'), ((4672, 4693), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (4687, 4693), False, 'from PIL import Image, ImageOps\n'), ((4797, 4820), 'numpy.asarray', 'asarray', (['original_image'], {}), '(original_image)\n', (4804, 4820), False, 'from numpy import asarray\n'), ((4953, 5005), 'cv2.addWeighted', 'cv2.addWeighted', (['original_image', '(1.0)', 'mask', '(0.4)', '(0.0)'], {}), '(original_image, 1.0, mask, 0.4, 0.0)\n', (4968, 5005), False, 'import cv2\n'), ((1788, 1826), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1800, 1826), False, 'import cv2\n'), ((1913, 1935), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1928, 1935), False, 'from PIL import Image, ImageOps\n'), ((2048, 2073), 'PIL.ImageOps.grayscale', 'ImageOps.grayscale', (['image'], {}), '(image)\n', (2066, 2073), False, 'from PIL import Image, ImageOps\n'), ((2090, 2104), 'numpy.asarray', 'asarray', (['image'], {}), '(image)\n', (2097, 2104), False, 'from numpy import asarray\n'), ((2272, 2288), 'numpy.array', 'np.array', (['append'], {}), '(append)\n', (2280, 2288), True, 'import numpy as np\n'), ((3479, 3517), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3491, 3517), False, 'import cv2\n'), ((3534, 3556), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (3549, 3556), False, 'from PIL import Image, ImageOps\n'), ((3652, 3667), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3660, 3667), True, 'import numpy as np\n'), ((3746, 3789), 'cv2.addWeighted', 'cv2.addWeighted', (['image', '(1.0)', 'mask', '(0.4)', '(0.0)'], {}), 
'(image, 1.0, mask, 0.4, 0.0)\n', (3761, 3789), False, 'import cv2\n'), ((3869, 3886), 'numpy.asarray', 'asarray', (['add_imgs'], {}), '(add_imgs)\n', (3876, 3886), False, 'from numpy import asarray\n'), ((4213, 4244), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MP4V'"], {}), "(*'MP4V')\n", (4235, 4244), False, 'import cv2\n'), ((4880, 4893), 'numpy.asarray', 'asarray', (['mask'], {}), '(mask)\n', (4887, 4893), False, 'from numpy import asarray\n'), ((3809, 3834), 'PIL.Image.fromarray', 'Image.fromarray', (['add_imgs'], {}), '(add_imgs)\n', (3824, 3834), False, 'from PIL import Image, ImageOps\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 15:55:46 2019
@author: dweckler
"""
import numpy as np, matplotlib.pyplot as plt
from keras import backend as T
import time
import os
from .utilities import flat_avg
from dtmil.configuration.config_dtmil import get_json_config_data
from .prediction_data import Prediction_Data
import math
#%%class def
class Visualizer:
#TODO: Redesign this to work with multiple sources without depending on having all the data at once
def __init__(self, myData, myModel, sample_idx = None, guidelines = True, prediction_data = None, dataset_dir = None, input_json_data = None):
self.myData = myData
self.myModel = myModel
self._current_sample = sample_idx
##FIXME: make this update the visualization parameters every run (grab location of config file from myData?)
if (input_json_data is not None):
json_data = input_json_data
else:
_, json_data, _ = get_json_config_data(dataset_dir)
self.visualization_params = json_data['visualization']
##FIXME: Make this more able to be manually defined
sf = 0.25
self.xvec_scale_factor = sf
self.xvec_timeline=np.arange((self.myData.maxlen-1)*sf,-sf,-sf)
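        # the timeline counts down from (maxlen-1)*sf to 0 in steps of sf,
        # matching the inverted x-axes used in the plots below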
        #this accounts for the extra value in the start and end indices; it should be cleaned up in the future
self.xvec_temp_time_lookup = np.copy(self.xvec_timeline)
self.xvec_temp_time_lookup = np.append(self.xvec_temp_time_lookup,self.xvec_timeline[-1])
        if sample_idx is None:
            print("sample index is set to None, using default value")
            sample_idx = 0
if prediction_data:
self.prediction_data = prediction_data
else:
self.prediction_data = Prediction_Data(myData,myModel,sample_idx)
self.guidelines = guidelines
if (guidelines):
self.get_guidelines()
@classmethod
def frompredictiondata(cls, prediction_data, guidelines = True):
        #initialize from prediction data
return cls(prediction_data.myData, prediction_data.myModel, prediction_data.current_sample, prediction_data = prediction_data)
#%%plot sample timeline function
@property
def current_sample(self):
return self._current_sample
@current_sample.setter
def current_sample(self,value):
self._current_sample = value
self.prediction_data = Prediction_Data(self.myData,self.myModel,value)
def plot_sample_timeline(self, figure_size = None, saveFig = True):
myModel = self.myModel
model_output_directory = myModel.model_output_directory
xtest = myModel.xtest
if (saveFig):
plt.switch_backend('agg')
# function to get an intermediate layer's output (instance probabilities)
inst_layer_output_fn = T.function([myModel.model.layers[0].input],[myModel.model.layers[-2].output])
temp=xtest
L=inst_layer_output_fn([temp])[0]
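        # L is indexed as L[sample, timestep, 0]: the per-timestep
        # instance probability of the adverse event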
nex=int(temp.shape[0]/2)
plt.figure(figsize=figure_size)
plt.subplot(2,1,1)
plt.plot(np.transpose(L[:nex,:,0]),'g')
plt.ylim([-0.1,1.1])
#plt.xlabel('Time to adverse event',fontsize=14)
#plt.xlabel('Sample timeline',fontsize=14)
plt.ylabel('Probability of \n adverse event',fontsize=14)
# plt.xticks([0,10,20],['1000 ft \n altitude', '10 mi', '20 mi'],rotation=0)
#plt.gca().invert_xaxis()
plt.subplot(2,1,2)
plt.plot(np.transpose(L[nex:,:,0]),'r')
plt.ylim([-0.1,1.1])
#plt.gca().invert_xaxis()
plt.xlabel('sample timeline',fontsize=14)
#plt.xticks([0,10,20],['1000 ft \n altitude', '10 mi', '20 mi'],rotation=0)
plt.ylabel('Probability of \n adverse event',fontsize=14)
temp=self.myData.xvalid
L=inst_layer_output_fn([temp])[0]
nex=int(temp.shape[0]/2)
np.where(L[nex:,80:,0]>0.5)[0][:10]
if(saveFig):
plt.savefig(os.path.join(model_output_directory,"timeline.png"))
#%%batch visualization function
#FIXME: text sizing
def visualize_sample_parameters(self,figure_size = None, saveFig = False, file_output_dir = "",file_output_type = "pdf",num_columns = 5, subplot_aspect_ratio = (1,1), subplot_size = 3.6):
myData = self.myData
# myModel = self.myModel
if (saveFig):
plt.switch_backend('agg')
#specify the variables to be included in the plot
correlated_states = myData.correlated_states.tolist()
trained_states = myData.parameter_selection.tolist()
parameters_to_plot=correlated_states + trained_states
correlated_indeces = len(correlated_states)
num_plots = len(parameters_to_plot) + 1
num_rows = math.ceil(float(num_plots)/float(num_columns))
if figure_size is None:
width = 4*num_columns
height = num_rows * 3.5
figure_size = (width,height)
fig, axs = plt.subplots(num_rows,num_columns, figsize= figure_size)
axs=axs.ravel()
starting_index = -1-myData.maxlen+1
for pltIdx in np.arange(len(parameters_to_plot)):
selected_parameter = parameters_to_plot[pltIdx]
plot_title = "{}".format(myData.header[selected_parameter])
#add holdout to the title if it's within the correlated indeces
if (pltIdx < correlated_indeces):
plot_title = plot_title + "(H/O)"
self.plot_parameter(selected_parameter,axs[pltIdx],starting_index, plot_title = plot_title)
# plot precursor score in a separate subplot
pltIdx=pltIdx+1
self.plot_precursor_score(axs[pltIdx],'Precursor Score')
fig.tight_layout()
# save figure if needed
if saveFig:
suffix = "_{}".format(self.myData.get_filename(self.current_sample))
file_label, file_dataset_type = self.myData.get_grouping(self.current_sample)
filename = "{}_{}".format(file_label,file_dataset_type)
save_figure(self.myModel,suffix,fig,file_output_dir,filename,file_output_type = 'pdf')
#self.save_figure(fig,file_output_dir)
def special_ranking_visualization(self, states_to_visualize,sorted_ranking_sums,figure_size = (10,10), saveFig = False, file_output_dir = "",file_output_type = "pdf"):
myData = self.myData
fig, axs = plt.subplots(3,3, figsize= figure_size)
axs=axs.ravel()
self.plot_precursor_score(axs[1],'Precursor Score')
for i in range(6):
selected_parameter = states_to_visualize[i]
plot_title = "{} ({})".format(myData.header[selected_parameter],sorted_ranking_sums[i])
#add holdout to the title if it's within the correlated indeces
self.plot_parameter(selected_parameter,axs[i+3],0, plot_title = plot_title)
#TODO: same as below except ordered ranking parameters with a variable number of columns and such
#output with values of ranking
#figure out what the values mean to report to bryan tomorrow
def visualize_top_ranking_parameters(self,ranking_group,feature_num_limit=None,num_columns = 4,displayfig = False):
file_output_dir = "feature_ranking"
myData = self.myData
if (not displayfig):
plt.switch_backend('agg')
#get as many as we can
#score_pair_lists = ranking_group.top_ranking_scores(1)
#response_windows_lists = ranking_group.top_response_windows(1)
response_windows_lists = ranking_group.ordered_response_windows_list
if(feature_num_limit is not None):
if len(response_windows_lists[0])> feature_num_limit:
response_windows_lists = [lst[0:feature_num_limit] for lst in response_windows_lists]
num_windows = len(response_windows_lists)
#print(feature_num_limit,len(response_windows_lists[0]),len(response_windows_lists[1]))
for idx,response_windows in enumerate(response_windows_lists):
parameter_selection = [window.attribute_index for window in response_windows]
# print([window.ranking_score for window in response_windows])
# print([window.most_important_sd_response for window in response_windows])
score_list = [round(window.ranking_score,3) for window in response_windows]
sd_response_list = []
for window in response_windows:
most_important_response = window.most_important_sd_response
if most_important_response is not None:
sd_response_list.append(str(most_important_response))
else:
sd_response_list.append("n/a")
#sd_response_list = [round(window.most_important_sd_response,3) for window in response_windows]
num_plots = len(response_windows) + 1
num_rows = math.ceil(float(num_plots)/float(num_columns))
width = 4*num_columns
height = num_rows * 3.5
figsize = (width,height)
fig, axs = plt.subplots(num_rows,num_columns, figsize= figsize)
axs=axs.ravel()
fig.tight_layout()
xvec_timeline = self.xvec_timeline
plot_idx = 0
axs[plot_idx].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r',linewidth=2,label = "Default")
axs[plot_idx].set_title("Precursor Score",fontsize=10)
axs[plot_idx].set_ylim([0,1])
axs[plot_idx].invert_xaxis()
if(self.guidelines):
axs[plot_idx].plot(self.xvec_timeline,self.precursor_score_guideline,'k--')
graph_colors = ['b','g','k','y','c','m','k','w']
color_idx = 0
sd_disturbances = ranking_group.parent.standard_deviation_disturbances
#TODO: condense everything below into one function (rather than writing the same code twice)
parameter_window_indeces = [ranking_group.parameter_list.index(i) for i in parameter_selection]
parameter_windows = [ranking_group.parameter_windows[i] for i in parameter_window_indeces]
#if this process isn't behind an if statement, the algorithm will output blank graphs
#furthermore, it will cause some of the following graphs to come out blank as well
#the cause of this is unknown, but may be useful to investigate in the future
if len(parameter_windows)>0:
#TODO: Figure out why this conditional became necessary and the one above stopped working? (maybe some revisions impacted it?)
if len(parameter_windows[0].start_indeces)>0:
start_index = parameter_windows[0].start_indeces[idx]
end_index = parameter_windows[0].end_indeces[idx]
window_start_idx = self.xvec_temp_time_lookup[start_index]
window_end_idx = self.xvec_temp_time_lookup[end_index]
axs[plot_idx].axvspan(window_start_idx, window_end_idx, alpha=0.1, color='k')
for index,window in enumerate(parameter_windows):
color_idx = 0
plot_idx = index+1
axs[plot_idx].invert_xaxis()
#axs[plot_idx].set(adjustable='box', aspect=1)
axs[plot_idx].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r', label = "Default",linewidth=2)
axs[plot_idx].axvspan(window_start_idx, window_end_idx, alpha=0.1, color='k')
for precursor_score in window.modified_precursor_scores:
selected_parameter = parameter_selection[index]
disturbance = sd_disturbances[color_idx]
if disturbance > 0:
label = "+ {} σ response".format(disturbance)
else:
label = "- {} σ response".format(abs(disturbance))
axs[plot_idx].plot(xvec_timeline,precursor_score,graph_colors[color_idx],linewidth=2,label = label)
axs[plot_idx].set_title("{} \n({}, {} σ response)".format(myData.header[selected_parameter],score_list[index],sd_response_list[index]),fontsize=10)
axs[plot_idx].set_ylim([0,1])
if(self.guidelines):
axs[plot_idx].plot(self.xvec_timeline,self.precursor_score_guideline,'k--')
color_idx += 1
if(plot_idx>1):
handles, labels = axs[plot_idx].get_legend_handles_labels()
fig.legend(handles, labels, loc='lower right')
#save the figure
plt.tight_layout()
file_label, file_dataset_type = self.myData.get_grouping(ranking_group.data_ID)
filename = "{}_{}_ranking".format(file_label,file_dataset_type)
suffix = "_{}".format(self.myData.get_filename(ranking_group.data_ID))
if num_windows > 1:
suffix = "{}_precursor_event_{}".format(suffix,idx)
save_figure(self.myModel,suffix,fig,file_output_dir,filename,output_time = False)
else:
#TODO:
print("Precursor score for {} does not cross threshold?".format(self.myData.get_filename(ranking_group.data_ID)))
else:
print("Precursor score for {} does not cross threshold!".format(self.myData.get_filename(ranking_group.data_ID)))
# def visualize_ranking_data(self,ranking_group, output_file = None, parameter_selection = None, num_columns = 7, subplot_aspect_ratio = (1,1), subplot_size = 3.6):
# myData = self.myData
# print("generating ranking data plot")
#
# if parameter_selection is None:
# parameter_selection = myData.parameter_selection.tolist()
#
# #all the paramaeters plus the precursor score in its own plot
# num_plots = len(parameter_selection) + 1
# num_rows = math.ceil(float(num_plots)/float(num_columns))
# dx, dy = subplot_aspect_ratio
# figsize = plt.figaspect(float(dy * num_rows) / float(dx * num_columns)) * subplot_size
#
# fig, axs = plt.subplots(num_rows,num_columns, figsize= figsize)
# #fig, axs = plt.subplots(numRows,numColumns)
# axs=axs.ravel()
# fig.tight_layout()
# #xvec_timeline=np.arange((myData.maxlen-1)*0.25,-0.25,-0.25)
#
# xvec_timeline = self.xvec_timeline
#
# axs[0].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r',linewidth=2)
# axs[0].set_title("Normal",fontsize=10)
# axs[0].set_ylim([0,1])
# axs[0].invert_xaxis()
#
# graph_colors = ['b','g','k','y']
# color_idx = 0
#
# parameter_window_indeces = [ranking_group.parameter_list.index(i) for i in parameter_selection]
# parameter_windows = [ranking_group.parameter_windows[i] for i in parameter_window_indeces]
#
# for index,window in enumerate(parameter_windows):
# color_idx = 0
# plot_idx = index+1
# axs[plot_idx].invert_xaxis()
#
# for precursor_score in window.modified_precursor_scores:
# selected_parameter = parameter_selection[index]
#
# axs[plot_idx].plot(xvec_timeline,precursor_score,graph_colors[color_idx],linewidth=2)
# axs[plot_idx].set_title("{} ({})".format(myData.header[selected_parameter],selected_parameter),fontsize=10)
# axs[plot_idx].set_ylim([0,1])
# axs[plot_idx].plot(xvec_timeline,ranking_group.prediction_data.precursor_score,'r',linewidth=1)
# color_idx += 1
#%%save figure
def save_figure(self, fig,file_output_dir,file_output_type = 'pdf'):
save_figure(self.myModel,self.current_sample,fig,file_output_dir,"parameters_graph",file_output_type = 'pdf')
#%%plot precursor score
def plot_precursor_score(self, plot_axis, plot_title = "Precursor Score", start_index = None, end_index = None):
precursor_score = self.prediction_data.precursor_score
plot_axis.plot(self.xvec_timeline[start_index:end_index], precursor_score[start_index:end_index],'r',linewidth=2)
if(self.guidelines):
plot_axis.plot(self.xvec_timeline[start_index:end_index],self.precursor_score_guideline[start_index:end_index],'k--')
plot_axis.invert_xaxis()
plot_axis.set_title(plot_title,fontsize=10)
plot_axis.set_ylim([0,1])
    #%%plot individual parameter
def plot_parameter(self, selected_parameter, plot_axis,starting_index = 0,end_index = None,plot_title = "", precIdx = None):
##FIXME: Make this more able to be manually defined
xvec_timeline=self.xvec_timeline
#FIXME: Make Prediction Data update states_orig ("visualization_sample")
parameter_values = self.prediction_data.visualization_window[starting_index:end_index,selected_parameter]
# plot time series variable
plot_axis.plot(xvec_timeline[starting_index:end_index],parameter_values,linewidth=2)
##plot the guidelines
# if discrete variable, use discrete nominal data as guideline, else use continuous nominal data
if selected_parameter in self.visualization_params["binary_parameters"]:
plot_axis.plot(xvec_timeline[starting_index:end_index],self.discrete_nominal_guideline[starting_index:end_index,selected_parameter],'k--',linewidth=2)
plot_axis.set_ylim([-0.1,1.1])
else:
plot_axis.plot(xvec_timeline[starting_index:end_index],self.nominal_guideline[0,starting_index:end_index,selected_parameter],'k--',linewidth=2)
plot_axis.plot(xvec_timeline[starting_index:end_index],self.nominal_guideline[1,starting_index:end_index,selected_parameter],'k--',linewidth=2)
##use this if we are dealing with multiple precursor score predictions, otherwise use the one genereated upon class initialization
if (precIdx):
precursor_indeces = precIdx
else:
precursor_indeces = self.prediction_data.precursor_indeces
# plot precursor time instants as an overlay
if len(precursor_indeces)>0:
precursor_overlay_values = self.prediction_data.visualization_window[precursor_indeces,selected_parameter]
self.precursor_overlay_values = precursor_overlay_values
if(end_index):
if end_index >= precursor_indeces[0]:
precursor_end_index = (np.abs(precursor_indeces - (end_index))).argmin()
print(precursor_end_index,end_index)
plot_axis.plot(xvec_timeline[precursor_indeces][0:precursor_end_index],precursor_overlay_values[0:precursor_end_index],'ro', alpha = 0.4)
else:
plot_axis.plot(xvec_timeline[precursor_indeces],precursor_overlay_values,'ro', alpha = 0.4)
#
if plot_title == "":
plot_title = "{} ({})".format(self.myData.header[selected_parameter],selected_parameter)
plot_axis.set_title(plot_title,fontsize=10)
# # invert x-axis so that distance to touchdown reduces as we go towards rightside of the plot
plot_axis.invert_xaxis()
#%%get guidelines
def get_guidelines(self):
myData = self.myData
optimal_values=myData.states_orig[:,np.concatenate((myData.I_opt,myData.I_opt_valid),axis=0)]
#determine guidelines
guideline_type = self.visualization_params["guideline_type"]
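        # guideline_type 1: flat nominal mean +/- 2.5 standard deviations;
        # any other value: pointwise 10th/90th percentiles of the nominal data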
if guideline_type == 1:
optimal_standard_dev = np.std(optimal_values, axis=1)
optimal_mean = np.mean(optimal_values,axis = 1)
avg_guideline =flat_avg(optimal_mean)
sdev_guideline = flat_avg(optimal_standard_dev)
sdev_scale = 2.5
upper_guideline = avg_guideline + sdev_scale * sdev_guideline
lower_guideline = avg_guideline - sdev_scale * sdev_guideline
nominal_guideline = np.array([lower_guideline, upper_guideline])
else:
# get nominal percentiles for plotting
nominal_guideline=np.percentile(optimal_values,[10,90],axis=1)
self.nominal_guideline = nominal_guideline
# Get nominal values assuming binary (note that we will only use this if the variable is binary)
self.discrete_nominal_guideline=np.mean(optimal_values,axis=1)
self.precursor_score_guideline = np.full(optimal_values.shape[0],self.prediction_data.precursor_threshold)
def save_figure(myModel, figure_suffix, fig,file_output_dir,filename,file_output_type = 'pdf', output_time = True):
time_start = time.time()
print("Saving figure: {}".format(figure_suffix))
model_output_directory = myModel.model_output_directory
if model_output_directory != "":
model_output_directory = os.path.join(model_output_directory,file_output_dir)
if not os.path.exists(model_output_directory):
print(f"creating directory {model_output_directory}")
os.makedirs(model_output_directory)
filename = "{}{}.{}".format(filename,figure_suffix,"pdf")
filepath = os.path.join(model_output_directory,filename)
#print("Saving figure: {}".format(filepath))
fig.savefig(filepath,format= file_output_type)
# if(output_time):
# print("Total time to save figure: {}".format(time.time()-time_start))
def visualize(myData, myModel,sample_idx = 0, savefig = False):
vis = Visualizer(myData,myModel,sample_idx)
vis.plot_sample_timeline(figure_size = (8,6), saveFig = savefig)
print("Visualizing Sample {}".format(sample_idx))
vis.visualize_sample_parameters(figure_size=(32,24),saveFig = savefig)
|
[
"numpy.abs",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"os.path.join",
"matplotlib.pyplot.tight_layout",
"numpy.full",
"numpy.copy",
"numpy.std",
"numpy.transpose",
"os.path.exists",
"numpy.append",
"matplotlib.pyplot.subplots",
"dtmil.configuration.config_dtmil.get_json_config_data",
"matplotlib.pyplot.ylim",
"keras.backend.function",
"numpy.percentile",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.switch_backend",
"matplotlib.pyplot.subplot",
"os.makedirs",
"time.time",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((22688, 22699), 'time.time', 'time.time', ([], {}), '()\n', (22697, 22699), False, 'import time\n'), ((23198, 23244), 'os.path.join', 'os.path.join', (['model_output_directory', 'filename'], {}), '(model_output_directory, filename)\n', (23210, 23244), False, 'import os\n'), ((1321, 1371), 'numpy.arange', 'np.arange', (['((self.myData.maxlen - 1) * sf)', '(-sf)', '(-sf)'], {}), '((self.myData.maxlen - 1) * sf, -sf, -sf)\n', (1330, 1371), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((1533, 1560), 'numpy.copy', 'np.copy', (['self.xvec_timeline'], {}), '(self.xvec_timeline)\n', (1540, 1560), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((1598, 1659), 'numpy.append', 'np.append', (['self.xvec_temp_time_lookup', 'self.xvec_timeline[-1]'], {}), '(self.xvec_temp_time_lookup, self.xvec_timeline[-1])\n', (1607, 1659), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3093, 3171), 'keras.backend.function', 'T.function', (['[myModel.model.layers[0].input]', '[myModel.model.layers[-2].output]'], {}), '([myModel.model.layers[0].input], [myModel.model.layers[-2].output])\n', (3103, 3171), True, 'from keras import backend as T\n'), ((3291, 3322), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figure_size'}), '(figsize=figure_size)\n', (3301, 3322), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3331, 3351), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (3342, 3351), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3406, 3427), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.1, 1.1]'], {}), '([-0.1, 1.1])\n', (3414, 3427), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3543, 3604), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability of \n adverse event"""'], {'fontsize': '(14)'}), '("""Probability of \n adverse event""", fontsize=14)\n', (3553, 3604), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3727, 3747), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3738, 3747), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3802, 3823), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.1, 1.1]'], {}), '([-0.1, 1.1])\n', (3810, 3823), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3865, 3907), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""sample timeline"""'], {'fontsize': '(14)'}), "('sample timeline', fontsize=14)\n", (3875, 3907), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3999, 4060), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability of \n adverse event"""'], {'fontsize': '(14)'}), '("""Probability of \n adverse event""", fontsize=14)\n', (4009, 4060), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((5326, 5382), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_rows', 'num_columns'], {'figsize': 'figure_size'}), '(num_rows, num_columns, figsize=figure_size)\n', (5338, 5382), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((6890, 6929), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': 'figure_size'}), '(3, 3, figsize=figure_size)\n', (6902, 6929), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((22397, 22428), 'numpy.mean', 'np.mean', (['optimal_values'], {'axis': '(1)'}), '(optimal_values, axis=1)\n', (22404, 22428), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((22469, 22543), 'numpy.full', 'np.full', (['optimal_values.shape[0]', 'self.prediction_data.precursor_threshold'], {}), '(optimal_values.shape[0], 
self.prediction_data.precursor_threshold)\n', (22476, 22543), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((22884, 22937), 'os.path.join', 'os.path.join', (['model_output_directory', 'file_output_dir'], {}), '(model_output_directory, file_output_dir)\n', (22896, 22937), False, 'import os\n'), ((1041, 1074), 'dtmil.configuration.config_dtmil.get_json_config_data', 'get_json_config_data', (['dataset_dir'], {}), '(dataset_dir)\n', (1061, 1074), False, 'from dtmil.configuration.config_dtmil import get_json_config_data\n'), ((2941, 2966), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (2959, 2966), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3367, 3394), 'numpy.transpose', 'np.transpose', (['L[:nex, :, 0]'], {}), '(L[:nex, :, 0])\n', (3379, 3394), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((3763, 3790), 'numpy.transpose', 'np.transpose', (['L[nex:, :, 0]'], {}), '(L[nex:, :, 0])\n', (3775, 3790), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((4682, 4707), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (4700, 4707), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((7882, 7907), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (7900, 7907), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((9800, 9852), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_rows', 'num_columns'], {'figsize': 'figsize'}), '(num_rows, num_columns, figsize=figsize)\n', (9812, 9852), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((21556, 21586), 'numpy.std', 'np.std', (['optimal_values'], {'axis': '(1)'}), '(optimal_values, axis=1)\n', (21562, 21586), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((21614, 21645), 'numpy.mean', 'np.mean', (['optimal_values'], {'axis': '(1)'}), '(optimal_values, axis=1)\n', (21621, 21645), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((21996, 22040), 'numpy.array', 'np.array', (['[lower_guideline, upper_guideline]'], {}), '([lower_guideline, upper_guideline])\n', (22004, 22040), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((22143, 22190), 'numpy.percentile', 'np.percentile', (['optimal_values', '[10, 90]'], {'axis': '(1)'}), '(optimal_values, [10, 90], axis=1)\n', (22156, 22190), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((22952, 22990), 'os.path.exists', 'os.path.exists', (['model_output_directory'], {}), '(model_output_directory)\n', (22966, 22990), False, 'import os\n'), ((23070, 23105), 'os.makedirs', 'os.makedirs', (['model_output_directory'], {}), '(model_output_directory)\n', (23081, 23105), False, 'import os\n'), ((4181, 4212), 'numpy.where', 'np.where', (['(L[nex:, 80:, 0] > 0.5)'], {}), '(L[nex:, 80:, 0] > 0.5)\n', (4189, 4212), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((4271, 4323), 'os.path.join', 'os.path.join', (['model_output_directory', '"""timeline.png"""'], {}), "(model_output_directory, 'timeline.png')\n", (4283, 4323), False, 'import os\n'), ((21324, 21382), 'numpy.concatenate', 'np.concatenate', (['(myData.I_opt, myData.I_opt_valid)'], {'axis': '(0)'}), '((myData.I_opt, myData.I_opt_valid), axis=0)\n', (21338, 21382), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((13986, 14004), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14002, 14004), True, 'import numpy as np, matplotlib.pyplot as plt\n'), ((20348, 20385), 'numpy.abs', 'np.abs', 
(['(precursor_indeces - end_index)'], {}), '(precursor_indeces - end_index)\n', (20354, 20385), True, 'import numpy as np, matplotlib.pyplot as plt\n')]
|
import threading
def DisconnectAfterTimeout(timeout):
def Decorator(function):
def decorated_function(*s, **d):
def disconnect():
disconnectable = s[0]
disconnectable.disconnect()
timer = threading.Timer(timeout, disconnect)
timer.start()
return_value = None
try:
return_value = function(*s, **d)
finally:
timer.cancel()
return return_value
return decorated_function
return Decorator
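# Minimal usage sketch (the Connection class here is hypothetical): the first
# positional argument of the decorated function must expose a disconnect()
# method, since that is what the watchdog timer calls on timeout.
class Connection:
    def disconnect(self):
        print('disconnected by watchdog')

    @DisconnectAfterTimeout(5.0)
    def fetch(self):
        return 'payload'  # returning before the timeout cancels the timer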
|
[
"threading.Timer"
] |
[((258, 294), 'threading.Timer', 'threading.Timer', (['timeout', 'disconnect'], {}), '(timeout, disconnect)\n', (273, 294), False, 'import threading\n')]
|
from decimal import Decimal
import unittest, sys
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from unittest.mock import patch
from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv
class Test_Process_Raw_Data(unittest.TestCase):
#Test helper methods
def test_convert_datestring_array_to_datetime(self):
datestrings = ['2020-01-01 00:00:00', '2020-01-02 00:00:00', '2020-01-01 03:00:00']
expected_datetimes = [datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-02 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')]
self.assertEqual(expected_datetimes, convert_datestring_array_to_datetime(datestrings))
def test_create_expected_row(self):
input_row = [5,4,3,2,1]
expected_row = np.array([[1,2,3,4,1,2]])
actual_row = create_expected_row(input_row, [1,2])
self.assertTrue(np.array_equal(expected_row, actual_row))
#Test process_raw_data methods
def test_set_intervals(self):
intervals = [5, 5, 5]
set_intervals(intervals)
self.assertEqual(intervals, get_intervals())
def test_set_target_interval(self):
interval = timedelta(minutes=69)
set_target_interval(interval)
self.assertEqual(interval, get_target_interval())
def test_set_const_intervals(self):
expected_intervals = [3, 3, 3, 3, 3]
set_const_intervals(3, 5)
self.assertEqual(expected_intervals, get_intervals())
def test_set_max_input_minutes_missing(self):
minutes = 69
set_max_input_minutes_missing(minutes)
self.assertEqual(minutes, get_max_input_minutes_missing())
def test_set_market(self):
market = 'GBP/JPY'
set_market(market)
self.assertEqual(market, get_market())
def test_categorise_data(self):
self.assertEqual(1, apply_category_label_binary(1.2222, 1.2223))
self.assertEqual(0, apply_category_label_binary(1.2223, 1.2222))
@patch('forex_predictor.data_extraction.process_raw_data.pd')
def test_load_market_csv(self, mock_pd):
load_market_csv('EUR/GBP')
mock_pd.read_csv.assert_called_with('data/EUR_GBP.csv')
def test_get_dates(self):
intervals = [5, 5, 5]
set_intervals(intervals)
training_start = datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')
validation_start = datetime.strptime('2020-01-01 01:00:00', '%Y-%m-%d %H:%M:%S')
test_start = datetime.strptime('2020-01-01 02:00:00', '%Y-%m-%d %H:%M:%S')
test_end = datetime.strptime('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')
actual_training_dates, actual_validation_dates, actual_test_dates = get_dates(training_start, validation_start, test_start, test_end)
expected_training_dates = convert_datestring_array_to_datetime(['2020-01-01 00:00:00', '2020-01-01 00:15:00', '2020-01-01 00:30:00', '2020-01-01 00:45:00'])
expected_validation_dates = convert_datestring_array_to_datetime(['2020-01-01 01:00:00', '2020-01-01 01:15:00', '2020-01-01 01:30:00', '2020-01-01 01:45:00'])
expected_test_dates = convert_datestring_array_to_datetime(['2020-01-01 02:00:00', '2020-01-01 02:15:00', '2020-01-01 02:30:00', '2020-01-01 02:45:00'])
self.assertEqual(expected_training_dates, actual_training_dates)
self.assertEqual(expected_validation_dates, actual_validation_dates)
self.assertEqual(expected_test_dates, actual_test_dates)
@patch('forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates')
def test_get_relevant_data(self, mock_method):
set_intervals([15,15,15,15])
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
target_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
get_relevant_data(df, target_date)
start_date = datetime.strptime('2014-07-16 23:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 01:00:00', '%Y-%m-%d %H:%M:%S')
mock_method.assert_called_with(start_date, end_date, df)
def test_get_dataframe_from_dates(self):
original_df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime('2014-07-17 00:05:00', '%Y-%m-%d %H:%M:%S')
actual_df = get_dataframe_from_dates(start_date, end_date, original_df)
expected_df = original_df.iloc[74:79, :]
self.assertTrue(expected_df.equals(actual_df))
def test_find_start_date_index(self):
target_date = datetime.strptime('2014-07-18 08:46:00', '%Y-%m-%d %H:%M:%S')
df = pd.read_csv('tests/resources/dataframe_data.csv')
actual_index = find_start_date_index(df, target_date)
expected_index = 1994
self.assertEqual(expected_index, actual_index)
def test_process_input_data(self):
set_intervals([5, 5, 5])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
expected_input_data = pd.DataFrame(data=test_data)
actual_input_data = process_input_data(df)
self.assertTrue(expected_input_data.equals(actual_input_data))
def test_process_input_data_error(self):
set_intervals([5, 5, 5, 60])
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2013, :]
expected_error_message = 'Insufficient data to process for this number of intervals'
try:
actual_input_data = process_input_data(df)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(expected_error_message, str(exc_value))
def test_create_row(self):
set_intervals([5,5,5])
test_data = {
'datetime': ['2014-07-18 08:49:00', '2014-07-18 08:54:00', '2014-07-18 08:59:00'],
'open': [0.79227, 0.79223, 0.79315],
'high': [0.79231, 0.79312, 0.79325],
'low': [0.79216, 0.79219, 0.79279],
'close': [0.79222, 0.79312, 0.79284]
}
input_values = pd.DataFrame(data=test_data)
expected_row = create_expected_row([0.79227, 0.79231, 0.79216, 0.79222, 0.79223, 0.79312, 0.79219, 0.79312, 0.79315, 0.79325, 0.79279, 0.79284], [1, 2])
actual_row = create_row(input_values, [1,2])
self.assertTrue(np.array_equal(expected_row, actual_row))
def test_create_relevant_data_row(self):
set_intervals([5,5,5])
set_target_interval(timedelta(minutes=5))
df = pd.read_csv('tests/resources/dataframe_data.csv').iloc[1998:2018, :]
expected_row = create_expected_row([0.79227, 0.79231, 0.79216, 0.79222, 0.79223, 0.79312, 0.79219, 0.79312, 0.79315, 0.79325, 0.79279, 0.79284], [0.79283, 0.79258])
actual_row = create_relevant_data_row(df, datetime.strptime('2014-07-18 09:04:00', '%Y-%m-%d %H:%M:%S'))
self.assertTrue(np.array_equal(expected_row, actual_row))
def test_get_open_and_close_for_period(self):
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-21 18:00:00', '%Y-%m-%d %H:%M:%S')
open, close = get_open_and_close_for_period(df, start_date)
self.assertEqual(0.79194, open)
self.assertEqual(0.79193, close)
def test_get_open_and_close_for_period_error(self):
set_target_interval(timedelta(minutes=60))
df = pd.read_csv('tests/resources/dataframe_data.csv')
start_date = datetime.strptime('2014-07-21 19:00:00', '%Y-%m-%d %H:%M:%S')
expected_error_message = 'Open-close data unavailable for 2014-07-21 19:00:00 and interval of 60 minutes'
try:
open, close = get_open_and_close_for_period(df, start_date)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(expected_error_message, str(exc_value))
def convert_datestring_array_to_datetime(datestrings):
"""For readability when working with large amounts of datetimes
"""
datetimes = []
for datestring in datestrings:
datetimes.append(datetime.strptime(datestring, '%Y-%m-%d %H:%M:%S'))
return datetimes
def create_expected_row(input_row, outputs):
"""Create a row similar to how it is done in process_raw_data.py but with the advantage that this takes inputs as a python list
making it much easier to test. Can then use it in more integrated test with expected dataframe values
"""
values = np.array([input_row])
start_value = values[0][0]
values = values[:, 1:]
for i in range(0, len(values[0])):
values[0][i] = Decimal(str(start_value)) - Decimal(str(values[0][i]))
return np.hstack((values, [outputs]))
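# Worked example (illustrative): create_expected_row([5, 4, 3, 2, 1], [9])
# returns array([[1, 2, 3, 4, 9]]) -- each later value is subtracted from the
# start value 5, and the outputs are appended at the end.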
|
[
"forex_predictor.data_extraction.process_raw_data.apply_category_label_binary",
"pandas.read_csv",
"forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates",
"forex_predictor.data_extraction.process_raw_data.set_const_intervals",
"sys.exc_info",
"forex_predictor.data_extraction.process_raw_data.set_market",
"pandas.DataFrame",
"forex_predictor.data_extraction.process_raw_data.get_relevant_data",
"forex_predictor.data_extraction.process_raw_data.set_max_input_minutes_missing",
"datetime.timedelta",
"forex_predictor.data_extraction.process_raw_data.get_target_interval",
"forex_predictor.data_extraction.process_raw_data.get_open_and_close_for_period",
"numpy.hstack",
"unittest.mock.patch",
"forex_predictor.data_extraction.process_raw_data.get_intervals",
"datetime.datetime.strptime",
"forex_predictor.data_extraction.process_raw_data.set_intervals",
"forex_predictor.data_extraction.process_raw_data.create_row",
"forex_predictor.data_extraction.process_raw_data.get_dates",
"forex_predictor.data_extraction.process_raw_data.get_max_input_minutes_missing",
"forex_predictor.data_extraction.process_raw_data.process_input_data",
"forex_predictor.data_extraction.process_raw_data.load_market_csv",
"numpy.array",
"forex_predictor.data_extraction.process_raw_data.set_target_interval",
"numpy.array_equal",
"forex_predictor.data_extraction.process_raw_data.find_start_date_index",
"forex_predictor.data_extraction.process_raw_data.get_market"
] |
[((2459, 2519), 'unittest.mock.patch', 'patch', (['"""forex_predictor.data_extraction.process_raw_data.pd"""'], {}), "('forex_predictor.data_extraction.process_raw_data.pd')\n", (2464, 2519), False, 'from unittest.mock import patch\n'), ((3954, 4046), 'unittest.mock.patch', 'patch', (['"""forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates"""'], {}), "(\n 'forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates'\n )\n", (3959, 4046), False, 'from unittest.mock import patch\n'), ((9419, 9440), 'numpy.array', 'np.array', (['[input_row]'], {}), '([input_row])\n', (9427, 9440), True, 'import numpy as np\n'), ((9627, 9657), 'numpy.hstack', 'np.hstack', (['(values, [outputs])'], {}), '((values, [outputs]))\n', (9636, 9657), True, 'import numpy as np\n'), ((1250, 1280), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 1, 2]]'], {}), '([[1, 2, 3, 4, 1, 2]])\n', (1258, 1280), True, 'import numpy as np\n'), ((1509, 1533), 'forex_predictor.data_extraction.process_raw_data.set_intervals', 'set_intervals', (['intervals'], {}), '(intervals)\n', (1522, 1533), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((1651, 1672), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(69)'}), '(minutes=69)\n', (1660, 1672), False, 'from datetime import datetime, timedelta\n'), ((1681, 1710), 'forex_predictor.data_extraction.process_raw_data.set_target_interval', 'set_target_interval', (['interval'], {}), '(interval)\n', (1700, 1710), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((1863, 1888), 'forex_predictor.data_extraction.process_raw_data.set_const_intervals', 'set_const_intervals', (['(3)', '(5)'], {}), '(3, 5)\n', (1882, 1888), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((2031, 2069), 'forex_predictor.data_extraction.process_raw_data.set_max_input_minutes_missing', 'set_max_input_minutes_missing', (['minutes'], {}), '(minutes)\n', (2060, 2069), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, 
get_target_interval, apply_category_label_binary, load_market_csv\n'), ((2204, 2222), 'forex_predictor.data_extraction.process_raw_data.set_market', 'set_market', (['market'], {}), '(market)\n', (2214, 2222), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((2573, 2599), 'forex_predictor.data_extraction.process_raw_data.load_market_csv', 'load_market_csv', (['"""EUR/GBP"""'], {}), "('EUR/GBP')\n", (2588, 2599), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((2733, 2757), 'forex_predictor.data_extraction.process_raw_data.set_intervals', 'set_intervals', (['intervals'], {}), '(intervals)\n', (2746, 2757), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((2783, 2844), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-01-01 00:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')\n", (2800, 2844), False, 'from datetime import datetime, timedelta\n'), ((2872, 2933), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-01-01 01:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2020-01-01 01:00:00', '%Y-%m-%d %H:%M:%S')\n", (2889, 2933), False, 'from datetime import datetime, timedelta\n'), ((2955, 3016), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-01-01 02:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2020-01-01 02:00:00', '%Y-%m-%d %H:%M:%S')\n", (2972, 3016), False, 'from datetime import datetime, timedelta\n'), ((3036, 3097), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-01-01 03:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')\n", (3053, 3097), False, 'from datetime import datetime, timedelta\n'), ((3174, 3239), 'forex_predictor.data_extraction.process_raw_data.get_dates', 'get_dates', (['training_start', 'validation_start', 'test_start', 'test_end'], {}), '(training_start, validation_start, test_start, test_end)\n', (3183, 3239), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, 
get_target_interval, apply_category_label_binary, load_market_csv\n'), ((4096, 4127), 'forex_predictor.data_extraction.process_raw_data.set_intervals', 'set_intervals', (['[15, 15, 15, 15]'], {}), '([15, 15, 15, 15])\n', (4109, 4127), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((4189, 4238), 'pandas.read_csv', 'pd.read_csv', (['"""tests/resources/dataframe_data.csv"""'], {}), "('tests/resources/dataframe_data.csv')\n", (4200, 4238), True, 'import pandas as pd\n'), ((4261, 4322), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2014-07-17 00:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')\n", (4278, 4322), False, 'from datetime import datetime, timedelta\n'), ((4331, 4365), 'forex_predictor.data_extraction.process_raw_data.get_relevant_data', 'get_relevant_data', (['df', 'target_date'], {}), '(df, target_date)\n', (4348, 4365), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((4387, 4448), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2014-07-16 23:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2014-07-16 23:00:00', '%Y-%m-%d %H:%M:%S')\n", (4404, 4448), False, 'from datetime import datetime, timedelta\n'), ((4468, 4529), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2014-07-17 01:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2014-07-17 01:00:00', '%Y-%m-%d %H:%M:%S')\n", (4485, 4529), False, 'from datetime import datetime, timedelta\n'), ((4663, 4712), 'pandas.read_csv', 'pd.read_csv', (['"""tests/resources/dataframe_data.csv"""'], {}), "('tests/resources/dataframe_data.csv')\n", (4674, 4712), True, 'import pandas as pd\n'), ((4734, 4795), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2014-07-17 00:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2014-07-17 00:00:00', '%Y-%m-%d %H:%M:%S')\n", (4751, 4795), False, 'from datetime import datetime, timedelta\n'), ((4815, 4876), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2014-07-17 00:05:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2014-07-17 00:05:00', '%Y-%m-%d %H:%M:%S')\n", (4832, 4876), False, 'from datetime import datetime, timedelta\n'), ((4897, 4956), 'forex_predictor.data_extraction.process_raw_data.get_dataframe_from_dates', 'get_dataframe_from_dates', (['start_date', 'end_date', 'original_df'], {}), '(start_date, end_date, original_df)\n', (4921, 4956), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, 
set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((5126, 5187), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2014-07-18 08:46:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2014-07-18 08:46:00', '%Y-%m-%d %H:%M:%S')\n", (5143, 5187), False, 'from datetime import datetime, timedelta\n'), ((5201, 5250), 'pandas.read_csv', 'pd.read_csv', (['"""tests/resources/dataframe_data.csv"""'], {}), "('tests/resources/dataframe_data.csv')\n", (5212, 5250), True, 'import pandas as pd\n'), ((5275, 5313), 'forex_predictor.data_extraction.process_raw_data.find_start_date_index', 'find_start_date_index', (['df', 'target_date'], {}), '(df, target_date)\n', (5296, 5313), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((5447, 5471), 'forex_predictor.data_extraction.process_raw_data.set_intervals', 'set_intervals', (['[5, 5, 5]'], {}), '([5, 5, 5])\n', (5460, 5471), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((5906, 5934), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'test_data'}), '(data=test_data)\n', (5918, 5934), True, 'import pandas as pd\n'), ((5963, 5985), 'forex_predictor.data_extraction.process_raw_data.process_input_data', 'process_input_data', (['df'], {}), '(df)\n', (5981, 5985), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((6111, 6139), 'forex_predictor.data_extraction.process_raw_data.set_intervals', 'set_intervals', (['[5, 5, 5, 60]'], {}), '([5, 5, 5, 60])\n', (6124, 6139), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((6570, 6594), 'forex_predictor.data_extraction.process_raw_data.set_intervals', 'set_intervals', (['[5, 5, 5]'], {}), '([5, 5, 5])\n', (6583, 6594), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, 
get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((6938, 6966), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'test_data'}), '(data=test_data)\n', (6950, 6966), True, 'import pandas as pd\n'), ((7149, 7181), 'forex_predictor.data_extraction.process_raw_data.create_row', 'create_row', (['input_values', '[1, 2]'], {}), '(input_values, [1, 2])\n', (7159, 7181), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((7301, 7325), 'forex_predictor.data_extraction.process_raw_data.set_intervals', 'set_intervals', (['[5, 5, 5]'], {}), '([5, 5, 5])\n', (7314, 7325), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((7923, 7972), 'pandas.read_csv', 'pd.read_csv', (['"""tests/resources/dataframe_data.csv"""'], {}), "('tests/resources/dataframe_data.csv')\n", (7934, 7972), True, 'import pandas as pd\n'), ((7995, 8056), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2014-07-21 18:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2014-07-21 18:00:00', '%Y-%m-%d %H:%M:%S')\n", (8012, 8056), False, 'from datetime import datetime, timedelta\n'), ((8079, 8124), 'forex_predictor.data_extraction.process_raw_data.get_open_and_close_for_period', 'get_open_and_close_for_period', (['df', 'start_date'], {}), '(df, start_date)\n', (8108, 8124), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((8327, 8376), 'pandas.read_csv', 'pd.read_csv', (['"""tests/resources/dataframe_data.csv"""'], {}), "('tests/resources/dataframe_data.csv')\n", (8338, 8376), True, 'import pandas as pd\n'), ((8399, 8460), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2014-07-21 19:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2014-07-21 19:00:00', '%Y-%m-%d %H:%M:%S')\n", (8416, 8460), False, 'from datetime import datetime, timedelta\n'), ((869, 930), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-01-01 00:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')\n", (886, 930), False, 'from datetime import datetime, timedelta\n'), ((932, 993), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-01-02 00:00:00"""', '"""%Y-%m-%d 
%H:%M:%S"""'], {}), "('2020-01-02 00:00:00', '%Y-%m-%d %H:%M:%S')\n", (949, 993), False, 'from datetime import datetime, timedelta\n'), ((995, 1056), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2020-01-01 03:00:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2020-01-01 03:00:00', '%Y-%m-%d %H:%M:%S')\n", (1012, 1056), False, 'from datetime import datetime, timedelta\n'), ((1359, 1399), 'numpy.array_equal', 'np.array_equal', (['expected_row', 'actual_row'], {}), '(expected_row, actual_row)\n', (1373, 1399), True, 'import numpy as np\n'), ((1570, 1585), 'forex_predictor.data_extraction.process_raw_data.get_intervals', 'get_intervals', ([], {}), '()\n', (1583, 1585), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((1746, 1767), 'forex_predictor.data_extraction.process_raw_data.get_target_interval', 'get_target_interval', ([], {}), '()\n', (1765, 1767), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((1934, 1949), 'forex_predictor.data_extraction.process_raw_data.get_intervals', 'get_intervals', ([], {}), '()\n', (1947, 1949), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((2104, 2135), 'forex_predictor.data_extraction.process_raw_data.get_max_input_minutes_missing', 'get_max_input_minutes_missing', ([], {}), '()\n', (2133, 2135), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((2256, 2268), 'forex_predictor.data_extraction.process_raw_data.get_market', 'get_market', ([], {}), '()\n', (2266, 2268), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, 
load_market_csv\n'), ((2335, 2378), 'forex_predictor.data_extraction.process_raw_data.apply_category_label_binary', 'apply_category_label_binary', (['(1.2222)', '(1.2223)'], {}), '(1.2222, 1.2223)\n', (2362, 2378), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((2408, 2451), 'forex_predictor.data_extraction.process_raw_data.apply_category_label_binary', 'apply_category_label_binary', (['(1.2223)', '(1.2222)'], {}), '(1.2223, 1.2222)\n', (2435, 2451), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((4153, 4174), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(60)'}), '(minutes=60)\n', (4162, 4174), False, 'from datetime import datetime, timedelta\n'), ((6360, 6382), 'forex_predictor.data_extraction.process_raw_data.process_input_data', 'process_input_data', (['df'], {}), '(df)\n', (6378, 6382), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((7205, 7245), 'numpy.array_equal', 'np.array_equal', (['expected_row', 'actual_row'], {}), '(expected_row, actual_row)\n', (7219, 7245), True, 'import numpy as np\n'), ((7352, 7372), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(5)'}), '(minutes=5)\n', (7361, 7372), False, 'from datetime import datetime, timedelta\n'), ((7679, 7740), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2014-07-18 09:04:00"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2014-07-18 09:04:00', '%Y-%m-%d %H:%M:%S')\n", (7696, 7740), False, 'from datetime import datetime, timedelta\n'), ((7766, 7806), 'numpy.array_equal', 'np.array_equal', (['expected_row', 'actual_row'], {}), '(expected_row, actual_row)\n', (7780, 7806), True, 'import numpy as np\n'), ((7887, 7908), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(60)'}), '(minutes=60)\n', (7896, 7908), False, 'from datetime import datetime, timedelta\n'), ((8291, 8312), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(60)'}), '(minutes=60)\n', (8300, 8312), False, 'from datetime import datetime, timedelta\n'), ((8614, 8659), 'forex_predictor.data_extraction.process_raw_data.get_open_and_close_for_period', 'get_open_and_close_for_period', (['df', 'start_date'], {}), '(df, start_date)\n', (8643, 8659), False, 'from forex_predictor.data_extraction.process_raw_data import create_relevant_data_row, create_row, find_start_date_index, get_dataframe_from_dates, get_dates, 
get_market, get_max_input_minutes_missing, get_open_and_close_for_period, get_relevant_data, process_input_data, set_intervals, set_const_intervals, set_market, set_max_input_minutes_missing, set_target_interval, get_intervals, get_target_interval, apply_category_label_binary, load_market_csv\n'), ((9036, 9086), 'datetime.datetime.strptime', 'datetime.strptime', (['datestring', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(datestring, '%Y-%m-%d %H:%M:%S')\n", (9053, 9086), False, 'from datetime import datetime, timedelta\n'), ((5485, 5534), 'pandas.read_csv', 'pd.read_csv', (['"""tests/resources/dataframe_data.csv"""'], {}), "('tests/resources/dataframe_data.csv')\n", (5496, 5534), True, 'import pandas as pd\n'), ((6153, 6202), 'pandas.read_csv', 'pd.read_csv', (['"""tests/resources/dataframe_data.csv"""'], {}), "('tests/resources/dataframe_data.csv')\n", (6164, 6202), True, 'import pandas as pd\n'), ((6448, 6462), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6460, 6462), False, 'import unittest, sys\n'), ((7387, 7436), 'pandas.read_csv', 'pd.read_csv', (['"""tests/resources/dataframe_data.csv"""'], {}), "('tests/resources/dataframe_data.csv')\n", (7398, 7436), True, 'import pandas as pd\n'), ((8725, 8739), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8737, 8739), False, 'import unittest, sys\n')]
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import os
import unittest
import logging
from base_agent.nsp_dialogue_manager import NSPDialogueManager
from base_agent.loco_mc_agent import LocoMCAgent
from base_agent.test.all_test_commands import *
from fake_agent import MockOpt
class AttributeDict(dict):
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
class FakeAgent(LocoMCAgent):
def __init__(self, opts):
super(FakeAgent, self).__init__(opts)
self.opts = opts
def init_memory(self):
self.memory = "memory"
def init_physical_interfaces(self):
pass
def init_perception(self):
pass
def init_controller(self):
dialogue_object_classes = {}
self.dialogue_manager = NSPDialogueManager(self, dialogue_object_classes, self.opts)
# NOTE: The following commands in locobot_commands can't be supported
# right away but we'll attempt them in the next round:
# "push the chair",
# "find the closest red thing",
# "copy this motion",
# "topple the pile of notebooks",
locobot_commands = list(GROUND_TRUTH_PARSES) + [
"push the chair",
"find the closest red thing",
"copy this motion",
"topple the pile of notebooks",
]
TTAD_MODEL_DIR = os.path.join(os.path.dirname(__file__), "../agent/models/semantic_parser/")
TTAD_BERT_DATA_DIR = os.path.join(os.path.dirname(__file__), "../agent/datasets/annotated_data/")
GROUND_TRUTH_DATA_DIR = os.path.join(os.path.dirname(__file__), "../agent/datasets/ground_truth/")
class TestDialogueManager(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestDialogueManager, self).__init__(*args, **kwargs)
opts = MockOpt()
opts.nsp_data_dir = TTAD_BERT_DATA_DIR
opts.ground_truth_data_dir = GROUND_TRUTH_DATA_DIR
opts.nsp_models_dir = TTAD_MODEL_DIR
opts.no_ground_truth = False
self.agent = FakeAgent(opts)
def test_parses(self):
logging.info(
"Printing semantic parsing for {} locobot commands".format(len(locobot_commands))
)
for command in locobot_commands:
ground_truth_parse = GROUND_TRUTH_PARSES.get(command, None)
model_prediction = self.agent.dialogue_manager.get_logical_form(
command, self.agent.dialogue_manager.model
)
logging.info(
"\nCommand -> '{}' \nGround truth -> {} \nParse -> {}\n".format(
command, ground_truth_parse, model_prediction
)
)
def test_validate_bad_json(self):
is_valid_json = self.agent.dialogue_manager.model.validate_parse_tree({})
self.assertFalse(is_valid_json)
def test_validate_array_span_json(self):
action_dict = {'dialogue_type': 'HUMAN_GIVE_COMMAND', 'action_sequence': [{'action_type': 'BUILD', 'schematic': {'text_span': [0, [5, 5]], 'triples': [{'pred_text': 'has_name', 'obj_text': [0, [5, 5]]}]}}]}
is_valid_json = self.agent.dialogue_manager.model.validate_parse_tree(action_dict)
self.assertTrue(is_valid_json)
def test_validate_string_span_json(self):
action_dict = {'dialogue_type': 'HUMAN_GIVE_COMMAND', 'action_sequence': [{'action_type': 'DANCE', 'dance_type': {'look_turn': {'location': {'reference_object': {'filters': {'triples': [{'pred_text': 'has_name', 'obj_text': 'cube'}]}}}}}}]}
is_valid_json = self.agent.dialogue_manager.model.validate_parse_tree(action_dict)
self.assertTrue(is_valid_json)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"os.path.dirname",
"fake_agent.MockOpt",
"base_agent.nsp_dialogue_manager.NSPDialogueManager"
] |
[((1275, 1300), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1290, 1300), False, 'import os\n'), ((1372, 1397), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1387, 1397), False, 'import os\n'), ((1473, 1498), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1488, 1498), False, 'import os\n'), ((3577, 3592), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3590, 3592), False, 'import unittest\n'), ((781, 841), 'base_agent.nsp_dialogue_manager.NSPDialogueManager', 'NSPDialogueManager', (['self', 'dialogue_object_classes', 'self.opts'], {}), '(self, dialogue_object_classes, self.opts)\n', (799, 841), False, 'from base_agent.nsp_dialogue_manager import NSPDialogueManager\n'), ((1706, 1715), 'fake_agent.MockOpt', 'MockOpt', ([], {}), '()\n', (1713, 1715), False, 'from fake_agent import MockOpt\n')]
|
# -*- coding: utf-8 -*-
import uuid
import hashlib
from datetime import datetime
from flask import current_app, request
from flask.ext.login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from server.exceptions import ValidationError
from . import db, login_manager
class User(db.Model):
__tablename__ = 'tb_main_users'
id = db.Column(db.String(64), primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
name = db.Column(db.UnicodeText(64))
status = db.Column(db.String(64), default='normal')
last_seen = db.Column(db.DateTime())
created_timestamp = db.Column(db.DateTime(), default=db.func.now())
updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now())
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
@property
def is_active(self):
return self.status == 'normal'
@property
def is_authenticated(self):
return self.is_active
@property
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id)
except AttributeError:
raise NotImplementedError("No `id` attribute - override get_id")
@property
def password(self):
raise AttributeError('Can not get password')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def is_admin(self):
return self.username == current_app.config['ADMIN_USERNAME']
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
db.session.commit()
def can(self, action):
if self.is_admin() and action in current_app.config['ADMIN_DEFAULT_ACL_ACTIONS']:
return True
if UserAcl.query.filter_by(user_id=self.id, action=action).first():
return True
return False
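    # Note: in can_any/can_all below, the `else` clause belongs to the `for`
    # loop and runs only when the loop finishes without returning early.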
def can_any(self, *actions):
for action in actions:
if self.can(action):
return True
else:
return False
def can_all(self, *actions):
for action in actions:
if not self.can(action):
return False
else:
return True
@staticmethod
def new(**kwargs):
kwargs['id'] = uuid.uuid4().hex
return User(**kwargs)
def generate_auth_token(self, expiration):
s = Serializer(current_app.config['SECRET_KEY'],
expires_in=expiration)
return s.dumps({'id': self.id}).decode('ascii')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
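    # Illustrative round trip (not part of the original code):
    #   token = user.generate_auth_token(3600)
    #   User.verify_auth_token(token)  # -> the same user, or None once expired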
def __repr__(self):
return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
def is_admin(self):
return False
def can(self, *args, **kwargs):
return False
can_any = can
can_all = can
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
class UserAcl(db.Model):
__tablename__ = 'tb_main_user_acl'
id = db.Column(db.String(64), primary_key=True)
user_id = db.Column(db.String(64))
action = db.Column(db.String(128))
created_timestamp = db.Column(db.DateTime(), default=db.func.now())
updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now())
def __init__(self, **kwargs):
super(UserAcl, self).__init__(**kwargs)
@staticmethod
def new(**kwargs):
kwargs['id'] = uuid.uuid4().hex
return UserAcl(**kwargs)
def __repr__(self):
return '<UserAcl %r, %r>' % (self.user_id, self.action)
class OperationRecord(db.Model):
__tablename__ = 'tb_main_operation_records'
id = db.Column(db.String(64), primary_key=True)
user_id = db.Column(db.String(64))
operation_note = db.Column(db.Text())
created_timestamp = db.Column(db.DateTime(), default=db.func.now())
updated_timestamp = db.Column(db.DateTime(), default=db.func.now(), onupdate=db.func.now())
def __init__(self, **kwargs):
super(OperationRecord, self).__init__(**kwargs)
@staticmethod
def new(**kwargs):
kwargs['id'] = uuid.uuid4().hex
return OperationRecord(**kwargs)
def __repr__(self):
return '<OperationRecord %r>' % self.user_id
|
[
"uuid.uuid4",
"datetime.datetime.utcnow",
"werkzeug.security.check_password_hash",
"itsdangerous.TimedJSONWebSignatureSerializer",
"werkzeug.security.generate_password_hash"
] |
[((1649, 1681), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {}), '(password)\n', (1671, 1681), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((1739, 1788), 'werkzeug.security.check_password_hash', 'check_password_hash', (['self.password_hash', 'password'], {}), '(self.password_hash, password)\n', (1758, 1788), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((1929, 1946), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1944, 1946), False, 'from datetime import datetime\n'), ((2776, 2843), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', (["current_app.config['SECRET_KEY']"], {'expires_in': 'expiration'}), "(current_app.config['SECRET_KEY'], expires_in=expiration)\n", (2786, 2843), True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n'), ((2988, 3032), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', (["current_app.config['SECRET_KEY']"], {}), "(current_app.config['SECRET_KEY'])\n", (2998, 3032), True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n'), ((2669, 2681), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2679, 2681), False, 'import uuid\n'), ((4092, 4104), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4102, 4104), False, 'import uuid\n'), ((4798, 4810), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4808, 4810), False, 'import uuid\n')]
|
# data predicting
import pickle
import numpy as np
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from MLEK.main.optimizer import Minimizer
from MLEK.tools.plot_tools import plot_prediction
with open('/Users/hongbinren/Documents/program/MLEK/example_demo/demo_best_estimator', 'rb') as f:
estimator = pickle.load(f)
with open('/Users/hongbinren/Documents/program/MLEK/example_demo/demo_test_data', 'rb') as f1:
test_data = pickle.load(f1)
Ek_test, densx_test, dEkx_test = test_data[:, 0], test_data[:, 1:503], test_data[:, 503:]
## estimate the kinetic energy
Ek_predict = estimator.predict(densx_test)
## estimate the kinetic energy derivative
dEkxt_predict = estimator.predict_gradient(densx_test)
dEkx_predict = estimator.named_steps['reduce_dim'].inverse_transform_gradient(dEkxt_predict)
## estimate the ground state electron density
with open('/Users/hongbinren/Documents/program/MLEK/example_demo/demo_train_data', 'rb') as f2:
train_data = pickle.load(f2)
Ek_train, densx_train, dEkx_train = train_data[:, 0], train_data[:, 1:503], train_data[:, 503:]
densx_init = densx_train[0]
densx_true, Vx_true = densx_test[4], -dEkx_test[4]
mu, N = 1.0, 1.0
optimizer = Minimizer(estimator)
densx_predict = optimizer.run(densx_init[np.newaxis, :], Vx_true[np.newaxis, :], mu, N)
## plot results
plot_prediction(Ek_test, Ek_predict, densx_true, densx_predict, densx_init)
|
[
"MLEK.tools.plot_tools.plot_prediction",
"MLEK.main.optimizer.Minimizer",
"pickle.load"
] |
[((1221, 1241), 'MLEK.main.optimizer.Minimizer', 'Minimizer', (['estimator'], {}), '(estimator)\n', (1230, 1241), False, 'from MLEK.main.optimizer import Minimizer\n'), ((1347, 1422), 'MLEK.tools.plot_tools.plot_prediction', 'plot_prediction', (['Ek_test', 'Ek_predict', 'densx_true', 'densx_predict', 'densx_init'], {}), '(Ek_test, Ek_predict, densx_true, densx_predict, densx_init)\n', (1362, 1422), False, 'from MLEK.tools.plot_tools import plot_prediction\n'), ((342, 356), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (353, 356), False, 'import pickle\n'), ((468, 483), 'pickle.load', 'pickle.load', (['f1'], {}), '(f1)\n', (479, 483), False, 'import pickle\n'), ((1000, 1015), 'pickle.load', 'pickle.load', (['f2'], {}), '(f2)\n', (1011, 1015), False, 'import pickle\n')]
|
from django.urls import path
from . import views
app_name = "charts"
urlpatterns = [
path("", views.home, name="dashboard"),
]
|
[
"django.urls.path"
] |
[((92, 130), 'django.urls.path', 'path', (['""""""', 'views.home'], {'name': '"""dashboard"""'}), "('', views.home, name='dashboard')\n", (96, 130), False, 'from django.urls import path\n')]
|
__author__ = '<NAME>'
__date__ = '2019-05-11'
__license__ = 'MIT License'
import logging
log = logging.getLogger(__name__)
def log_call(fn):
def inner(*args, **kwargs):
log.debug('Function %s called with %s and %s', fn.__name__, args, kwargs)
return fn(*args, **kwargs)
return inner
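# Illustrative usage (assumption, not in the original module):
#
#   @log_call
#   def add(a, b):
#       return a + b
#
#   add(1, 2)  # logs: "Function add called with (1, 2) and {}"
#
# Note that the wrapper does not preserve fn.__name__; functools.wraps
# would fix that if desired.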
|
[
"logging.getLogger"
] |
[((97, 124), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (114, 124), False, 'import logging\n')]
|
import argparse
def get_args():
"""
Utility for getting the arguments from the user for running the experiment
:return: parsed arguments
"""
# Env
parser = argparse.ArgumentParser(description='collect arguments')
parser.add_argument('--save_dir', type=str, default="results/grid/sarsa/")
parser.add_argument('--exp_no', type=str, default="4")
parser.add_argument('--env-name', default='pg',
help="pg: point gather env\n"\
"cheetah: safe-cheetah env\n"\
"grid: grid world env\n"\
"pc: point circle env\n"\
)
parser.add_argument('--agent', default='ppo',
help="the RL algo to use\n"\
"ppo: for ppo\n"\
"lyp-ppo: for Lyapnunov based ppo\n" \
"bvf-ppo: for Backward value function based ppo\n" \
"sarsa: for n-step sarsa\n" \
"lyp-sarsa: for Lyapnunov based sarsa\n"\
"bvf-sarsa: for Backward Value Function based sarsa"\
)
parser.add_argument('--gamma', type=float, default=0.99, help="discount factor")
parser.add_argument('--d0', type=float, default=5.0, help="the threshold for safety")
# Actor Critic arguments goes here
    parser.add_argument('--value-loss-coef', type=float, default=0.5,
                        help="value loss coefficient")
parser.add_argument('--target-update-steps', type=int, default=int(1e4),
help="number of steps after to train the agent")
parser.add_argument('--beta', type=float, default=0.001, help='entropy regularization')
parser.add_argument('--critic-lr', type=float, default=1e-3, help="critic learning rate")
parser.add_argument('--updates-per-step', type=int, default=1, help='model updates per simulator step (default: 1)')
    parser.add_argument('--tau', type=float, default=0.001, help='soft update rule for target network (default: 0.001)')
# PPO arguments go here
parser.add_argument('--num-envs', type=int, default=10, help='the num of envs to gather data in parallel')
parser.add_argument('--ppo-updates', type=int, default=1, help='num of ppo updates to do')
parser.add_argument('--gae', type=float, default=0.95, help='GAE coefficient')
parser.add_argument('--clip', type=float, default=0.2, help='clipping param for PPO')
parser.add_argument('--traj-len', type=int, default= 10, help="the maximum length of the trajectory for an update")
parser.add_argument('--early-stop', action='store_true',
help="early stop pi training based on target KL ")
# Optmization arguments
parser.add_argument('--lr', type=float, default=1e-2,
help="learning rate")
    parser.add_argument('--adam-eps', type=float, default=0.95, help="momentum for RMSProp")
parser.add_argument('--batch-size', type=int, default=32,
                        help='size of minibatch for ppo/ddpg update')
# Safety params
parser.add_argument('--cost-reverse-lr', type=float, default=5e-4,
help="reverse learning rate for reviewer")
parser.add_argument('--cost-q-lr', type=float, default=5e-4,
help="reverse learning rate for critic")
parser.add_argument('--cost-sg-coeff', type=float, default=0.0,
help="the coeeficient for the safe guard policy, minimizes the cost")
parser.add_argument('--prob-alpha', type=float, default=0.6,
help="the kappa parameter for the target networks")
parser.add_argument('--target', action='store_true',
help="use the target network based implementation")
# Training arguments
parser.add_argument('--num-steps', type=int, default=int(1e4),
help="number of steps to train the agent")
parser.add_argument('--num-episodes', type=int, default=int(2e5),
help="number of episodes to train the agetn")
parser.add_argument('--max-ep-len', type=int, default=int(15),
help="number of steps in an episode")
# Evaluation arguments
parser.add_argument('--eval-every', type=float, default=1000,
help="eval after these many steps")
parser.add_argument('--eval-n', type=int, default=1,
help="average eval results over these many episodes")
# Experiment specific
parser.add_argument('--gpu', action='store_true', help="use the gpu and CUDA")
parser.add_argument('--log-mode-steps', action='store_true',
help="changes the mode of logging w.r.r num of steps instead of episodes")
parser.add_argument('--log-every', type=int, default=100,
help="logging schedule for training")
    parser.add_argument('--checkpoint-interval', type=int, default=int(1e5),
help="when to save the models")
parser.add_argument('--seed', type=int, default=7)
parser.add_argument('--out', type=str, default='/tmp/safe/models/')
parser.add_argument('--log-dir', type=str, default="/tmp/safe/logs/")
parser.add_argument('--reset-dir', action='store_true',
help="give this argument to delete the existing logs for the current set of parameters")
args = parser.parse_args()
return args
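# Illustrative usage (assumption, not part of the original file):
#   args = get_args()
#   print(args.agent, args.env_name, args.num_steps)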
# DQN specific arguments
# parser.add_argument('--eps-decay-steps',type=int, default=10000,
# help="eps decay rate in num of episodes (1/decay_rate)")
# parser.add_argument('--prioritized', action='store_true',
# help="If true use the prioritized buffer")
# parser.add_argument('--beta-decay-steps',type=int, default=100,
# help="eps decay rate in num of episodes (1/decay_rate)")
# parser.add_argument('--beta-start', type=float, default=0.4,
# help="the intial beta for the IS correction")
# parser.add_argument('--dqn-target-update',type=int, default=1000,
# help="number of steps after which to update the target dqn")
# Safe_DQN stuff
# parser.add_argument('--pi-update-steps',type=int, default=10,
# help="number of times to run the inner optimization loop")
# parser.add_argument('--max-grad-norm', type=float, default=5.0, help='max norm of gradients (default: 0.5)')
# parser.add_argument('--ou-sigma', type=float, default=0.2, help="std for ou noise")
# parser.add_argument('--replay-size', type=int, default=10000, help='size of replay buffer (default: 10000)')
|
[
"argparse.ArgumentParser"
] |
[((184, 240), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""collect arguments"""'}), "(description='collect arguments')\n", (207, 240), False, 'import argparse\n')]
|
import sensor_msgs.msg
from sensor_msgs.msg import *
from . import point_cloud2
import importlib
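# Bind the submodules as module objects so they can be re-exported via __all__.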
msg = importlib.import_module('sensor_msgs.msg')
point_cloud2 = importlib.import_module('sensor_msgs.point_cloud2')
__all__ = ['msg', 'point_cloud2']
|
[
"importlib.import_module"
] |
[((104, 146), 'importlib.import_module', 'importlib.import_module', (['"""sensor_msgs.msg"""'], {}), "('sensor_msgs.msg')\n", (127, 146), False, 'import importlib\n'), ((162, 213), 'importlib.import_module', 'importlib.import_module', (['"""sensor_msgs.point_cloud2"""'], {}), "('sensor_msgs.point_cloud2')\n", (185, 213), False, 'import importlib\n')]
|
"""Basic facade for the exiftool executable"""
# Public
import os.path
import shlex
import subprocess
import sys
# Internal
import exiftoolinst
class ExiftoolWrap:
_path = None
_path_to_binary = None
def __init__(self, path=""):
self._path = path
self._detect_installation()
#
# Public
#
def launch_file_rename(self, path_to_images, prefix, file_ext, use_date_time):
"""Launches exiftool with a command to rename all images files
Parameters:
            path_to_images: the directory in which to look for images (non-recursive)
prefix: prefix to add to the renamed files
file_ext: which file types to rename specified by file extension
(ex. .jpg)
use_date_time: True will add the date and time taken to the
filename of the images
"""
if not self.is_installed():
return
        # Date format: "%Y-%m-%d_%Hh%Mm%Ss"
# Extra notations:
# %e: Extension of the original file
# %c: Add a copy number to avoid name collision with existing filenames
# Note that these codes must be escaped with an extra % if used within a
# date format string.
# The full command should look something like this:
# exiftool.exe "-FileName<MyPrefix_${DateTimeOriginal}%-c.%e" -d "%Y-%m-%d_%Hh%Mm%Ss" c:\myfolder
date_time_original = ""
if use_date_time:
date_time_original = "${DateTimeOriginal}"
command_line = "\"" + self._path_to_binary + \
"\" \"-FileName<" + \
prefix + \
date_time_original + \
"%-c.%e\" -d %Y-%m-%d_%Hh%Mm%Ss \"" + \
os.path.join(path_to_images, file_ext) + "\""
return command_line, self._createProcess(command_line)
def is_installed(self):
        return self._path_to_binary is not None
def set_exiftool_path_manually(self, file):
"""Will set the exiftool executable filepath if it's valid.
If the location is invalid, our previous location will be kept.
return: True if the path is valid.
"""
valid = False
if self._is_valid_exiftool_executable(file):
self._path_to_binary = file
valid = True
return valid
def get_path_to_binary(self):
return self._path_to_binary
#
# Private
#
def _detect_installation(self):
"""Returns True if the installation has been detected successfully."""
self._path_to_binary = None
if not self._try_installation_path(self._path, "exiftool.exe"):
self._try_installation_path("", "exiftool.exe")
        return self._path_to_binary is not None
def _try_installation_path(self, path, executable_name):
"""True if the path to the binary was found and _path_to_binary was set"""
ret = False
if self._is_valid_exiftool_executable(os.path.join(path, executable_name)):
self._path = path
self._path_to_binary = os.path.join(path, executable_name)
ret = True
return ret
def _is_valid_exiftool_executable(self, path_to_bin):
"""Will check to see if path_to_bin is pointing to a copy of exiftool.exe"""
ret = False
try:
popen = self._createProcess("\"" + path_to_bin + "\" -ver")
popen.wait() # returncode is set by wait() as we need to wait for the program to finish
if (popen.returncode == 0) and (len(popen.stdout.read()) > 0):
print("Found exiftool: " + path_to_bin)
ret = True
        except Exception:
pass
return ret
def _createProcess(self, command):
"""Helper that wraps the Popen arguments that we require to launch exiftool"""
# DJOLY:TODO: Known issues to fix before deployment:
#
# The current Popen setup works for our needs but breaks the following
# rules:
#
        # * Technically we should be splitting the command into a list and not
        #   executing it in a shell. Unfortunately, this seems to break on
# Windows and more investigation is needed to understand how we are
# supposed to call exiftool. The drawbacks of this bastardized call
# don't seem severe for this application.
#
# * The subprocess.Popen documentation has the following note: Do not
# use stdout=PIPE or stderr=PIPE with this function. As the pipes are
# not being read in the current process, the child process may block
# if it generates enough output to a pipe to fill up the OS pipe
# buffer.
#
# ref: http://docs.python.org/dev/library/subprocess.html
return subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
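# Illustrative usage (a sketch; the directory, prefix, and extension below
# are hypothetical):
#   wrap = ExiftoolWrap(path="c:\\tools\\exiftool")
#   if wrap.is_installed():
#       cmd, proc = wrap.launch_file_rename(
#           "c:\\photos", "Holiday_", "*.jpg", use_date_time=True)
#       proc.wait()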
# OS check since currently we only support win32
if sys.platform != "win32":
print("Error: Only win32 is supported.")
assert(sys.platform == "win32")
|
[
"subprocess.Popen"
] |
[((4857, 4946), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(True)'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True)\n', (4873, 4946), False, 'import subprocess\n')]
|
#!/usr/bin/env python
import numpy as np
np.random.seed(42)
import emcee
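# Toy linear-regression problem: recover slope m and intercept c of
# y = m*x + c from noisy samples, using a flat prior and a least-squares
# (unnormalized Gaussian) log-likelihood with emcee's EnsembleSampler.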
def lnprior(params):
return 0.0
def lnlike(params, x, y):
model = params[0] * x + params[1]
residuals = y - model
return -np.sum(residuals ** 2)
def lnprob(params, x, y):
lnp = lnprior(params)
if np.isfinite(lnp):
return lnp + lnlike(params, x, y)
return -np.inf
if __name__ == '__main__':
real_m, real_c = 2, 5
real_x = np.sort(np.random.uniform(0, 10, 20))
real_y = real_m * real_x + real_c
noise = np.random.normal(0, 3, real_x.shape)
observed_y = real_y + noise
p0 = np.array([0, 0])
nwalkers = 10
niters = 100
sampler = emcee.EnsembleSampler(nwalkers, len(p0), lnprob,
args=(real_x, observed_y))
pos = np.array([p0 + 1E-5 * np.random.randn()
for _ in range(nwalkers)])
sampler.run_mcmc(pos, niters)
print(sampler.flatchain[::10, 0])
|
[
"numpy.random.uniform",
"numpy.sum",
"numpy.random.seed",
"numpy.random.randn",
"numpy.isfinite",
"numpy.array",
"numpy.random.normal"
] |
[((43, 61), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (57, 61), True, 'import numpy as np\n'), ((299, 315), 'numpy.isfinite', 'np.isfinite', (['lnp'], {}), '(lnp)\n', (310, 315), True, 'import numpy as np\n'), ((534, 570), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)', 'real_x.shape'], {}), '(0, 3, real_x.shape)\n', (550, 570), True, 'import numpy as np\n'), ((613, 629), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (621, 629), True, 'import numpy as np\n'), ((216, 238), 'numpy.sum', 'np.sum', (['(residuals ** 2)'], {}), '(residuals ** 2)\n', (222, 238), True, 'import numpy as np\n'), ((454, 482), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(20)'], {}), '(0, 10, 20)\n', (471, 482), True, 'import numpy as np\n'), ((825, 842), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (840, 842), True, 'import numpy as np\n')]
|
import a0
import time
def callback(pkt):
    print(f'Received reply: {pkt.payload.decode("utf-8")}')
print("Waiting 1ms for response")
client = a0.RpcClient("topic")
client.send("client msg", callback)
time.sleep(0.001)
print("Done!")
|
[
"a0.RpcClient",
"time.sleep"
] |
[((148, 169), 'a0.RpcClient', 'a0.RpcClient', (['"""topic"""'], {}), "('topic')\n", (160, 169), False, 'import a0\n'), ((206, 223), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (216, 223), False, 'import time\n')]
|
def main(trainer, args, myargs):
config = myargs.config
from template_lib.utils import seed_utils
seed_utils.set_random_seed(config.seed)
if args.evaluate:
trainer.evaluate()
return
if args.resume:
trainer.resume()
elif args.finetune:
trainer.finetune()
# Load dataset
trainer.dataset_load()
trainer.train()
|
[
"template_lib.utils.seed_utils.set_random_seed"
] |
[((107, 146), 'template_lib.utils.seed_utils.set_random_seed', 'seed_utils.set_random_seed', (['config.seed'], {}), '(config.seed)\n', (133, 146), False, 'from template_lib.utils import seed_utils\n')]
|
import collections
import unittest
from snmpagent_unity import agent, enums
from snmpagent_unity import exceptions as snmp_ex
from snmpagent_unity.tests import patches
from pysnmp.smi import error as smi_ex
SERVICE_ID_MD5 = (1, 3, 6, 1, 6, 3, 10, 1, 1, 2)
SERVICE_ID_SHA = (1, 3, 6, 1, 6, 3, 10, 1, 1, 3)
SERVICE_ID_DES = (1, 3, 6, 1, 6, 3, 10, 1, 2, 2)
SERVICE_ID_AES = (1, 3, 6, 1, 6, 3, 10, 1, 2, 4)
class TestEngine(unittest.TestCase):
@patches.user_v3_entry
@patches.user_v2_entry
@patches.agent_config_entry
def setUp(self, agent_config_entry, user_v2_entry, user_v3_entry):
self.agent_config_entry = agent_config_entry
self.user_v2_entry = user_v2_entry
self.user_v3_entry = user_v3_entry
self.agent_config_entry.agent_ip = '192.168.0.101'
self.agent_config_entry.agent_port = '11161'
self.agent_config_entry.mgmt_ip = '10.0.0.10'
self.agent_config_entry.cache_interval = '60'
self.agent_config_entry.user = 'admin'
self.agent_config_entry.password = 'password'
self.user_v2_entry.mode = enums.UserVersion.V2
self.user_v2_entry.name = 'userv2'
self.user_v2_entry.community = 'public'
self.user_v3_entry.mode = enums.UserVersion.V3
self.user_v3_entry.name = 'userv3'
self.user_v3_entry.auth_protocol = enums.AuthProtocol.MD5
self.user_v3_entry.auth_key.raw = 'authkey1'
self.user_v3_entry.priv_protocol = enums.PrivProtocol.AES
self.user_v3_entry.priv_key.raw = 'privkey1'
@patches.mock_engine
@patches.mock_client
@patches.mock_udp
@patches.add_transport
@patches.add_vacm_user
@patches.add_v3_user
@patches.add_v1_system
@patches.user_v3_entry
def test_create_engine(self, user_v3_entry, add_v1_system, add_v3_user,
add_vacm_user, *args, **kwargs):
array_config = self.agent_config_entry
user_v2 = self.user_v2_entry
user_v3 = self.user_v3_entry
user_v3_no_priv = user_v3_entry
user_v3_no_priv.mode = enums.UserVersion.V3
user_v3_no_priv.name = 'userv3_no_priv'
user_v3_no_priv.auth_protocol = enums.AuthProtocol.SHA
user_v3_no_priv.auth_key.raw = 'authkey1_no_priv'
user_v3_no_priv.priv_protocol = None
user_v3_no_priv.priv_key.raw = None
access_config = collections.OrderedDict()
access_config[user_v2.name] = user_v2
access_config[user_v3.name] = user_v3
access_config[user_v3_no_priv.name] = user_v3_no_priv
snmp_engine = agent.SNMPEngine(array_config, access_config)
add_v1_system.assert_called_once()
_, name, community = add_v1_system.call_args[0]
self.assertEqual(name, user_v2.name)
self.assertEqual(community, user_v2.community)
self.assertEqual(add_v3_user.call_count, 2)
_, name, auth_proto, auth_key, priv_proto, priv_key = \
add_v3_user.call_args_list[0][0]
self.assertEqual(name, user_v3.name)
self.assertEqual(auth_proto, SERVICE_ID_MD5)
self.assertEqual(auth_key, user_v3.auth_key.raw)
self.assertEqual(priv_proto, SERVICE_ID_AES)
self.assertEqual(priv_key, user_v3.priv_key.raw)
_, name, auth_proto, auth_key = add_v3_user.call_args_list[1][0]
self.assertEqual(name, user_v3_no_priv.name)
self.assertEqual(auth_proto, SERVICE_ID_SHA)
self.assertEqual(auth_key, user_v3_no_priv.auth_key.raw)
self.assertEqual(add_vacm_user.call_count, 3)
client_name = '{}_{}'.format(array_config.mgmt_ip,
array_config.agent_port)
kwargs['get_unity_client'].assert_called_once_with(
client_name, array_config.mgmt_ip, array_config.user,
array_config.password,
cache_interval=int(array_config.cache_interval))
self.assertEqual(snmp_engine.ip, array_config.agent_ip)
self.assertEqual(snmp_engine.port, int(array_config.agent_port))
self.assertEqual(snmp_engine.engine.parent, snmp_engine)
self.assertNotEqual(snmp_engine.engine.unity_client, None)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Unity-MIB']), 181)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Exported-Unity-MIB']), 150)
@patches.mock_engine
@patches.mock_client
@patches.mock_udp
@patches.add_transport
@patches.add_vacm_user
@patches.add_v3_user
@patches.add_v1_system
@patches.agent_config_entry
def test_create_engine_with_default_ip_port(self, agent_config_entry,
*args, **kwargs):
array_config = agent_config_entry
array_config.agent_ip = None
array_config.agent_port = None
array_config.mgmt_ip = '10.0.0.10'
array_config.cache_interval = '60'
array_config.user = 'admin'
array_config.password = 'password'
user_v2 = self.user_v2_entry
user_v3 = self.user_v3_entry
access_config = collections.OrderedDict()
access_config[user_v2.name] = user_v2
access_config[user_v3.name] = user_v3
snmp_engine = agent.SNMPEngine(array_config, access_config)
client_name = '{}_{}'.format(array_config.mgmt_ip,
array_config.agent_port)
kwargs['get_unity_client'].assert_called_once_with(
client_name, array_config.mgmt_ip, array_config.user,
array_config.password,
cache_interval=int(array_config.cache_interval))
self.assertEqual(snmp_engine.ip, '0.0.0.0')
self.assertEqual(snmp_engine.port, 161)
self.assertEqual(snmp_engine.engine.parent, snmp_engine)
self.assertNotEqual(snmp_engine.engine.unity_client, None)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Unity-MIB']), 181)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Exported-Unity-MIB']), 150)
@patches.mock_engine
@patches.mock_client
@patches.mock_udp
@patches.add_transport
def test_create_engine_without_user(self, *args, **kwargs):
array_config = self.agent_config_entry
access_config = collections.OrderedDict()
self.assertRaises(snmp_ex.NoUserExistsError, agent.SNMPEngine,
array_config, access_config)
@patches.mock_engine
@patches.mock_client
@patches.mock_udp
@patches.add_transport
@patches.add_vacm_user
@patches.add_v3_user
@patches.add_v1_system
def test_create_engine_with_invalid_community(self, add_v1_system,
*args, **kwargs):
array_config = self.agent_config_entry
user_v2 = self.user_v2_entry
user_v3 = self.user_v3_entry
access_config = collections.OrderedDict()
access_config[user_v2.name] = user_v2
access_config[user_v3.name] = user_v3
add_v1_system.side_effect = smi_ex.WrongValueError
snmp_engine = agent.SNMPEngine(array_config, access_config)
self.assertEqual(snmp_engine.ip, array_config.agent_ip)
self.assertEqual(snmp_engine.port, int(array_config.agent_port))
self.assertEqual(snmp_engine.engine.parent, snmp_engine)
self.assertNotEqual(snmp_engine.engine.unity_client, None)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Unity-MIB']), 181)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Exported-Unity-MIB']), 150)
@patches.mock_engine
@patches.mock_client
@patches.mock_udp
@patches.add_transport
@patches.add_vacm_user
@patches.add_v3_user
@patches.add_v1_system
def test_create_engine_with_invalid_user(self, add_v1_system, add_v3_user,
*args, **kwargs):
array_config = self.agent_config_entry
user_v2 = self.user_v2_entry
user_v3 = self.user_v3_entry
access_config = collections.OrderedDict()
access_config[user_v2.name] = user_v2
access_config[user_v3.name] = user_v3
add_v3_user.side_effect = smi_ex.WrongValueError
snmp_engine = agent.SNMPEngine(array_config, access_config)
self.assertEqual(snmp_engine.ip, array_config.agent_ip)
self.assertEqual(snmp_engine.port, int(array_config.agent_port))
self.assertEqual(snmp_engine.engine.parent, snmp_engine)
self.assertNotEqual(snmp_engine.engine.unity_client, None)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Unity-MIB']), 181)
self.assertEqual(len(snmp_engine.engine.msgAndPduDsp.
mibInstrumController.mibBuilder.
mibSymbols['Exported-Unity-MIB']), 150)
|
[
"collections.OrderedDict",
"snmpagent_unity.agent.SNMPEngine"
] |
[((2387, 2412), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2410, 2412), False, 'import collections\n'), ((2590, 2635), 'snmpagent_unity.agent.SNMPEngine', 'agent.SNMPEngine', (['array_config', 'access_config'], {}), '(array_config, access_config)\n', (2606, 2635), False, 'from snmpagent_unity import agent, enums\n'), ((5290, 5315), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5313, 5315), False, 'import collections\n'), ((5431, 5476), 'snmpagent_unity.agent.SNMPEngine', 'agent.SNMPEngine', (['array_config', 'access_config'], {}), '(array_config, access_config)\n', (5447, 5476), False, 'from snmpagent_unity import agent, enums\n'), ((6666, 6691), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (6689, 6691), False, 'import collections\n'), ((7284, 7309), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (7307, 7309), False, 'import collections\n'), ((7485, 7530), 'snmpagent_unity.agent.SNMPEngine', 'agent.SNMPEngine', (['array_config', 'access_config'], {}), '(array_config, access_config)\n', (7501, 7530), False, 'from snmpagent_unity import agent, enums\n'), ((8646, 8671), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (8669, 8671), False, 'import collections\n'), ((8845, 8890), 'snmpagent_unity.agent.SNMPEngine', 'agent.SNMPEngine', (['array_config', 'access_config'], {}), '(array_config, access_config)\n', (8861, 8890), False, 'from snmpagent_unity import agent, enums\n')]
|
import re
import sre_constants
from functools import wraps
from html import unescape
import requests
import redis
import lxml
from bs4 import BeautifulSoup
from flask import request
class RedisDict:
def __init__(self, **redis_kwargs):
self.__db = redis.Redis(**redis_kwargs)
def __len__(self):
return self.__db.keys().__len__()
def __setitem__(self, key, value):
self.__db.set(key, value)
def __getitem__(self, key):
k = self.__db.get(key)
return k.decode() if k else k
def set(self, key, value):
self.__db.set(key, value)
def __contains__(self, item):
return True if self[item] else False
def __iter__(self):
for key in self.__db.keys():
yield key.decode() if key else key
def expire(self, key, time):
self.__db.expire(key, time)
def pop(self, key):
return self.__db.delete(key)
def get_re_explanation(expression):
try:
re.compile(expression)
except sre_constants.error:
return False
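    # explain.pl returns a plain-text, two-column table (regex token on the
    # left, English explanation on the right) whose rows are separated by
    # rules of 80 dashes; the loop below re-joins rows wrapped across lines.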
r = requests.get(
'http://rick.measham.id.au/paste/explain.pl',
params={
'regex': expression
}
)
b = BeautifulSoup(r.text, 'lxml')
lines = b.pre.text.strip().splitlines()[2:]
lines.append('-' * 80)
res = []
token, explanation = '', ''
for line in lines:
if line == '-' * 80:
res.append((token, explanation))
token, explanation = '', ''
continue
line = line.strip()
if len(line) >= 40:
regex_part, explanation_part = line.split(maxsplit=1)
token = ' '.join([token, regex_part])
explanation = ' '.join([explanation, explanation_part])
else:
if line.count(' ') >= 23:
regex_part, explanation_part = line.split(maxsplit=1)
token = ' '.join([token, regex_part])
explanation = ' '.join([explanation, explanation_part])
else:
explanation = ' '.join([explanation, line])
return unescape('\n'.join(' : '.join(pair) for pair in res if all(pair)))
def auth_required(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
request_json = request.get_json()
token = None
if request_json:
if 'token' in request_json:
token = request_json.get('token')
else:
token = request.args.get('token')
if token is None or token not in RedisDict():
return {'message': {'error': 'Not authorized'}}, 401
return method(self, *args, **kwargs)
return wrapper
|
[
"redis.Redis",
"flask.request.args.get",
"functools.wraps",
"requests.get",
"bs4.BeautifulSoup",
"flask.request.get_json",
"re.compile"
] |
[((1059, 1151), 'requests.get', 'requests.get', (['"""http://rick.measham.id.au/paste/explain.pl"""'], {'params': "{'regex': expression}"}), "('http://rick.measham.id.au/paste/explain.pl', params={'regex':\n expression})\n", (1071, 1151), False, 'import requests\n'), ((1200, 1229), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (1213, 1229), False, 'from bs4 import BeautifulSoup\n'), ((2186, 2199), 'functools.wraps', 'wraps', (['method'], {}), '(method)\n', (2191, 2199), False, 'from functools import wraps\n'), ((263, 290), 'redis.Redis', 'redis.Redis', ([], {}), '(**redis_kwargs)\n', (274, 290), False, 'import redis\n'), ((975, 997), 're.compile', 're.compile', (['expression'], {}), '(expression)\n', (985, 997), False, 'import re\n'), ((2263, 2281), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2279, 2281), False, 'from flask import request\n'), ((2452, 2477), 'flask.request.args.get', 'request.args.get', (['"""token"""'], {}), "('token')\n", (2468, 2477), False, 'from flask import request\n')]
|
import os
import unittest
import tempfile
from testfixtures import compare, Replacer, replace
from testfixtures.popen import MockPopen
from testfixtures.mock import call
from popper.config import ConfigLoader
from popper.runner import WorkflowRunner
from popper.parser import WorkflowParser
from popper.runner_slurm import SlurmRunner, DockerRunner, SingularityRunner
from popper.cli import log as log
from .test_common import PopperTest
from box import Box
def mock_kill(pid, sig):
return 0
class TestSlurmSlurmRunner(PopperTest):
def setUp(self):
log.setLevel("CRITICAL")
self.Popen = MockPopen()
replacer = Replacer()
replacer.replace("popper.runner_host.Popen", self.Popen)
self.addCleanup(replacer.restore)
def tearDown(self):
log.setLevel("NOTSET")
def test_tail_output(self):
self.Popen.set_command("tail -f slurm-x.out", returncode=0)
with SlurmRunner(config=ConfigLoader.load()) as sr:
self.assertEqual(sr._tail_output("slurm-x.out"), 0)
self.assertEqual(len(sr._out_stream_pid), 1)
def test_stop_running_tasks(self):
self.Popen.set_command("scancel --name job_a", returncode=0)
with SlurmRunner(config=ConfigLoader.load()) as sr:
sr._spawned_jobs.add("job_a")
sr.stop_running_tasks()
compare(
call.Popen(
["scancel", "--name", "job_a"],
cwd=os.getcwd(),
env=None,
preexec_fn=os.setsid,
stderr=-2,
stdout=-1,
universal_newlines=True,
),
self.Popen.all_calls[0],
)
@replace("popper.runner_slurm.os.kill", mock_kill)
def test_submit_batch_job(self, mock_kill):
config = ConfigLoader.load(workspace_dir="/w")
self.Popen.set_command(
"sbatch --wait "
f"--job-name popper_sample_{config.wid} "
f"--output /tmp/popper/slurm/popper_sample_{config.wid}.out "
f"/tmp/popper/slurm/popper_sample_{config.wid}.sh",
returncode=0,
)
self.Popen.set_command(
f"tail -f /tmp/popper/slurm/popper_sample_{config.wid}.out", returncode=0
)
step = Box({"id": "sample"}, default_box=True)
with SlurmRunner(config=config) as sr:
sr._submit_batch_job(["ls -la"], step)
with open(f"/tmp/popper/slurm/popper_sample_{config.wid}.sh", "r") as f:
content = f.read()
self.assertEqual(content, "#!/bin/bash\nls -la")
self.assertEqual(len(sr._spawned_jobs), 0)
self.assertEqual(sr._out_stream_thread.is_alive(), False)
call_tail = call.Popen(
["tail", "-f", f"/tmp/popper/slurm/popper_sample_{config.wid}.out"],
cwd=os.getcwd(),
env=None,
preexec_fn=os.setsid,
stderr=-2,
stdout=-1,
universal_newlines=True,
)
call_sbatch = call.Popen(
[
"sbatch",
"--wait",
"--job-name",
f"popper_sample_{config.wid}",
"--output",
f"/tmp/popper/slurm/popper_sample_{config.wid}.out",
f"/tmp/popper/slurm/popper_sample_{config.wid}.sh",
],
cwd=os.getcwd(),
env=None,
preexec_fn=os.setsid,
stderr=-2,
stdout=-1,
universal_newlines=True,
)
self.assertEqual(call_tail in self.Popen.all_calls, True)
self.assertEqual(call_sbatch in self.Popen.all_calls, True)
@replace("popper.runner_slurm.os.kill", mock_kill)
def test_submit_job_failure(self, mock_kill):
config_dict = {
"engine": {"name": "docker", "options": {}},
"resource_manager": {"name": "slurm", "options": {}},
}
config = ConfigLoader.load(workspace_dir="/w", config_file=config_dict)
self.Popen.set_command(
f"sbatch --wait --job-name popper_1_{config.wid} "
f"--output /tmp/popper/slurm/popper_1_{config.wid}.out "
f"/tmp/popper/slurm/popper_1_{config.wid}.sh",
returncode=12,
)
self.Popen.set_command(
f"tail -f /tmp/popper/slurm/popper_1_{config.wid}.out", returncode=0
)
with WorkflowRunner(config) as r:
wf_data = {
"steps": [
{
"uses": "popperized/bin/sh@master",
"runs": ["cat"],
"args": ["README.md"],
}
]
}
self.assertRaises(SystemExit, r.run, WorkflowParser.parse(wf_data=wf_data))
call_tail = call.Popen(
["tail", "-f", f"/tmp/popper/slurm/popper_1_{config.wid}.out"],
cwd=os.getcwd(),
env=None,
preexec_fn=os.setsid,
stderr=-2,
stdout=-1,
universal_newlines=True,
)
call_sbatch = call.Popen(
[
"sbatch",
"--wait",
"--job-name",
f"popper_1_{config.wid}",
"--output",
f"/tmp/popper/slurm/popper_1_{config.wid}.out",
f"/tmp/popper/slurm/popper_1_{config.wid}.sh",
],
cwd=os.getcwd(),
env=None,
preexec_fn=os.setsid,
stderr=-2,
stdout=-1,
universal_newlines=True,
)
self.assertEqual(call_tail in self.Popen.all_calls, True)
self.assertEqual(call_sbatch in self.Popen.all_calls, True)
def test_dry_run(self):
config = ConfigLoader.load(
engine_name="docker", resman_name="slurm", dry_run=True,
)
with WorkflowRunner(config) as r:
wf_data = {
"steps": [
{
"uses": "popperized/bin/sh@master",
"runs": ["cat"],
"args": ["README.md"],
}
]
}
r.run(WorkflowParser.parse(wf_data=wf_data))
self.assertEqual(self.Popen.all_calls, [])
class TestSlurmDockerRunner(unittest.TestCase):
def setUp(self):
log.setLevel("CRITICAL")
self.Popen = MockPopen()
replacer = Replacer()
replacer.replace("popper.runner_host.Popen", self.Popen)
self.addCleanup(replacer.restore)
def tearDown(self):
log.setLevel("NOTSET")
def test_create_cmd(self):
config = {"workspace_dir": "/w"}
with DockerRunner(config=ConfigLoader.load(**config)) as drunner:
step = Box({"args": ["-two", "-flags"]}, default_box=True)
cmd = drunner._create_cmd(step, "foo:1.9", "container_name")
expected = (
"docker create"
" --name container_name"
" --workdir /workspace"
" -v /w:/workspace"
" -v /var/run/docker.sock:/var/run/docker.sock"
" foo:1.9 -two -flags"
)
self.assertEqual(expected, cmd)
config_dict = {
"engine": {
"name": "docker",
"options": {
"privileged": True,
"hostname": "popper.local",
"domainname": "www.example.org",
"volumes": ["/path/in/host:/path/in/container"],
"environment": {"FOO": "bar"},
},
},
"resource_manager": {"name": "slurm"},
}
config = {"workspace_dir": "/w", "config_file": config_dict}
with DockerRunner(config=ConfigLoader.load(**config)) as drunner:
step = Box({"args": ["-two", "-flags"]}, default_box=True)
cmd = drunner._create_cmd(step, "foo:1.9", "container_name")
expected = (
"docker create --name container_name "
"--workdir /workspace "
"-v /w:/workspace "
"-v /var/run/docker.sock:/var/run/docker.sock "
"-v /path/in/host:/path/in/container "
"-e FOO=bar --privileged --hostname popper.local "
"--domainname www.example.org "
"foo:1.9 -two -flags"
)
self.assertEqual(expected, cmd)
@replace("popper.runner_slurm.os.kill", mock_kill)
def test_run(self, mock_kill):
config_dict = {
"engine": {
"name": "docker",
"options": {
"privileged": True,
"hostname": "popper.local",
"domainname": "www.example.org",
"volumes": ["/path/in/host:/path/in/container"],
"environment": {"FOO": "bar"},
},
},
"resource_manager": {"name": "slurm"},
}
config = ConfigLoader.load(workspace_dir="/w", config_file=config_dict)
self.Popen.set_command(
f"sbatch --wait --job-name popper_1_{config.wid} "
f"--output /tmp/popper/slurm/popper_1_{config.wid}.out "
f"/tmp/popper/slurm/popper_1_{config.wid}.sh",
returncode=0,
)
self.Popen.set_command(
f"tail -f /tmp/popper/slurm/popper_1_{config.wid}.out", returncode=0
)
with WorkflowRunner(config) as r:
wf_data = {
"steps": [
{
"uses": "popperized/bin/sh@master",
"runs": ["cat"],
"args": ["README.md"],
}
]
}
r.run(WorkflowParser.parse(wf_data=wf_data))
with open(f"/tmp/popper/slurm/popper_1_{config.wid}.sh", "r") as f:
# fmt: off
expected = f"""#!/bin/bash
docker rm -f popper_1_{config.wid} || true
docker build -t popperized/bin:master {os.environ['HOME']}/.cache/popper/{config.wid}/github.com/popperized/bin/sh
docker create --name popper_1_{config.wid} --workdir /workspace --entrypoint cat -v /w:/workspace -v /var/run/docker.sock:/var/run/docker.sock -v /path/in/host:/path/in/container -e FOO=bar --privileged --hostname popper.local --domainname www.example.org popperized/bin:master README.md
docker start --attach popper_1_{config.wid}"""
# fmt: on
actual = f.read()
self.maxDiff = None
self.assertEqual(expected, actual)
class TestSlurmSingularityRunner(unittest.TestCase):
def setUp(self):
self.Popen = MockPopen()
replacer = Replacer()
replacer.replace("popper.runner_host.Popen", self.Popen)
self.addCleanup(replacer.restore)
def tearDown(self):
log.setLevel("NOTSET")
def test_create_cmd(self):
config = ConfigLoader.load(workspace_dir="/w")
with SingularityRunner(config=config) as sr:
step = Box({"args": ["-two", "-flags"]}, default_box=True)
sr._setup_singularity_cache()
sr._container = os.path.join(sr._singularity_cache, "c1.sif")
cmd = sr._create_cmd(step, "c1.sif")
expected = (
"singularity run"
" --userns --pwd /workspace"
" --bind /w:/workspace"
f' {os.environ["HOME"]}/.cache/popper/singularity/{config.wid}/c1.sif'
" -two -flags"
)
self.assertEqual(expected, cmd)
config_dict = {
"engine": {
"name": "singularity",
"options": {
"hostname": "popper.local",
"ipc": True,
"bind": ["/path/in/host:/path/in/container"],
},
},
"resource_manager": {"name": "slurm"},
}
config = ConfigLoader.load(workspace_dir="/w", config_file=config_dict)
with SingularityRunner(config=config) as sr:
step = Box({"args": ["-two", "-flags"]}, default_box=True)
sr._setup_singularity_cache()
sr._container = os.path.join(sr._singularity_cache, "c2.sif")
cmd = sr._create_cmd(step, "c2.sif")
# fmt: off
expected = f"singularity run --userns --pwd /workspace --bind /w:/workspace --bind /path/in/host:/path/in/container --hostname popper.local --ipc {os.environ['HOME']}/.cache/popper/singularity/{config.wid}/c2.sif -two -flags"
# fmt: on
self.assertEqual(expected, cmd)
@replace("popper.runner_slurm.os.kill", mock_kill)
def test_slurm_singularity_run(self, mock_kill):
config_dict = {
"engine": {
"name": "singularity",
"options": {
"hostname": "popper.local",
"bind": ["/path/in/host:/path/in/container"],
},
},
"resource_manager": {"name": "slurm"},
}
config = ConfigLoader.load(workspace_dir="/w", config_file=config_dict)
# fmt: off
self.Popen.set_command(
f"sbatch --wait --job-name popper_1_{config.wid} --output /tmp/popper/slurm/popper_1_{config.wid}.out /tmp/popper/slurm/popper_1_{config.wid}.sh",
returncode=0,
)
# fmt: on
self.Popen.set_command(
f"tail -f /tmp/popper/slurm/popper_1_{config.wid}.out", returncode=0
)
with WorkflowRunner(config) as r:
wf_data = {"steps": [{"uses": "popperized/bin/sh@master", "args": ["ls"],}]}
r.run(WorkflowParser.parse(wf_data=wf_data))
with open(f"/tmp/popper/slurm/popper_1_{config.wid}.sh", "r") as f:
# fmt: off
expected = f"""#!/bin/bash
singularity run --userns --pwd /workspace --bind /w:/workspace --bind /path/in/host:/path/in/container --hostname popper.local {os.environ['HOME']}/.cache/popper/singularity/{config.wid}/popper_1_{config.wid}.sif ls"""
# fmt: on
actual = f.read()
self.assertEqual(expected, actual)
|
[
"box.Box",
"popper.runner_slurm.SlurmRunner",
"popper.runner_slurm.SingularityRunner",
"testfixtures.Replacer",
"testfixtures.popen.MockPopen",
"os.getcwd",
"testfixtures.replace",
"popper.runner.WorkflowRunner",
"popper.config.ConfigLoader.load",
"popper.parser.WorkflowParser.parse",
"popper.cli.log.setLevel",
"os.path.join"
] |
[((1750, 1799), 'testfixtures.replace', 'replace', (['"""popper.runner_slurm.os.kill"""', 'mock_kill'], {}), "('popper.runner_slurm.os.kill', mock_kill)\n", (1757, 1799), False, 'from testfixtures import compare, Replacer, replace\n'), ((3749, 3798), 'testfixtures.replace', 'replace', (['"""popper.runner_slurm.os.kill"""', 'mock_kill'], {}), "('popper.runner_slurm.os.kill', mock_kill)\n", (3756, 3798), False, 'from testfixtures import compare, Replacer, replace\n'), ((8712, 8761), 'testfixtures.replace', 'replace', (['"""popper.runner_slurm.os.kill"""', 'mock_kill'], {}), "('popper.runner_slurm.os.kill', mock_kill)\n", (8719, 8761), False, 'from testfixtures import compare, Replacer, replace\n'), ((12932, 12981), 'testfixtures.replace', 'replace', (['"""popper.runner_slurm.os.kill"""', 'mock_kill'], {}), "('popper.runner_slurm.os.kill', mock_kill)\n", (12939, 12981), False, 'from testfixtures import compare, Replacer, replace\n'), ((573, 597), 'popper.cli.log.setLevel', 'log.setLevel', (['"""CRITICAL"""'], {}), "('CRITICAL')\n", (585, 597), True, 'from popper.cli import log as log\n'), ((619, 630), 'testfixtures.popen.MockPopen', 'MockPopen', ([], {}), '()\n', (628, 630), False, 'from testfixtures.popen import MockPopen\n'), ((650, 660), 'testfixtures.Replacer', 'Replacer', ([], {}), '()\n', (658, 660), False, 'from testfixtures import compare, Replacer, replace\n'), ((801, 823), 'popper.cli.log.setLevel', 'log.setLevel', (['"""NOTSET"""'], {}), "('NOTSET')\n", (813, 823), True, 'from popper.cli import log as log\n'), ((1865, 1902), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {'workspace_dir': '"""/w"""'}), "(workspace_dir='/w')\n", (1882, 1902), False, 'from popper.config import ConfigLoader\n'), ((2335, 2374), 'box.Box', 'Box', (["{'id': 'sample'}"], {'default_box': '(True)'}), "({'id': 'sample'}, default_box=True)\n", (2338, 2374), False, 'from box import Box\n'), ((4024, 4086), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {'workspace_dir': '"""/w"""', 'config_file': 'config_dict'}), "(workspace_dir='/w', config_file=config_dict)\n", (4041, 4086), False, 'from popper.config import ConfigLoader\n'), ((5979, 6053), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {'engine_name': '"""docker"""', 'resman_name': '"""slurm"""', 'dry_run': '(True)'}), "(engine_name='docker', resman_name='slurm', dry_run=True)\n", (5996, 6053), False, 'from popper.config import ConfigLoader\n'), ((6583, 6607), 'popper.cli.log.setLevel', 'log.setLevel', (['"""CRITICAL"""'], {}), "('CRITICAL')\n", (6595, 6607), True, 'from popper.cli import log as log\n'), ((6629, 6640), 'testfixtures.popen.MockPopen', 'MockPopen', ([], {}), '()\n', (6638, 6640), False, 'from testfixtures.popen import MockPopen\n'), ((6660, 6670), 'testfixtures.Replacer', 'Replacer', ([], {}), '()\n', (6668, 6670), False, 'from testfixtures import compare, Replacer, replace\n'), ((6811, 6833), 'popper.cli.log.setLevel', 'log.setLevel', (['"""NOTSET"""'], {}), "('NOTSET')\n", (6823, 6833), True, 'from popper.cli import log as log\n'), ((9282, 9344), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {'workspace_dir': '"""/w"""', 'config_file': 'config_dict'}), "(workspace_dir='/w', config_file=config_dict)\n", (9299, 9344), False, 'from popper.config import ConfigLoader\n'), ((10964, 10975), 'testfixtures.popen.MockPopen', 'MockPopen', ([], {}), '()\n', (10973, 10975), False, 'from testfixtures.popen import MockPopen\n'), ((10995, 11005), 'testfixtures.Replacer', 'Replacer', ([], {}), '()\n', (11003, 11005), False, 'from testfixtures import compare, Replacer, replace\n'), ((11146, 11168), 'popper.cli.log.setLevel', 'log.setLevel', (['"""NOTSET"""'], {}), "('NOTSET')\n", (11158, 11168), True, 'from popper.cli import log as log\n'), ((11218, 11255), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {'workspace_dir': '"""/w"""'}), "(workspace_dir='/w')\n", (11235, 11255), False, 'from popper.config import ConfigLoader\n'), ((12244, 12306), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {'workspace_dir': '"""/w"""', 'config_file': 'config_dict'}), "(workspace_dir='/w', config_file=config_dict)\n", (12261, 12306), False, 'from popper.config import ConfigLoader\n'), ((13378, 13440), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {'workspace_dir': '"""/w"""', 'config_file': 'config_dict'}), "(workspace_dir='/w', config_file=config_dict)\n", (13395, 13440), False, 'from popper.config import ConfigLoader\n'), ((2388, 2414), 'popper.runner_slurm.SlurmRunner', 'SlurmRunner', ([], {'config': 'config'}), '(config=config)\n', (2399, 2414), False, 'from popper.runner_slurm import SlurmRunner, DockerRunner, SingularityRunner\n'), ((4486, 4508), 'popper.runner.WorkflowRunner', 'WorkflowRunner', (['config'], {}), '(config)\n', (4500, 4508), False, 'from popper.runner import WorkflowRunner\n'), ((6091, 6113), 'popper.runner.WorkflowRunner', 'WorkflowRunner', (['config'], {}), '(config)\n', (6105, 6113), False, 'from popper.runner import WorkflowRunner\n'), ((7000, 7051), 'box.Box', 'Box', (["{'args': ['-two', '-flags']}"], {'default_box': '(True)'}), "({'args': ['-two', '-flags']}, default_box=True)\n", (7003, 7051), False, 'from box import Box\n'), ((8093, 8144), 'box.Box', 'Box', (["{'args': ['-two', '-flags']}"], {'default_box': '(True)'}), "({'args': ['-two', '-flags']}, default_box=True)\n", (8096, 8144), False, 'from box import Box\n'), ((9743, 9765), 'popper.runner.WorkflowRunner', 'WorkflowRunner', (['config'], {}), '(config)\n', (9757, 9765), False, 'from popper.runner import WorkflowRunner\n'), ((11269, 11301), 'popper.runner_slurm.SingularityRunner', 'SingularityRunner', ([], {'config': 'config'}), '(config=config)\n', (11286, 11301), False, 'from popper.runner_slurm import SlurmRunner, DockerRunner, SingularityRunner\n'), ((11328, 11379), 'box.Box', 'Box', (["{'args': ['-two', '-flags']}"], {'default_box': '(True)'}), "({'args': ['-two', '-flags']}, default_box=True)\n", (11331, 11379), False, 'from box import Box\n'), ((11450, 11495), 'os.path.join', 'os.path.join', (['sr._singularity_cache', '"""c1.sif"""'], {}), "(sr._singularity_cache, 'c1.sif')\n", (11462, 11495), False, 'import os\n'), ((12321, 12353), 'popper.runner_slurm.SingularityRunner', 'SingularityRunner', ([], {'config': 'config'}), '(config=config)\n', (12338, 12353), False, 'from popper.runner_slurm import SlurmRunner, DockerRunner, SingularityRunner\n'), ((12380, 12431), 'box.Box', 'Box', (["{'args': ['-two', '-flags']}"], {'default_box': '(True)'}), "({'args': ['-two', '-flags']}, default_box=True)\n", (12383, 12431), False, 'from box import Box\n'), ((12502, 12547), 'os.path.join', 'os.path.join', (['sr._singularity_cache', '"""c2.sif"""'], {}), "(sr._singularity_cache, 'c2.sif')\n", (12514, 12547), False, 'import os\n'), ((13844, 13866), 'popper.runner.WorkflowRunner', 'WorkflowRunner', (['config'], {}), '(config)\n', (13858, 13866), False, 'from popper.runner import WorkflowRunner\n'), ((2910, 2921), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2919, 2921), False, 'import os\n'), ((3446, 3457), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3455, 3457), False, 'import os\n'), ((4839, 4876), 'popper.parser.WorkflowParser.parse', 'WorkflowParser.parse', ([], {'wf_data': 'wf_data'}), '(wf_data=wf_data)\n', (4859, 4876), False, 'from popper.parser import WorkflowParser\n'), ((6413, 6450), 'popper.parser.WorkflowParser.parse', 'WorkflowParser.parse', ([], {'wf_data': 'wf_data'}), '(wf_data=wf_data)\n', (6433, 6450), False, 'from popper.parser import WorkflowParser\n'), ((10065, 10102), 'popper.parser.WorkflowParser.parse', 'WorkflowParser.parse', ([], {'wf_data': 'wf_data'}), '(wf_data=wf_data)\n', (10085, 10102), False, 'from popper.parser import WorkflowParser\n'), ((13980, 14017), 'popper.parser.WorkflowParser.parse', 'WorkflowParser.parse', ([], {'wf_data': 'wf_data'}), '(wf_data=wf_data)\n', (14000, 14017), False, 'from popper.parser import WorkflowParser\n'), ((957, 976), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {}), '()\n', (974, 976), False, 'from popper.config import ConfigLoader\n'), ((1247, 1266), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {}), '()\n', (1264, 1266), False, 'from popper.config import ConfigLoader\n'), ((5015, 5026), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5024, 5026), False, 'import os\n'), ((5604, 5615), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5613, 5615), False, 'import os\n'), ((6940, 6967), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {}), '(**config)\n', (6957, 6967), False, 'from popper.config import ConfigLoader\n'), ((8033, 8060), 'popper.config.ConfigLoader.load', 'ConfigLoader.load', ([], {}), '(**config)\n', (8050, 8060), False, 'from popper.config import ConfigLoader\n'), ((1478, 1489), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1487, 1489), False, 'import os\n')]
|
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Any, Union, Type, Callable, List, Text
import numpy as np
import tensornetwork.tensor
import tensornetwork.backends.abstract_backend as abstract_backend
from tensornetwork import backends
AbstractBackend = abstract_backend.AbstractBackend
Array = Any
Tensor = tensornetwork.tensor.Tensor
class MatvecCache:
"""
Caches matvec functions so that they have identical function signature
when called repeatedly. This circumvents extraneous recompilations when
Jit is used. Incoming matvec functions should be in terms of Tensor
and have function signature A = matvec(x, *args), where each of the
positional arguments in *args is also a Tensor.
"""
def __init__(self):
self.clear()
def clear(self):
self.cache = {}
def retrieve(self, backend_name: Text, matvec: Callable):
if backend_name not in self.cache:
self.cache[backend_name] = {}
if matvec not in self.cache[backend_name]:
def wrapped(x, *args):
X = Tensor(x, backend=backend_name)
Args = [Tensor(a, backend=backend_name) for a in args]
Y = matvec(X, *Args)
return Y.array
self.cache[backend_name][matvec] = wrapped
return self.cache[backend_name][matvec]
KRYLOV_MATVEC_CACHE = MatvecCache()
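# Illustrative behaviour of the cache (a sketch): retrieving the same matvec
# for the same backend twice yields the identical wrapped callable, which is
# what lets jitting backends reuse the compiled function.
#
#   def matvec(x):
#       return x * 2.0
#   wrapped = KRYLOV_MATVEC_CACHE.retrieve("numpy", matvec)
#   assert wrapped is KRYLOV_MATVEC_CACHE.retrieve("numpy", matvec)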
def krylov_error_checks(backend: Union[Text, AbstractBackend, None],
x0: Union[Tensor, None],
args: Union[List[Tensor], None]):
"""
  Checks that at least one of backend and x0 is not None; that backend
and x0.backend agree; that if args is not None its elements are Tensors
whose backends also agree. Creates a backend object from backend
and returns the arrays housed by x0 and args.
Args:
backend: A backend, text specifying one, or None.
x0: A tn.Tensor, or None.
args: A list of tn.Tensor, or None.
Returns:
backend: A backend object.
x0_array: x0.array if x0 was supplied, or None.
args_arr: Each array in the list of args if it was supplied, or None.
"""
# If the backend wasn't specified, infer it from x0. If neither was specified
# raise ValueError.
if backend is None:
if x0 is None:
raise ValueError("One of backend or x0 must be specified.")
backend = x0.backend
else:
backend = backends.backend_factory.get_backend(backend)
# If x0 was specified, return the enclosed array. If attempting to do so
# raises AttributeError, instead raise TypeError. If backend was also
# specified, but was different than x0.backend, raise ValueError.
if x0 is not None:
try:
x0_array = x0.array
except AttributeError as err:
raise TypeError("x0 must be a tn.Tensor.") from err
if x0.backend.name != backend.name:
errstr = ("If both x0 and backend are specified the"
"backends must agree. \n"
f"x0 backend: {x0.backend.name} \n"
f"backend: {backend.name} \n")
raise ValueError(errstr)
else: # If x0 was not specified, set x0_array (the returned value) to None.
x0_array = None
# If args were specified, set the returned args_array to be all the enclosed
# arrays. If any of them raise AttributeError during the attempt, raise
# TypeError. If args was not specified, set args_array to None.
if args is not None:
try:
args_array = [a.array for a in args]
except AttributeError as err:
raise TypeError("Every element of args must be a tn.Tensor.") from err
else:
args_array = None
return (backend, x0_array, args_array)
def eigsh_lanczos(A: Callable,
backend: Optional[Union[Text, AbstractBackend]] = None,
args: Optional[List[Tensor]] = None,
x0: Optional[Tensor] = None,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 20,
numeig: int = 1,
tol: float = 1E-8,
delta: float = 1E-8,
ndiag: int = 20,
reorthogonalize: bool = False) -> Tuple[Tensor, List]:
"""
Lanczos method for finding the lowest eigenvector-eigenvalue pairs
of `A`.
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Array`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(x0, *args)`.
x0: An initial vector for the Lanczos algorithm. If `None`,
a random initial vector is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
    dtype: The dtype of the input `A`. If no `x0` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
    numeig: The number of eigenvector-eigenvalue pairs to be computed.
If `numeig > 1`, `reorthogonalize` has to be `True`.
    tol: The desired precision of the eigenvalues. Uses
`backend.norm(eigvalsnew[0:numeig] - eigvalsold[0:numeig]) < tol`
as stopping criterion between two diagonalization steps of the
tridiagonal operator.
delta: Stopping criterion for Lanczos iteration.
If a Krylov vector :math: `x_n` has an L2 norm
:math:`\\lVert x_n\\rVert < delta`, the iteration
is stopped. It means that an (approximate) invariant subspace has
been found.
ndiag: The tridiagonal Operator is diagonalized every `ndiag`
iterations to check convergence.
reorthogonalize: If `True`, Krylov vectors are kept orthogonal by
explicit orthogonalization (more costly than `reorthogonalize=False`)
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` lowest eigenvalues
eigvecs: A list of `numeig` lowest eigenvectors
"""
backend, x0_array, args_array = krylov_error_checks(backend, x0, args)
mv = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A)
result = backend.eigsh_lanczos(mv, args=args_array,
initial_state=x0_array,
shape=shape, dtype=dtype,
num_krylov_vecs=num_krylov_vecs, numeig=numeig,
tol=tol, delta=delta, ndiag=ndiag,
reorthogonalize=reorthogonalize)
eigvals, eigvecs = result
eigvecsT = [Tensor(ev, backend=backend) for ev in eigvecs]
return eigvals, eigvecsT
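# Illustrative usage of eigsh_lanczos (a sketch; `H` and `x0` are assumed to
# be tn.Tensor objects for a Hermitian operator, and the Tensor wrapper is
# assumed to support `@`):
#   def matvec(x, H):
#       return H @ x
#   eigvals, eigvecs = eigsh_lanczos(matvec, args=[H], x0=x0,
#                                    numeig=2, reorthogonalize=True)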
def eigs(A: Callable,
backend: Optional[Union[Text, AbstractBackend]] = None,
args: Optional[List[Tensor]] = None,
x0: Optional[Tensor] = None,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 20,
numeig: int = 1,
tol: float = 1E-8,
which: Text = 'LR',
maxiter: int = 20) -> Tuple[Tensor, List]:
"""
Implicitly restarted Arnoldi method for finding the lowest
eigenvector-eigenvalue pairs of a linear operator `A`.
`A` is a function implementing the matrix-vector
product.
WARNING: This routine uses jax.jit to reduce runtimes. jitting is triggered
at the first invocation of `eigs`, and on any subsequent calls
if the python `id` of `A` changes, even if the formal definition of `A`
stays the same.
Example: the following will jit once at the beginning, and then never again:
```python
import jax
import numpy as np
def A(H,x):
return jax.np.dot(H,x)
for n in range(100):
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
    res = eigs(A, [H],x) #jitting is triggered only at `n=0`
```
The following code triggers jitting at every iteration, which
results in considerably reduced performance
```python
import jax
import numpy as np
for n in range(100):
def A(H,x):
return jax.np.dot(H,x)
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
    res = eigs(A, [H],x) #jitting is triggered at every step `n`
```
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`.
backend: A backend, text specifying one, or None.
args: A list of arguments to `A`. `A` will be called as
`res = A(initial_state, *args)`.
x0: An initial vector for the algorithm. If `None`,
a random initial `Tensor` is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
    dtype: The dtype of the input `A`. If no `x0` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
numeig: The number of eigenvector-eigenvalue pairs to be computed.
tol: The desired precision of the eigenvalues. For the jax backend
this has currently no effect, and precision of eigenvalues is not
guaranteed. This feature may be added at a later point. To increase
precision the caller can either increase `maxiter` or `num_krylov_vecs`.
    which: Flag for targeting different types of eigenvalues. Currently
      supported are `which = 'LR'` (largest real part) and `which = 'LM'`
      (largest magnitude).
maxiter: Maximum number of restarts. For `maxiter=0` the routine becomes
equivalent to a simple Arnoldi method.
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` eigenvalues
eigvecs: A list of `numeig` eigenvectors
"""
backend, x0_array, args_array = krylov_error_checks(backend, x0, args)
mv = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A)
result = backend.eigs(mv, args=args_array, initial_state=x0_array,
shape=shape, dtype=dtype,
num_krylov_vecs=num_krylov_vecs, numeig=numeig,
tol=tol, which=which, maxiter=maxiter)
eigvals, eigvecs = result
eigvecsT = [Tensor(eV, backend=backend) for eV in eigvecs]
return eigvals, eigvecsT
def gmres(A_mv: Callable,
b: Tensor,
A_args: Optional[List] = None,
x0: Optional[Tensor] = None,
tol: float = 1E-05,
atol: Optional[float] = None,
num_krylov_vectors: Optional[int] = None,
maxiter: Optional[int] = 1,
M: Optional[Callable] = None
) -> Tuple[Tensor, int]:
""" GMRES solves the linear system A @ x = b for x given a vector `b` and
a general (not necessarily symmetric/Hermitian) linear operator `A`.
As a Krylov method, GMRES does not require a concrete matrix representation
of the n by n `A`, but only a function
`vector1 = A_mv(vector0, *A_args, **A_kwargs)`
prescribing a one-to-one linear map from vector0 to vector1 (that is,
A must be square, and thus vector0 and vector1 the same size). If `A` is a
dense matrix, or if it is a symmetric/Hermitian operator, a different
linear solver will usually be preferable.
GMRES works by first constructing the Krylov basis
K = (x0, A_mv@x0, A_mv@A_mv@x0, ..., (A_mv^num_krylov_vectors)@x_0) and then
solving a certain dense linear system K @ q0 = q1 from whose solution x can
be approximated. For `num_krylov_vectors = n` the solution is provably exact
in infinite precision, but the expense is cubic in `num_krylov_vectors` so
one is typically interested in the `num_krylov_vectors << n` case.
The solution can in this case be repeatedly
improved, to a point, by restarting the Arnoldi iterations each time
`num_krylov_vectors` is reached. Unfortunately the optimal parameter choices
balancing expense and accuracy are difficult to predict in advance, so
applying this function requires a degree of experimentation.
In a tensor network code one is typically interested in A_mv implementing
some tensor contraction. This implementation thus allows `b` and `x0` to be
of whatever arbitrary, though identical, shape `b = A_mv(x0, ...)` expects.
Reshaping to and from a matrix problem is handled internally.
Args:
A_mv : A function `v0 = A_mv(v, *A_args, **A_kwargs)` where `v0` and
`v` have the same shape.
b : The `b` in `A @ x = b`; it should be of the shape `A_mv`
operates on.
A_args : Positional arguments to `A_mv`, supplied to this interface
as a list.
Default: None.
x0 : An optional guess solution. Zeros are used by default.
If `x0` is supplied, its shape and dtype must match those of
`b`, or an
error will be thrown.
Default: zeros.
tol, atol: Solution tolerance to achieve,
norm(residual) <= max(tol*norm(b), atol).
Default: tol=1E-05
atol=tol
num_krylov_vectors
: Size of the Krylov space to build at each restart.
Expense is cubic in this parameter. If supplied, it must be
an integer in 0 < num_krylov_vectors <= b.size.
Default: b.size.
maxiter : The Krylov space will be repeatedly rebuilt up to this many
times. Large values of this argument
should be used only with caution, since especially for nearly
symmetric matrices and small `num_krylov_vectors` convergence
might well freeze at a value significantly larger than `tol`.
Default: 1.
M : Inverse of the preconditioner of A; see the docstring for
`scipy.sparse.linalg.gmres`. This is only supported in the
numpy backend. Supplying this argument to other backends will
trigger NotImplementedError.
Default: None.
Raises:
ValueError: -if `x0` is supplied but its shape differs from that of `b`.
-in NumPy, if the ARPACK solver reports a breakdown (which
usually indicates some kind of floating point issue).
-if num_krylov_vectors is 0 or exceeds b.size.
-if tol was negative.
-if M was supplied with any backend but NumPy.
Returns:
x : The converged solution. It has the same shape as `b`.
info : 0 if convergence was achieved, the number of restarts otherwise.
"""
try:
b_array = b.array
except AttributeError as err:
raise TypeError("b must be a tn.Tensor") from err
backend, x0_array, args_array = krylov_error_checks(b.backend, x0, A_args)
mv = KRYLOV_MATVEC_CACHE.retrieve(backend.name, A_mv)
out = backend.gmres(mv, b_array, A_args=args_array,
x0=x0_array, tol=tol, atol=atol,
num_krylov_vectors=num_krylov_vectors,
maxiter=maxiter, M=M)
result, info = out
resultT = Tensor(result, backend=b.backend)
return (resultT, info)
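# Illustrative usage of gmres (a sketch; `A_mv` applies a square linear map to
# tn.Tensor objects and `b` has the shape `A_mv` expects; `H` is a
# hypothetical operator argument):
#   x, info = gmres(A_mv, b, A_args=[H], tol=1e-08)
#   if info != 0:
#       print(f"gmres stopped after {info} restarts without converging")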
|
[
"tensornetwork.backends.backend_factory.get_backend"
] |
[((2874, 2919), 'tensornetwork.backends.backend_factory.get_backend', 'backends.backend_factory.get_backend', (['backend'], {}), '(backend)\n', (2910, 2919), False, 'from tensornetwork import backends\n')]
|
from dataclasses import dataclass
from functools import cached_property
from pyramid.request import Request
from h.models import Organization
from h.traversal.root import Root, RootFactory
class OrganizationRoot(RootFactory):
"""Root factory for routes which deal with organizations."""
def __getitem__(self, pubid):
organization = self.request.find_service(name="organization").get_by_public_id(
pubid
)
if organization is None:
raise KeyError()
return OrganizationContext(request=self.request, organization=organization)
@dataclass
class OrganizationContext:
"""Context for organization-based views."""
request: Request
organization: Organization = None
@cached_property
def __parent__(self):
return Root(self.request)
|
[
"h.traversal.root.Root"
] |
[((804, 822), 'h.traversal.root.Root', 'Root', (['self.request'], {}), '(self.request)\n', (808, 822), False, 'from h.traversal.root import Root, RootFactory\n')]
|
import random
import sc2
from sc2.player import Bot, Computer
import protoss_agent
if __name__ == '__main__':
enemy_race = random.choice([sc2.Race.Protoss, sc2.Race.Terran, sc2.Race.Zerg, sc2.Race.Random])
sc2.run_game(sc2.maps.get("Simple128"),
[Bot(sc2.Race.Protoss, protoss_agent.ProtossRushBot()),
Computer(enemy_race, sc2.Difficulty.Easy)],
realtime=False)
|
[
"sc2.player.Computer",
"protoss_agent.ProtossRushBot",
"random.choice",
"sc2.maps.get"
] |
[((130, 217), 'random.choice', 'random.choice', (['[sc2.Race.Protoss, sc2.Race.Terran, sc2.Race.Zerg, sc2.Race.Random]'], {}), '([sc2.Race.Protoss, sc2.Race.Terran, sc2.Race.Zerg, sc2.Race.\n Random])\n', (143, 217), False, 'import random\n'), ((230, 255), 'sc2.maps.get', 'sc2.maps.get', (['"""Simple128"""'], {}), "('Simple128')\n", (242, 255), False, 'import sc2\n'), ((348, 389), 'sc2.player.Computer', 'Computer', (['enemy_race', 'sc2.Difficulty.Easy'], {}), '(enemy_race, sc2.Difficulty.Easy)\n', (356, 389), False, 'from sc2.player import Bot, Computer\n'), ((297, 327), 'protoss_agent.ProtossRushBot', 'protoss_agent.ProtossRushBot', ([], {}), '()\n', (325, 327), False, 'import protoss_agent\n')]
|
import random
if __name__ == '__main__':
sentences = []
with open('../data/crowded_300k.txt', encoding='utf-8') as f:
for line in f:
line = line.strip().split('\t')
sentences.append(line[1])
sentences.append(line[2])
random.shuffle(sentences)
sentences = sentences[: 267132]
with open('../../data/plain_text/dialogue_f.txt', 'w', encoding='utf-8') as f:
f.write('\n'.join(sentences) + '\n')
|
[
"random.shuffle"
] |
[((275, 300), 'random.shuffle', 'random.shuffle', (['sentences'], {}), '(sentences)\n', (289, 300), False, 'import random\n')]
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file unittests/libtests/feassemble/data/ElasticityImplicit.py
## @brief Python application for generating C++ data files for testing
## C++ ElasticityImplicit object.
from pyre.components.Component import Component
import numpy
# ----------------------------------------------------------------------
# ElasticityImplicit class
class ElasticityImplicit(Component):
"""
Python application for generating C++ data files for testing C++
ElasticityImplicit object.
"""
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="elasticityimplicit"):
"""
Constructor.
"""
Component.__init__(self, name, facility="formulation")
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def calculateResidual(self, integrator):
"""
Calculate contribution to residual of operator for integrator.
{r} = -[K]{u(t)}
"""
K = integrator._calculateStiffnessMat()
residual = -numpy.dot(K, integrator.fieldT+integrator.fieldTIncr)
return residual.flatten()
def calculateJacobian(self, integrator):
"""
Calculate contribution to Jacobian matrix of operator for integrator.
[A] = [K]
"""
K = integrator._calculateStiffnessMat()
jacobian = K
return jacobian
# FACTORY //////////////////////////////////////////////////////////////
def formulation():
return ElasticityImplicit()
# End of file
|
[
"numpy.dot",
"pyre.components.Component.Component.__init__"
] |
[((1128, 1182), 'pyre.components.Component.Component.__init__', 'Component.__init__', (['self', 'name'], {'facility': '"""formulation"""'}), "(self, name, facility='formulation')\n", (1146, 1182), False, 'from pyre.components.Component import Component\n'), ((1485, 1540), 'numpy.dot', 'numpy.dot', (['K', '(integrator.fieldT + integrator.fieldTIncr)'], {}), '(K, integrator.fieldT + integrator.fieldTIncr)\n', (1494, 1540), False, 'import numpy\n')]
|
import numpy as np
from pprint import pprint
def cal_eigenvalues_and_eigenvectors(A):
"""
:param A: n x n Hermitian matrix
:return:
"""
eigenvalues, normed_eigenvectors = np.linalg.eig(A)
    # The two assignments below are redundant; kept for readability
lmd = eigenvalues
v = normed_eigenvectors
return lmd, v
def cal_determinant(M):
return np.linalg.det(M)
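# Reference note: the check below exercises what appears to be the
# eigenvector-eigenvalue identity ("lemma 2" of the paper the docstrings and
# comments allude to). For a Hermitian A with eigenvalues lmd and normed
# eigenvectors v (columns of v), and M_j the minor of A with row/column j
# removed:
#     v[j, i]**2 * prod_{k != i} (lmd[i] - lmd[k])
#         == prod_k (lmd[i] - lmd_of_M_j[k])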
def check_lemma2():
"""
lmd: short for lambda, i.e., eigenvalues.
"lambda" is not a good choice in python so I use lmd instead
v : normed_eigenvectors
:return:
"""
n = np.random.randint(low=3, high=10) # Dimension of a Hermitian matrix
C = np.matrix(np.random.rand(n, n)) # Seed Matrix
A = (C.getH() + C) # Construct Hermitian matrix
pprint("Pick a {} x {} matrix".format(n, n))
pprint(A)
lmd, v = cal_eigenvalues_and_eigenvectors(A)
pprint("Lambda Shape : {}".format(lmd.shape))
pprint("V Shape: {}".format(v.shape))
# Now pick a dimension: i
i = np.random.randint(low=1, high=n)
pprint("Pick one dimension to check : {}".format(i))
# Now pick a dimension: j
j = np.random.randint(low=0, high=n)
pprint("Pick one dimension to delete : {}".format(j))
# Now, let's compute left side of equation (2) in paper
    left = v[j - 1, i - 1] ** 2
for k in range(0, n):
if k == i - 1:
continue
left *= (lmd[i - 1] - lmd[k])
pprint("Left side equals to {}".format(left))
# Now, let's compute right side of the equation (2) in paper
right = 1
M = np.delete(A, (j - 1), axis=0)
M_j = np.delete(M, (j - 1), axis=1)
lmd_M_j, v_M_j = cal_eigenvalues_and_eigenvectors(M_j)
for k in range(0, n - 1):
right *= (lmd[i - 1] - lmd_M_j[k])
pprint("Right side equals to {}".format(right))
assert np.abs(left - right) < 1e-5, "left side {} does not equal to the right side {}.".format(left, right)
if __name__ == '__main__':
check_lemma2()
|
[
"numpy.abs",
"numpy.linalg.eig",
"numpy.random.randint",
"pprint.pprint",
"numpy.random.rand",
"numpy.linalg.det",
"numpy.delete"
] |
[((194, 210), 'numpy.linalg.eig', 'np.linalg.eig', (['A'], {}), '(A)\n', (207, 210), True, 'import numpy as np\n'), ((369, 385), 'numpy.linalg.det', 'np.linalg.det', (['M'], {}), '(M)\n', (382, 385), True, 'import numpy as np\n'), ((590, 623), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(3)', 'high': '(10)'}), '(low=3, high=10)\n', (607, 623), True, 'import numpy as np\n'), ((855, 864), 'pprint.pprint', 'pprint', (['A'], {}), '(A)\n', (861, 864), False, 'from pprint import pprint\n'), ((1046, 1078), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': 'n'}), '(low=1, high=n)\n', (1063, 1078), True, 'import numpy as np\n'), ((1175, 1207), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'n'}), '(low=0, high=n)\n', (1192, 1207), True, 'import numpy as np\n'), ((1608, 1635), 'numpy.delete', 'np.delete', (['A', '(j - 1)'], {'axis': '(0)'}), '(A, j - 1, axis=0)\n', (1617, 1635), True, 'import numpy as np\n'), ((1648, 1675), 'numpy.delete', 'np.delete', (['M', '(j - 1)'], {'axis': '(1)'}), '(M, j - 1, axis=1)\n', (1657, 1675), True, 'import numpy as np\n'), ((677, 697), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (691, 697), True, 'import numpy as np\n'), ((1875, 1895), 'numpy.abs', 'np.abs', (['(left - right)'], {}), '(left - right)\n', (1881, 1895), True, 'import numpy as np\n')]
|
import unittest
from sparkel.nlp.words import word_count
class WordsTestCase(unittest.TestCase):
def setUp(self):
self.text = u"This is an simple test case for Spark and Bazel!"
# <prefix>_<function_name>
def test_word_count(self):
expectation = 10
        actual = word_count(self.text)
        self.assertEqual(expectation, actual)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"sparkel.nlp.words.word_count"
] |
[((354, 369), 'unittest.main', 'unittest.main', ([], {}), '()\n', (367, 369), False, 'import unittest\n'), ((299, 320), 'sparkel.nlp.words.word_count', 'word_count', (['self.text'], {}), '(self.text)\n', (309, 320), False, 'from sparkel.nlp.words import word_count\n')]
|
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from importlib import import_module
import os.path
import pandas as pd
import sys
import unittest
from tests.common_functions import create_abstract_model, add_components_and_load_data
from tests.project.operations.common_functions import get_project_operational_timepoints
TEST_DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), "..", "..", "test_data")
# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = [
"temporal.operations.timepoints",
"temporal.operations.horizons",
"temporal.investment.periods",
"geography.load_zones",
"project",
"project.capacity.capacity",
"project.availability.availability",
"project.fuels",
"project.operations",
"project.operations.operational_types",
"project.operations.power",
"project.operations.fuel_burn",
]
NAME_OF_MODULE_BEING_TESTED = "project.operations.costs"
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
try:
imported_module = import_module("." + str(mdl), package="gridpath")
IMPORTED_PREREQ_MODULES.append(imported_module)
except ImportError:
print("ERROR! Module " + str(mdl) + " not found.")
sys.exit(1)
# Import the module we'll test
try:
MODULE_BEING_TESTED = import_module(
"." + NAME_OF_MODULE_BEING_TESTED, package="gridpath"
)
except ImportError:
print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED + " to test.")
class TestOperationalCosts(unittest.TestCase):
""" """
def test_add_model_components(self):
"""
Test that there are no errors when adding model components
:return:
"""
create_abstract_model(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage="",
)
def test_load_model_data(self):
"""
Test that data are loaded with no errors
:return:
"""
add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage="",
)
def test_data_loaded_correctly(self):
"""
Test that the data loaded are as expected
:return:
"""
m, data = add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage="",
)
instance = m.create_instance(data)
# Load test data as dataframes
projects_df = pd.read_csv(
os.path.join(TEST_DATA_DIRECTORY, "inputs", "projects.tab"), sep="\t"
)
var_om_curve_df = pd.read_csv(
os.path.join(TEST_DATA_DIRECTORY, "inputs", "variable_om_curves.tab"),
sep="\t",
)
startup_by_st_df = pd.read_csv(
os.path.join(TEST_DATA_DIRECTORY, "inputs", "startup_chars.tab"), sep="\t"
)
timepoints_df = pd.read_csv(
os.path.join(TEST_DATA_DIRECTORY, "inputs", "timepoints.tab"),
sep="\t",
usecols=["timepoint", "period"],
)
# Set: VAR_OM_COST_SIMPLE_PRJ_OPR_TMPS
expected_var_om_simple_projects = sorted(
projects_df[projects_df["variable_om_cost_per_mwh"] != "."][
"project"
].tolist()
)
expected_var_om_simple_prj_tmps = get_project_operational_timepoints(
expected_var_om_simple_projects
)
actual_var_om_simple_prj_tmps = sorted(
[(p, tmp) for (p, tmp) in instance.VAR_OM_COST_SIMPLE_PRJ_OPR_TMPS]
)
self.assertListEqual(
expected_var_om_simple_prj_tmps, actual_var_om_simple_prj_tmps
)
# Set: VAR_OM_COST_CURVE_PRJS_OPR_TMPS
expected_var_om_curve_projects = sorted(
var_om_curve_df["project"].unique().tolist()
)
expected_var_om_curve_prj_tmps = get_project_operational_timepoints(
expected_var_om_curve_projects
)
actual_var_om_curve_prj_tmps = sorted(
[(p, tmp) for (p, tmp) in instance.VAR_OM_COST_CURVE_PRJS_OPR_TMPS]
)
self.assertListEqual(
expected_var_om_curve_prj_tmps, actual_var_om_curve_prj_tmps
)
# Set: VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS
expected_segments_by_prj_period = {
("Disp_Binary_Commit", 2020): [0, 1],
("Disp_Binary_Commit", 2030): [0],
("Disp_Cont_Commit", 2020): [0],
("Disp_Cont_Commit", 2030): [0],
}
expected_var_om_curve_prj_tmp_sgms = list()
for (prj, tmp) in expected_var_om_curve_prj_tmps:
prd = timepoints_df[timepoints_df["timepoint"] == tmp].iloc[0]["period"]
segments = expected_segments_by_prj_period[prj, prd]
for sgm in segments:
expected_var_om_curve_prj_tmp_sgms.append((prj, tmp, sgm))
actual_var_om_curve_prj_tmp_sgms = sorted(
[
(prj, tmp, sgm)
for (prj, tmp, sgm) in instance.VAR_OM_COST_CURVE_PRJS_OPR_TMPS_SGMS
]
)
self.assertListEqual(
expected_var_om_curve_prj_tmp_sgms, actual_var_om_curve_prj_tmp_sgms
)
# Set: VAR_OM_COST_ALL_PRJS_OPR_TMPS
expected_var_om_all_prj_tmps = sorted(
list(set(expected_var_om_simple_prj_tmps + expected_var_om_curve_prj_tmps))
)
actual_var_om_all_prj_tmps = sorted(
[(p, tmp) for (p, tmp) in instance.VAR_OM_COST_ALL_PRJS_OPR_TMPS]
)
self.assertListEqual(expected_var_om_all_prj_tmps, actual_var_om_all_prj_tmps)
# Set: STARTUP_COST_PRJ_OPR_TMPS
expected_startup_cost_simple_projects = sorted(
projects_df[projects_df["startup_cost_per_mw"] != "."]["project"].tolist()
)
expected_startup_by_st_projects = sorted(
startup_by_st_df["project"].unique().tolist()
)
expected_startup_cost_all_projects = sorted(
list(
set(
expected_startup_cost_simple_projects
+ expected_startup_by_st_projects
)
)
)
expected_startup_cost_all_prj_tmps = get_project_operational_timepoints(
expected_startup_cost_all_projects
)
actual_startup_cost_all_prj_tmps = sorted(
[(p, tmp) for (p, tmp) in instance.STARTUP_COST_PRJ_OPR_TMPS]
)
self.assertListEqual(
expected_startup_cost_all_prj_tmps, actual_startup_cost_all_prj_tmps
)
# Set: SHUTDOWN_COST_PRJ_OPR_TMPS
expected_shutdown_cost_projects = sorted(
projects_df[projects_df["shutdown_cost_per_mw"] != "."]["project"].tolist()
)
expected_shutdown_cost_prj_tmps = get_project_operational_timepoints(
expected_shutdown_cost_projects
)
actual_shutdown_cost_prj_tmps = sorted(
[(p, tmp) for (p, tmp) in instance.SHUTDOWN_COST_PRJ_OPR_TMPS]
)
self.assertListEqual(
expected_shutdown_cost_prj_tmps, actual_shutdown_cost_prj_tmps
)
# Set: VIOL_ALL_PRJ_OPR_TMPS
expected_ramp_up_viol_projects = sorted(
projects_df[projects_df["ramp_up_violation_penalty"] != "."][
"project"
].tolist()
)
expected_ramp_down_viol_projects = sorted(
projects_df[projects_df["ramp_down_violation_penalty"] != "."][
"project"
].tolist()
)
expected_min_up_time_viol_projects = sorted(
projects_df[projects_df["min_up_time_violation_penalty"] != "."][
"project"
].tolist()
)
expected_min_down_time_viol_projects = sorted(
projects_df[projects_df["min_down_time_violation_penalty"] != "."][
"project"
].tolist()
)
expected_opr_viol_prj_tmps = get_project_operational_timepoints(
expected_ramp_up_viol_projects
+ expected_ramp_down_viol_projects
+ expected_min_up_time_viol_projects
+ expected_min_down_time_viol_projects
)
actual_opr_viol_prj_tmps = sorted(
[(p, tmp) for (p, tmp) in instance.VIOL_ALL_PRJ_OPR_TMPS]
)
self.assertListEqual(expected_opr_viol_prj_tmps, actual_opr_viol_prj_tmps)
# Set: CURTAILMENT_COST_PRJ_OPR_TMPS
expected_curt_cost_projects = sorted(
projects_df[projects_df["curtailment_cost_per_pwh"] != "."][
"project"
].tolist()
)
expected_curt_cost_prj_tmps = get_project_operational_timepoints(
expected_curt_cost_projects
)
actual_curt_cost_prj_tmps = sorted(
[(p, tmp) for (p, tmp) in instance.CURTAILMENT_COST_PRJ_OPR_TMPS]
)
self.assertListEqual(expected_curt_cost_prj_tmps, actual_curt_cost_prj_tmps)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"importlib.import_module",
"tests.common_functions.add_components_and_load_data",
"tests.project.operations.common_functions.get_project_operational_timepoints",
"sys.exit",
"tests.common_functions.create_abstract_model"
] |
[((1842, 1910), 'importlib.import_module', 'import_module', (["('.' + NAME_OF_MODULE_BEING_TESTED)"], {'package': '"""gridpath"""'}), "('.' + NAME_OF_MODULE_BEING_TESTED, package='gridpath')\n", (1855, 1910), False, 'from importlib import import_module\n'), ((9899, 9914), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9912, 9914), False, 'import unittest\n'), ((2252, 2417), 'tests.common_functions.create_abstract_model', 'create_abstract_model', ([], {'prereq_modules': 'IMPORTED_PREREQ_MODULES', 'module_to_test': 'MODULE_BEING_TESTED', 'test_data_dir': 'TEST_DATA_DIRECTORY', 'subproblem': '""""""', 'stage': '""""""'}), "(prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED, test_data_dir=TEST_DATA_DIRECTORY,\n subproblem='', stage='')\n", (2273, 2417), False, 'from tests.common_functions import create_abstract_model, add_components_and_load_data\n'), ((2616, 2788), 'tests.common_functions.add_components_and_load_data', 'add_components_and_load_data', ([], {'prereq_modules': 'IMPORTED_PREREQ_MODULES', 'module_to_test': 'MODULE_BEING_TESTED', 'test_data_dir': 'TEST_DATA_DIRECTORY', 'subproblem': '""""""', 'stage': '""""""'}), "(prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED, test_data_dir=TEST_DATA_DIRECTORY,\n subproblem='', stage='')\n", (2644, 2788), False, 'from tests.common_functions import create_abstract_model, add_components_and_load_data\n'), ((3004, 3176), 'tests.common_functions.add_components_and_load_data', 'add_components_and_load_data', ([], {'prereq_modules': 'IMPORTED_PREREQ_MODULES', 'module_to_test': 'MODULE_BEING_TESTED', 'test_data_dir': 'TEST_DATA_DIRECTORY', 'subproblem': '""""""', 'stage': '""""""'}), "(prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED, test_data_dir=TEST_DATA_DIRECTORY,\n subproblem='', stage='')\n", (3032, 3176), False, 'from tests.common_functions import create_abstract_model, add_components_and_load_data\n'), ((4205, 4272), 'tests.project.operations.common_functions.get_project_operational_timepoints', 'get_project_operational_timepoints', (['expected_var_om_simple_projects'], {}), '(expected_var_om_simple_projects)\n', (4239, 4272), False, 'from tests.project.operations.common_functions import get_project_operational_timepoints\n'), ((4756, 4822), 'tests.project.operations.common_functions.get_project_operational_timepoints', 'get_project_operational_timepoints', (['expected_var_om_curve_projects'], {}), '(expected_var_om_curve_projects)\n', (4790, 4822), False, 'from tests.project.operations.common_functions import get_project_operational_timepoints\n'), ((7105, 7175), 'tests.project.operations.common_functions.get_project_operational_timepoints', 'get_project_operational_timepoints', (['expected_startup_cost_all_projects'], {}), '(expected_startup_cost_all_projects)\n', (7139, 7175), False, 'from tests.project.operations.common_functions import get_project_operational_timepoints\n'), ((7688, 7755), 'tests.project.operations.common_functions.get_project_operational_timepoints', 'get_project_operational_timepoints', (['expected_shutdown_cost_projects'], {}), '(expected_shutdown_cost_projects)\n', (7722, 7755), False, 'from tests.project.operations.common_functions import get_project_operational_timepoints\n'), ((8855, 9040), 'tests.project.operations.common_functions.get_project_operational_timepoints', 'get_project_operational_timepoints', (['(expected_ramp_up_viol_projects + expected_ramp_down_viol_projects +\n expected_min_up_time_viol_projects + expected_min_down_time_viol_projects)'], {}), '(expected_ramp_up_viol_projects +\n expected_ramp_down_viol_projects + expected_min_up_time_viol_projects +\n expected_min_down_time_viol_projects)\n', (8889, 9040), False, 'from tests.project.operations.common_functions import get_project_operational_timepoints\n'), ((9561, 9624), 'tests.project.operations.common_functions.get_project_operational_timepoints', 'get_project_operational_timepoints', (['expected_curt_cost_projects'], {}), '(expected_curt_cost_projects)\n', (9595, 9624), False, 'from tests.project.operations.common_functions import get_project_operational_timepoints\n'), ((1768, 1779), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1776, 1779), False, 'import sys\n')]
|
import logging
from logging.handlers import TimedRotatingFileHandler
import os
def configure_logging(name):
logging.getLogger().setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
logging.getLogger().addHandler(console_handler)
log_filename = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../../logs/%s.log' % name
))
os.makedirs(os.path.dirname(log_filename), exist_ok=True)
file_handler = TimedRotatingFileHandler(
filename=log_filename,
when='midnight',
backupCount=int(os.environ.get('PEERSCOUT_MAX_LOG_DAYS', 842))
)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
))
logging.getLogger().addHandler(file_handler)
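# Illustrative usage (the module name 'server' is a placeholder, not part of
# this module):
#     configure_logging('server')
#     logging.getLogger(__name__).info('started')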
|
[
"os.path.abspath",
"os.path.dirname",
"logging.StreamHandler",
"logging.Formatter",
"os.environ.get",
"logging.getLogger"
] |
[((181, 204), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (202, 204), False, 'import logging\n'), ((460, 489), 'os.path.dirname', 'os.path.dirname', (['log_filename'], {}), '(log_filename)\n', (475, 489), False, 'import os\n'), ((756, 829), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (773, 829), False, 'import logging\n'), ((114, 133), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (131, 133), False, 'import logging\n'), ((252, 271), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (269, 271), False, 'import logging\n'), ((849, 868), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (866, 868), False, 'import logging\n'), ((374, 399), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (389, 399), False, 'import os\n'), ((632, 677), 'os.environ.get', 'os.environ.get', (['"""PEERSCOUT_MAX_LOG_DAYS"""', '(842)'], {}), "('PEERSCOUT_MAX_LOG_DAYS', 842)\n", (646, 677), False, 'import os\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Included modules
import html
import codecs
import imghdr
import os
import shutil
import tempfile
from urllib.request import urlretrieve
from urllib.parse import urljoin
from hashlib import md5
# Third party modules
import requests
import bs4
from bs4 import BeautifulSoup
# Local modules
from . import clean
class NoUrlError(Exception):
def __str__(self):
return 'Chapter instance URL attribute is None'
class ResourceErrorException(Exception):
def __init__(self, url):
self.url = url
def __str__(self):
return 'Error downloading resource from ' + self.url
def get_image_type(url):
"""
获取图片的类型.
Parameters:
url(str): 图片路径.
returns:
str: 图片的类型名{'jpg', 'jpge', 'gif', 'png', None}
raises:
IOError: 图片类型不在 {'jpg', 'jpge', 'gif', 'png'} 四个类型之中
"""
    # bugfix: a comma was actually missing here!
for ending in ['jpg', 'jpeg', 'gif', 'png']:
if url.endswith(ending):
return ending
else:
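        # for/else: this branch runs only when the loop above finds no matching suffix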
try:
_, temp_file_name = tempfile.mkstemp()
urlretrieve(url, temp_file_name)
image_type = imghdr.what(temp_file_name)
return image_type
except IOError:
return None
def download_resource(url, path):
"""
下载资源,包装 requests
:param url: 资源完整链接
:param path: 资源完整保存地址
:return:
"""
# 文件大小
size = 0
# 请求次数
num = 0
while size == 0:
try:
# urllib.urlretrieve(image_url, full_image_file_name)
with open(path, 'wb') as f:
user_agent = r'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
request_headers = {'User-Agent': user_agent}
requests_object = requests.get(url, headers=request_headers)
try:
content = requests_object.content
# Check for empty response
f.write(content)
except AttributeError:
raise ResourceErrorException(url)
except IOError:
raise ResourceErrorException(url)
        # check whether the file was saved correctly
size = os.path.getsize(path)
if size == 0:
os.remove(path)
        # give up after more than ten attempts
num += 1
if num >= 10:
break
def save_css(css_url, css_directory, css_name):
full_css_path = os.path.join(css_directory, css_name + '.css')
    # if the file already exists, do nothing
if os.path.exists(full_css_path):
return
    # otherwise request and download it
download_resource(css_url, full_css_path)
def save_image(image_url, image_directory, image_name):
"""
保存在线图片到指定的路径, 可自定义文件名.
Parameters:
image_url (str): image路径.
image_directory (str): 保存image的路径.
image_name (str): image的文件名(无后缀).
Raises:
ResourceErrorException: 在无法保存该图片时触发该 Error.
Returns:
str: 图片的类型.
"""
image_type = get_image_type(image_url)
if image_type is None:
raise ResourceErrorException(image_url)
full_image_file_name = os.path.join(
image_directory, image_name + '.' + image_type)
# If the image is present on the local filesystem just copy it
if os.path.exists(image_url):
shutil.copy(image_url, full_image_file_name)
return image_type
    # skip if the file already exists
if os.path.exists(full_image_file_name):
return image_type
    # otherwise download it
download_resource(image_url, full_image_file_name)
return image_type
def _replace_css(css_url, css_tag, ebook_folder, css_name=None):
try:
assert isinstance(css_tag, bs4.element.Tag)
except AssertionError:
raise TypeError("css_tag cannot be of type " + str(type(css_tag)))
if css_name is None:
css_name = md5(css_url.encode('utf-8')).hexdigest()
try:
css_dir_path = os.path.join(ebook_folder, 'css')
assert os.path.exists(css_dir_path)
save_css(css_url, css_dir_path, css_name)
css_link = 'css' + '/' + css_name + '.css'
css_tag['href'] = css_link
return css_link, css_name, 'css'
except ResourceErrorException:
css_tag.decompose()
except AssertionError:
raise ValueError(
'%s doesn\'t exist or doesn\'t contain a subdirectory css' % ebook_folder)
except TypeError:
css_tag.decompose()
def _replace_image(image_url, image_tag, ebook_folder,
image_name=None):
"""
将 image_tag 中的image下载到本地, 并将 image_tag 中img的src修改为本地src.
Parameters:
image_url (str): image的url.
image_tag (bs4.element.Tag): bs4中包含image的tag.
ebook_folder (str): 将外部图片保存到本地的地址. 内部一定要包含一个名为 "img" 的文件夹.
image_name (Option[str]): 保存到本地的imgae的文件名(不包含后缀).
Returns:
str: image本地链接地址
str: image的文件名(不包含后缀)
str: image的类型 {'jpg', 'jpge', 'gif', 'png'} .
"""
try:
assert isinstance(image_tag, bs4.element.Tag)
except AssertionError:
raise TypeError("image_tag cannot be of type " + str(type(image_tag)))
if image_name is None:
image_name = md5(image_url.encode('utf-8')).hexdigest()
try:
image_full_path = os.path.join(ebook_folder, 'img')
assert os.path.exists(image_full_path)
image_extension = save_image(image_url, image_full_path,
image_name)
image_link = 'img' + '/' + image_name + '.' + image_extension
image_tag['src'] = image_link
image_tag['href'] = image_link
return image_link, image_name, image_extension
except ResourceErrorException:
image_tag.decompose()
except AssertionError:
raise ValueError(
'%s doesn\'t exist or doesn\'t contain a subdirectory img' % ebook_folder)
except TypeError:
image_tag.decompose()
class Chapter():
"""
chapter对象类. 不能直接调用, 应该用 ChapterFactor() 去实例化chapter.
Parameters:
content (str): 章节内容. 必须为xhtml格式.
title (str): 章节标题.
url (Option[str]): 章节所在网页的URL(如果适用), 默认情况下为None.
Attributes:
content (str): 章节内容.
title (str): 章节标题.
url (str): 章节所在网页的URL(如果适用).
html_title (str): 将特殊字符替换为html安全序列的标题字符串.
"""
def __init__(self, content, title, url=None):
self._validate_input_types(content, title)
self.title = title
self.content = content
self._content_tree = BeautifulSoup(self.content, 'html.parser')
self.url = url
self.html_title = html.escape(self.title, quote=True)
self.imgs = []
self.css = []
def write(self, file_name):
"""
将chapter内容写入 xhtml文件.
Parameters:
file_name (str): 要写入xhtml文件的全名(包含后缀).
"""
try:
assert file_name[-6:] == '.xhtml'
except (AssertionError, IndexError):
raise ValueError('filename must end with .xhtml')
with open(file_name, 'w', encoding='utf-8') as f:
f.write(self.content)
def _validate_input_types(self, content, title):
try:
assert isinstance(content, str)
except AssertionError:
raise TypeError('content must be a string')
try:
assert isinstance(title, str)
except AssertionError:
raise TypeError('title must be a string')
try:
assert title != ''
except AssertionError:
raise ValueError('title cannot be empty string')
try:
assert content != ''
except AssertionError:
raise ValueError('content cannot be empty string')
def get_url(self):
if self.url is not None:
return self.url
else:
raise NoUrlError()
def _get_image_urls(self):
image_nodes = self._content_tree.find_all('img')
raw_image_urls = [node['src']
for node in image_nodes if node.has_attr('src')]
full_image_urls = [urljoin(
self.url, image_url) for image_url in raw_image_urls]
image_nodes_filtered = [
node for node in image_nodes if node.has_attr('src')]
return zip(image_nodes_filtered, full_image_urls)
def _get_css_urls(self):
css_nodes = self._content_tree.find_all("link", type='text/css')
raw_css_urls = [node['href']
for node in css_nodes if node.has_attr('href')]
full_css_urls = [urljoin(
self.url, image_url) for image_url in raw_css_urls]
css_nodes_filtered = [
node for node in css_nodes if node.has_attr('href')]
return zip(css_nodes_filtered, full_css_urls)
def _replace_css_in_chapter(self, ebook_folder):
css_url_list = self._get_css_urls()
for css_tag, css_url in css_url_list:
cssInfo = _replace_css(
css_url, css_tag, ebook_folder)
            if cssInfo is not None:
css_link, css_id, css_type = cssInfo
css = {'link': css_link, 'id': css_id, 'type': css_type}
if css not in self.css:
self.css.append(css)
unformatted_html_unicode_string = self._content_tree.prettify()
unformatted_html_unicode_string = unformatted_html_unicode_string.replace(
'<br>', '<br/>')
self.content = unformatted_html_unicode_string
def _replace_images_in_chapter(self, ebook_folder):
image_url_list = self._get_image_urls()
for image_tag, image_url in image_url_list:
imgInfo = _replace_image(
image_url, image_tag, ebook_folder)
            if imgInfo is not None:
img_link, img_id, img_type = imgInfo
img = {'link': img_link, 'id': img_id, 'type': img_type}
self.imgs.append(img)
unformatted_html_unicode_string = self._content_tree.prettify()
unformatted_html_unicode_string = unformatted_html_unicode_string.replace(
'<br>', '<br/>')
self.content = unformatted_html_unicode_string
class ChapterFactory():
"""
用来创建 chapter的类. 可以从 url, 文件 或 文本 三个方式创建 chapter.
Parameters:
clean_function (Option[function]): 用于清扫要在epub中使用的原始html 的函数. 默认情况下, 这是html2epub.clean函数.
"""
def __init__(self, clean_function=clean.clean):
self.clean_function = clean_function
user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0'
self.request_headers = {'User-Agent': user_agent}
def create_chapter_from_url(self, url, title=None, strict=True):
"""
从URL创建chapter对象.
从给定的url中提取网页, 使用clean_function方法对其进行清理, 并将其另存为创建的chpter的内容.
在执行任何javascript之前加载的基本网页.
Parameters:
url (string): 获取chapter对象的网页地址.
title (Option[string]): chapter的章节名, 如果为None, 则使用从网页中获取的 title标签 的内容作为章节名.
Returns:
Chapter: 一个Chapter对象, 其内容是给定url的网页.
Raises:
ValueError: 如果无法连接该url则触发此 Error.
"""
if strict is True:
self.clean_function = clean.clean
else:
self.clean_function = clean.clean_not_strict
try:
request_object = requests.get(
url, headers=self.request_headers, allow_redirects=False)
except requests.exceptions.SSLError:
raise ValueError("Url %s doesn't have valid SSL certificate" % url)
except (requests.exceptions.MissingSchema,
requests.exceptions.ConnectionError):
raise ValueError(
"%s is an invalid url or no network connection" % url)
unicode_string = request_object.text
return self.create_chapter_from_string(unicode_string, url, title)
def create_chapter_from_file(self, file_name, url=None, title=None, strict=True):
"""
从html或xhtml文件创建chapter对象.
使用clean_function方法清理文件的内容, 并将其另存为创建的chapter的内容.
Parameters:
file_name (string): 包含所创建chapter的html或xhtml内容的file_name.
url (Option[string]): A url to infer the title of the chapter from
title (Option[string]): chapter的章节名, 如果为None, 则使用从网页文件中获取的 title标签 的内容作为章节名.
Returns:
Chapter: 一个Chapter对象, 其内容是给定html或xhtml文件的内容.
"""
if strict is True:
self.clean_function = clean.clean
else:
self.clean_function = clean.clean_not_strict
with codecs.open(file_name, 'r', encoding='utf-8') as f:
content_string = f.read()
return self.create_chapter_from_string(content_string, url, title)
def create_chapter_from_string(self, html_string, url=None, title=None, strict=True):
"""
从字符串创建chapter对象.
使用clean_function方法清理字符串, 并将其另存为创建的chapter的内容.
Parameters:
html_string (string): 创建的chapter的html或xhtml内容.
url (Option[string]): 推断章节标题的url
title (Option[string]): chapter的章节名, 如果为None, 则使用从文本中获取的 title标签 的内容作为章节名.
strict : html 清洗的标准是否严格,严格(True)则需要进行过滤,非严格(False)模式直接使用原 html
Returns:
Chapter: 一个Chapter对象, 其内容是给定文本的内容.
"""
if strict is True:
self.clean_function = clean.clean
else:
self.clean_function = clean.clean_not_strict
clean_html_string = self.clean_function(html_string)
clean_xhtml_string = clean.html_to_xhtml(clean_html_string)
if title:
pass
else:
try:
root = BeautifulSoup(html_string, 'html.parser')
title_node = root.title
if title_node is not None:
title = title_node.string
else:
raise ValueError
except (IndexError, ValueError):
title = 'Ebook Chapter'
return Chapter(clean_xhtml_string, title, url)
create_chapter_from_url = ChapterFactory().create_chapter_from_url
create_chapter_from_file = ChapterFactory().create_chapter_from_file
create_chapter_from_string = ChapterFactory().create_chapter_from_string
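# Illustrative usage of the module-level helpers above (the URL and file name
# are placeholders, not defaults of this module):
#     chapter = create_chapter_from_url('https://example.com/article')
#     chapter.write('chapter1.xhtml')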
|
[
"os.remove",
"urllib.parse.urljoin",
"codecs.open",
"tempfile.mkstemp",
"os.path.getsize",
"os.path.exists",
"imghdr.what",
"urllib.request.urlretrieve",
"requests.get",
"bs4.BeautifulSoup",
"os.path.join",
"html.escape",
"shutil.copy"
] |
[((2548, 2594), 'os.path.join', 'os.path.join', (['css_directory', "(css_name + '.css')"], {}), "(css_directory, css_name + '.css')\n", (2560, 2594), False, 'import os\n'), ((2621, 2650), 'os.path.exists', 'os.path.exists', (['full_css_path'], {}), '(full_css_path)\n', (2635, 2650), False, 'import os\n'), ((3231, 3291), 'os.path.join', 'os.path.join', (['image_directory', "(image_name + '.' + image_type)"], {}), "(image_directory, image_name + '.' + image_type)\n", (3243, 3291), False, 'import os\n'), ((3378, 3403), 'os.path.exists', 'os.path.exists', (['image_url'], {}), '(image_url)\n', (3392, 3403), False, 'import os\n'), ((3509, 3545), 'os.path.exists', 'os.path.exists', (['full_image_file_name'], {}), '(full_image_file_name)\n', (3523, 3545), False, 'import os\n'), ((2317, 2338), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (2332, 2338), False, 'import os\n'), ((3414, 3458), 'shutil.copy', 'shutil.copy', (['image_url', 'full_image_file_name'], {}), '(image_url, full_image_file_name)\n', (3425, 3458), False, 'import shutil\n'), ((4023, 4056), 'os.path.join', 'os.path.join', (['ebook_folder', '"""css"""'], {}), "(ebook_folder, 'css')\n", (4035, 4056), False, 'import os\n'), ((4073, 4101), 'os.path.exists', 'os.path.exists', (['css_dir_path'], {}), '(css_dir_path)\n', (4087, 4101), False, 'import os\n'), ((5390, 5423), 'os.path.join', 'os.path.join', (['ebook_folder', '"""img"""'], {}), "(ebook_folder, 'img')\n", (5402, 5423), False, 'import os\n'), ((5440, 5471), 'os.path.exists', 'os.path.exists', (['image_full_path'], {}), '(image_full_path)\n', (5454, 5471), False, 'import os\n'), ((6664, 6706), 'bs4.BeautifulSoup', 'BeautifulSoup', (['self.content', ''"""html.parser"""''], {}), "(self.content, 'html.parser')\n", (6677, 6706), False, 'from bs4 import BeautifulSoup\n'), ((6758, 6793), 'html.escape', 'html.escape', (['self.title'], {'quote': '(True)'}), '(self.title, quote=True)\n', (6769, 6793), False, 'import html\n'), ((1128, 1146), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (1144, 1146), False, 'import tempfile\n'), ((1160, 1192), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'temp_file_name'], {}), '(url, temp_file_name)\n', (1171, 1192), False, 'from urllib.request import urlretrieve\n'), ((1219, 1246), 'imghdr.what', 'imghdr.what', (['temp_file_name'], {}), '(temp_file_name)\n', (1230, 1246), False, 'import imghdr\n'), ((2375, 2390), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (2384, 2390), False, 'import os\n'), ((8274, 8302), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'image_url'], {}), '(self.url, image_url)\n', (8281, 8302), False, 'from urllib.parse import urljoin\n'), ((8755, 8783), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'image_url'], {}), '(self.url, image_url)\n', (8762, 8783), False, 'from urllib.parse import urljoin\n'), ((11588, 11658), 'requests.get', 'requests.get', (['url'], {'headers': 'self.request_headers', 'allow_redirects': '(False)'}), '(url, headers=self.request_headers, allow_redirects=False)\n', (11600, 11658), False, 'import requests\n'), ((12846, 12891), 'codecs.open', 'codecs.open', (['file_name', '"""r"""'], {'encoding': '"""utf-8"""'}), "(file_name, 'r', encoding='utf-8')\n", (12857, 12891), False, 'import codecs\n'), ((1908, 1950), 'requests.get', 'requests.get', (['url'], {'headers': 'request_headers'}), '(url, headers=request_headers)\n', (1920, 1950), False, 'import requests\n'), ((13945, 13986), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_string', ''"""html.parser"""''], {}), "(html_string, 'html.parser')\n", (13958, 13986), False, 'from bs4 import BeautifulSoup\n')]
|
from datetime import date
from . import GenericCalendarTest
from ..africa.mozambique import Mozambique
class MozambiqueTest(GenericCalendarTest):
cal_class = Mozambique
def test_year_new_year_shift(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 1, 1), holidays)
self.assertNotIn(date(2019, 1, 2), holidays)
holidays = self.cal.holidays_set(2020)
self.assertIn(date(2020, 1, 1), holidays)
self.assertNotIn(date(2020, 1, 2), holidays)
def test_n_holidays(self):
n_holidays = len(self.cal.holidays_set(2019))
        for holiday in self.cal.get_calendar_holidays(2019):
print(holiday)
assert n_holidays == 10
def test_year_2018(self):
holidays = self.cal.holidays_set(2018)
# Fixed days section:
# 1. New Year's Day
self.assertIn(date(2018, 1, 1), holidays)
# 2. Mozambican Heroes' Day
self.assertIn(date(2018, 2, 3), holidays)
# 3. Mozambican Women's Day
self.assertIn(date(2018, 4, 7), holidays)
# 4. Good Friday
self.assertIn(date(2018, 3, 30), holidays)
# 5. Labour Day
self.assertIn(date(2018, 5, 1), holidays)
# 6. Independence Day
self.assertIn(date(2018, 6, 25), holidays)
# 7. Victory Day
self.assertIn(date(2018, 9, 7), holidays)
# 8. Armed Forces Day
self.assertIn(date(2018, 9, 25), holidays)
# 9. Peace And Reconciliation Day
self.assertIn(date(2018, 10, 4), holidays)
# 10. Christmas day
self.assertIn(date(2018, 12, 25), holidays)
def test_year_2019(self):
holidays = self.cal.holidays_set(2019)
# Fixed days section:
# 1. New Year's Day
self.assertIn(date(2019, 1, 1), holidays)
# 2. Mozambican Heroes' Day
self.assertIn(date(2019, 2, 3), holidays)
# 3. Mozambican Women's Day
self.assertIn(date(2019, 4, 7), holidays)
# 4. Good Friday
self.assertIn(date(2019, 4, 19), holidays)
# 5. Labour Day
self.assertIn(date(2019, 5, 1), holidays)
# 6. Independence Day
self.assertIn(date(2019, 6, 25), holidays)
# 7. Victory Day
self.assertIn(date(2019, 9, 7), holidays)
# 8. Armed Forces Day
self.assertIn(date(2019, 9, 25), holidays)
# 9. Peace And Reconciliation Day
self.assertIn(date(2019, 10, 4), holidays)
# 10. Christmas day
self.assertIn(date(2019, 12, 25), holidays)
def test_year_2020(self):
holidays = self.cal.holidays_set(2020)
# Fixed days section:
# 1. New Year's Day
self.assertIn(date(2020, 1, 1), holidays)
# 2. Mozambican Heroes' Day
self.assertIn(date(2020, 2, 3), holidays)
# 3. Mozambican Women's Day
self.assertIn(date(2020, 4, 7), holidays)
# 4. Good Friday
self.assertIn(date(2020, 4, 10), holidays)
# 5. Labour Day
self.assertIn(date(2020, 5, 1), holidays)
# 6. Independence Day
self.assertIn(date(2020, 6, 25), holidays)
# 7. Victory Day
self.assertIn(date(2020, 9, 7), holidays)
# 8. Armed Forces Day
self.assertIn(date(2020, 9, 25), holidays)
# 9. Peace And Reconciliation Day
self.assertIn(date(2020, 10, 4), holidays)
# 10. Christmas day
self.assertIn(date(2020, 12, 25), holidays)
def test_2020_new_years_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 1, 1)], "New year")
def test_2020_heroes_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 2, 3)], "Mozambican Heroes' Day")
def test_2020_women_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 4, 7)], "Mozambican Women's Day")
def test_2020_good_friday_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 4, 10)], "Good Friday")
def test_2020_labour_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 5, 1)], "Labour Day")
def test_2020_independence_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 6, 25)], "Independence Day")
def test_2020_victory_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 9, 7)], "Victory Day")
def test_2020_armed_forces_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 9, 25)], "Armed Forces Day")
def test_2020_peace_and_reconciliation_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 10, 4)], "Peace And Reconciliation Day")
def test_2020_christmas_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
self.assertEqual(
holidays[date(2020, 12, 25)], "Christmas Day")
|
[
"datetime.date"
] |
[((286, 302), 'datetime.date', 'date', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (290, 302), False, 'from datetime import date\n'), ((339, 355), 'datetime.date', 'date', (['(2019)', '(1)', '(2)'], {}), '(2019, 1, 2)\n', (343, 355), False, 'from datetime import date\n'), ((436, 452), 'datetime.date', 'date', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (440, 452), False, 'from datetime import date\n'), ((489, 505), 'datetime.date', 'date', (['(2020)', '(1)', '(2)'], {}), '(2020, 1, 2)\n', (493, 505), False, 'from datetime import date\n'), ((881, 897), 'datetime.date', 'date', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (885, 897), False, 'from datetime import date\n'), ((967, 983), 'datetime.date', 'date', (['(2018)', '(2)', '(3)'], {}), '(2018, 2, 3)\n', (971, 983), False, 'from datetime import date\n'), ((1053, 1069), 'datetime.date', 'date', (['(2018)', '(4)', '(7)'], {}), '(2018, 4, 7)\n', (1057, 1069), False, 'from datetime import date\n'), ((1128, 1145), 'datetime.date', 'date', (['(2018)', '(3)', '(30)'], {}), '(2018, 3, 30)\n', (1132, 1145), False, 'from datetime import date\n'), ((1203, 1219), 'datetime.date', 'date', (['(2018)', '(5)', '(1)'], {}), '(2018, 5, 1)\n', (1207, 1219), False, 'from datetime import date\n'), ((1283, 1300), 'datetime.date', 'date', (['(2018)', '(6)', '(25)'], {}), '(2018, 6, 25)\n', (1287, 1300), False, 'from datetime import date\n'), ((1359, 1375), 'datetime.date', 'date', (['(2018)', '(9)', '(7)'], {}), '(2018, 9, 7)\n', (1363, 1375), False, 'from datetime import date\n'), ((1439, 1456), 'datetime.date', 'date', (['(2018)', '(9)', '(25)'], {}), '(2018, 9, 25)\n', (1443, 1456), False, 'from datetime import date\n'), ((1532, 1549), 'datetime.date', 'date', (['(2018)', '(10)', '(4)'], {}), '(2018, 10, 4)\n', (1536, 1549), False, 'from datetime import date\n'), ((1611, 1629), 'datetime.date', 'date', (['(2018)', '(12)', '(25)'], {}), '(2018, 12, 25)\n', (1615, 1629), False, 'from datetime import date\n'), ((1799, 1815), 'datetime.date', 'date', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (1803, 1815), False, 'from datetime import date\n'), ((1885, 1901), 'datetime.date', 'date', (['(2019)', '(2)', '(3)'], {}), '(2019, 2, 3)\n', (1889, 1901), False, 'from datetime import date\n'), ((1971, 1987), 'datetime.date', 'date', (['(2019)', '(4)', '(7)'], {}), '(2019, 4, 7)\n', (1975, 1987), False, 'from datetime import date\n'), ((2046, 2063), 'datetime.date', 'date', (['(2019)', '(4)', '(19)'], {}), '(2019, 4, 19)\n', (2050, 2063), False, 'from datetime import date\n'), ((2121, 2137), 'datetime.date', 'date', (['(2019)', '(5)', '(1)'], {}), '(2019, 5, 1)\n', (2125, 2137), False, 'from datetime import date\n'), ((2201, 2218), 'datetime.date', 'date', (['(2019)', '(6)', '(25)'], {}), '(2019, 6, 25)\n', (2205, 2218), False, 'from datetime import date\n'), ((2277, 2293), 'datetime.date', 'date', (['(2019)', '(9)', '(7)'], {}), '(2019, 9, 7)\n', (2281, 2293), False, 'from datetime import date\n'), ((2357, 2374), 'datetime.date', 'date', (['(2019)', '(9)', '(25)'], {}), '(2019, 9, 25)\n', (2361, 2374), False, 'from datetime import date\n'), ((2450, 2467), 'datetime.date', 'date', (['(2019)', '(10)', '(4)'], {}), '(2019, 10, 4)\n', (2454, 2467), False, 'from datetime import date\n'), ((2529, 2547), 'datetime.date', 'date', (['(2019)', '(12)', '(25)'], {}), '(2019, 12, 25)\n', (2533, 2547), False, 'from datetime import date\n'), ((2717, 2733), 'datetime.date', 'date', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (2721, 2733), False, 'from datetime import date\n'), ((2803, 2819), 'datetime.date', 'date', (['(2020)', '(2)', '(3)'], {}), '(2020, 2, 3)\n', (2807, 2819), False, 'from datetime import date\n'), ((2889, 2905), 'datetime.date', 'date', (['(2020)', '(4)', '(7)'], {}), '(2020, 4, 7)\n', (2893, 2905), False, 'from datetime import date\n'), ((2964, 2981), 'datetime.date', 'date', (['(2020)', '(4)', '(10)'], {}), '(2020, 4, 10)\n', (2968, 2981), False, 'from datetime import date\n'), ((3039, 3055), 'datetime.date', 'date', (['(2020)', '(5)', '(1)'], {}), '(2020, 5, 1)\n', (3043, 3055), False, 'from datetime import date\n'), ((3119, 3136), 'datetime.date', 'date', (['(2020)', '(6)', '(25)'], {}), '(2020, 6, 25)\n', (3123, 3136), False, 'from datetime import date\n'), ((3195, 3211), 'datetime.date', 'date', (['(2020)', '(9)', '(7)'], {}), '(2020, 9, 7)\n', (3199, 3211), False, 'from datetime import date\n'), ((3275, 3292), 'datetime.date', 'date', (['(2020)', '(9)', '(25)'], {}), '(2020, 9, 25)\n', (3279, 3292), False, 'from datetime import date\n'), ((3368, 3385), 'datetime.date', 'date', (['(2020)', '(10)', '(4)'], {}), '(2020, 10, 4)\n', (3372, 3385), False, 'from datetime import date\n'), ((3447, 3465), 'datetime.date', 'date', (['(2020)', '(12)', '(25)'], {}), '(2020, 12, 25)\n', (3451, 3465), False, 'from datetime import date\n'), ((3647, 3663), 'datetime.date', 'date', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (3651, 3663), False, 'from datetime import date\n'), ((3845, 3861), 'datetime.date', 'date', (['(2020)', '(2)', '(3)'], {}), '(2020, 2, 3)\n', (3849, 3861), False, 'from datetime import date\n'), ((4056, 4072), 'datetime.date', 'date', (['(2020)', '(4)', '(7)'], {}), '(2020, 4, 7)\n', (4060, 4072), False, 'from datetime import date\n'), ((4269, 4286), 'datetime.date', 'date', (['(2020)', '(4)', '(10)'], {}), '(2020, 4, 10)\n', (4273, 4286), False, 'from datetime import date\n'), ((4471, 4487), 'datetime.date', 'date', (['(2020)', '(5)', '(1)'], {}), '(2020, 5, 1)\n', (4475, 4487), False, 'from datetime import date\n'), ((4677, 4694), 'datetime.date', 'date', (['(2020)', '(6)', '(25)'], {}), '(2020, 6, 25)\n', (4681, 4694), False, 'from datetime import date\n'), ((4885, 4901), 'datetime.date', 'date', (['(2020)', '(9)', '(7)'], {}), '(2020, 9, 7)\n', (4889, 4901), False, 'from datetime import date\n'), ((5092, 5109), 'datetime.date', 'date', (['(2020)', '(9)', '(25)'], {}), '(2020, 9, 25)\n', (5096, 5109), False, 'from datetime import date\n'), ((5317, 5334), 'datetime.date', 'date', (['(2020)', '(10)', '(4)'], {}), '(2020, 10, 4)\n', (5321, 5334), False, 'from datetime import date\n'), ((5539, 5557), 'datetime.date', 'date', (['(2020)', '(12)', '(25)'], {}), '(2020, 12, 25)\n', (5543, 5557), False, 'from datetime import date\n')]
|
# yellowbrick.utils.helpers
# Helper functions and generic utilities for use in Yellowbrick code.
#
# Author: <NAME> <<EMAIL>>
# Created: Fri May 19 10:39:30 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: helpers.py [79cd8cf] <EMAIL> $
"""
Helper functions and generic utilities for use in Yellowbrick code.
"""
##########################################################################
## Imports
##########################################################################
import re
import numpy as np
from sklearn.pipeline import Pipeline
from .types import is_estimator
from yellowbrick.exceptions import YellowbrickTypeError
##########################################################################
## Model and Feature Information
##########################################################################
def get_model_name(model):
"""
Detects the model name for a Scikit-Learn model or pipeline.
Parameters
----------
model: class or instance
The object to determine the name for. If the model is an estimator it
returns the class name; if it is a Pipeline it returns the class name
of the final transformer or estimator in the Pipeline.
Returns
-------
name : string
The name of the model or pipeline.
"""
if not is_estimator(model):
raise YellowbrickTypeError(
"Cannot detect the model name for non estimator: '{}'".format(
type(model)
)
)
else:
if isinstance(model, Pipeline):
return get_model_name(model.steps[-1][-1])
else:
return model.__class__.__name__
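# Illustrative behaviour (the estimator names below are hypothetical examples,
# not exports of this module):
#     get_model_name(Lasso())                       -> 'Lasso'
#     get_model_name(Pipeline([('clf', Lasso())]))  -> 'Lasso'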
def has_ndarray_int_columns(features, X):
""" Checks if numeric feature columns exist in ndarray """
_, ncols = X.shape
if not all(d.isdigit() for d in features if isinstance(d, str)) or not isinstance(X, np.ndarray):
return False
ndarray_columns = np.arange(0, ncols)
feature_cols = np.unique([int(d) for d in features])
return all(np.in1d(feature_cols, ndarray_columns))
# Alias for closer name to isinstance and issubclass
hasndarrayintcolumns = has_ndarray_int_columns
def is_monotonic(a, increasing=True):
"""
Tests whether a vector a has monotonicity.
Parameters
----------
a : array-like
Array that should be tested for monotonicity
increasing : bool, default: True
Test if the array is montonically increasing, otherwise test if the
array is montonically decreasing.
"""
a = np.asarray(a) # ensure a is array-like
if a.ndim > 1:
raise ValueError("not supported for multi-dimensonal arrays")
if len(a) <= 1:
return True
if increasing:
return np.all(a[1:] >= a[:-1], axis=0)
return np.all(a[1:] <= a[:-1], axis=0)
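# Worked examples of is_monotonic (illustrative):
#     is_monotonic([1, 2, 2, 3])                 -> True   (non-decreasing)
#     is_monotonic([3, 2, 1], increasing=False)  -> True
#     is_monotonic([1, 3, 2])                    -> False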
##########################################################################
## Numeric Computations
##########################################################################
#From here: http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero
def div_safe( numerator, denominator ):
"""
Ufunc-extension that returns 0 instead of nan when dividing numpy arrays
Parameters
----------
numerator: array-like
denominator: scalar or array-like that can be validly divided by the numerator
returns a numpy array
example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]
"""
#First handle scalars
if np.isscalar(numerator):
raise ValueError("div_safe should only be used with an array-like numerator")
#Then numpy arrays
try:
with np.errstate(divide='ignore', invalid='ignore'):
result = np.true_divide( numerator, denominator )
result[ ~ np.isfinite( result )] = 0 # -inf inf NaN
return result
except ValueError as e:
raise e
##########################################################################
## String Computations
##########################################################################
def slugify(text):
"""
Returns a slug of given text, normalizing unicode data for file-safe
strings. Used for deciding where to write images to disk.
Parameters
----------
text : string
The string to slugify
Returns
-------
slug : string
A normalized slug representation of the text
.. seealso:: http://yashchandra.com/2014/05/08/how-to-generate-clean-url-or-a-slug-in-python/
"""
slug = re.sub(r'[^\w]+', ' ', text)
slug = "-".join(slug.lower().strip().split())
return slug
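# Worked example (illustrative): slugify("Residuals Plot: Ridge") returns
# 'residuals-plot-ridge'.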
|
[
"numpy.true_divide",
"numpy.isscalar",
"numpy.asarray",
"numpy.isfinite",
"numpy.errstate",
"numpy.arange",
"re.sub",
"numpy.all",
"numpy.in1d"
] |
[((1986, 2005), 'numpy.arange', 'np.arange', (['(0)', 'ncols'], {}), '(0, ncols)\n', (1995, 2005), True, 'import numpy as np\n'), ((2589, 2602), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (2599, 2602), True, 'import numpy as np\n'), ((2838, 2869), 'numpy.all', 'np.all', (['(a[1:] <= a[:-1])'], {'axis': '(0)'}), '(a[1:] <= a[:-1], axis=0)\n', (2844, 2869), True, 'import numpy as np\n'), ((3525, 3547), 'numpy.isscalar', 'np.isscalar', (['numerator'], {}), '(numerator)\n', (3536, 3547), True, 'import numpy as np\n'), ((4553, 4581), 're.sub', 're.sub', (['"""[^\\\\w]+"""', '""" """', 'text'], {}), "('[^\\\\w]+', ' ', text)\n", (4559, 4581), False, 'import re\n'), ((2078, 2116), 'numpy.in1d', 'np.in1d', (['feature_cols', 'ndarray_columns'], {}), '(feature_cols, ndarray_columns)\n', (2085, 2116), True, 'import numpy as np\n'), ((2795, 2826), 'numpy.all', 'np.all', (['(a[1:] >= a[:-1])'], {'axis': '(0)'}), '(a[1:] >= a[:-1], axis=0)\n', (2801, 2826), True, 'import numpy as np\n'), ((3681, 3727), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (3692, 3727), True, 'import numpy as np\n'), ((3750, 3788), 'numpy.true_divide', 'np.true_divide', (['numerator', 'denominator'], {}), '(numerator, denominator)\n', (3764, 3788), True, 'import numpy as np\n'), ((3813, 3832), 'numpy.isfinite', 'np.isfinite', (['result'], {}), '(result)\n', (3824, 3832), True, 'import numpy as np\n')]
|
import os
def str2bool(v):
if v is None or isinstance(v, bool):
return v
return v.lower() in ("yes", "true", "t", "1")
def str2int(v):
if v is None:
return v
if v == "":
return None
return int(v)
def str2float(v):
if v is None:
return v
    return float(v)
class Base:
# ------------------- need config ---------------------
DATABASE_MYSQL_URL = os.getenv("DATABASE_MYSQL_URL", "root:dSSALHwSsCiXzPr@192.168.0.126:3306/fastapi")
# ------------------- option ---------------------
CONFIG_NAME = "BASE"
SERVICE_NAME = os.getenv("SERVICE_NAME", "fastapi-web-template")
TZ = os.getenv("TZ", "Asia/Shanghai")
TOKEN_SECRET_KEY = os.getenv("TOKEN_SECRET_KEY", "tokensecretkey")
# db
DATABASE_URL = os.getenv("DATABASE_URL", f"mysql+aiomysql://{DATABASE_MYSQL_URL}?charset=utf8mb4")
SHOW_SQL = str2bool(os.getenv("SHOW_SQL", "False"))
RETURN_SQL = str2bool(os.getenv("RETURN_SQL", "True"))
DATABASE_URL_ENCODING = os.getenv("DATABASE_URL_ENCODING", "utf8mb4")
DB_POOL_RECYCLE = str2int(os.getenv("DB_POOL_RECYCLE", 3600))
DB_MAX_OVERFLOW = str2int(os.getenv("DB_MAX_OVERFLOW", 20))
DB_POOL_SIZE = str2int(os.getenv("DB_POOL_SIZE", 5))
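    # Worked examples of the coercion helpers above (illustrative):
    #     str2bool("True") -> True,  str2bool("0")  -> False,  str2bool(None) -> None
    #     str2int("")      -> None,  str2int("20")  -> 20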
|
[
"os.getenv"
] |
[((416, 502), 'os.getenv', 'os.getenv', (['"""DATABASE_MYSQL_URL"""', '"""root:dSSALHwSsCiXzPr@192.168.0.126:3306/fastapi"""'], {}), "('DATABASE_MYSQL_URL',\n 'root:dSSALHwSsCiXzPr@192.168.0.126:3306/fastapi')\n", (425, 502), False, 'import os\n'), ((599, 648), 'os.getenv', 'os.getenv', (['"""SERVICE_NAME"""', '"""fastapi-web-template"""'], {}), "('SERVICE_NAME', 'fastapi-web-template')\n", (608, 648), False, 'import os\n'), ((659, 691), 'os.getenv', 'os.getenv', (['"""TZ"""', '"""Asia/Shanghai"""'], {}), "('TZ', 'Asia/Shanghai')\n", (668, 691), False, 'import os\n'), ((716, 763), 'os.getenv', 'os.getenv', (['"""TOKEN_SECRET_KEY"""', '"""tokensecretkey"""'], {}), "('TOKEN_SECRET_KEY', 'tokensecretkey')\n", (725, 763), False, 'import os\n'), ((793, 880), 'os.getenv', 'os.getenv', (['"""DATABASE_URL"""', 'f"""mysql+aiomysql://{DATABASE_MYSQL_URL}?charset=utf8mb4"""'], {}), "('DATABASE_URL',\n f'mysql+aiomysql://{DATABASE_MYSQL_URL}?charset=utf8mb4')\n", (802, 880), False, 'import os\n'), ((1020, 1065), 'os.getenv', 'os.getenv', (['"""DATABASE_URL_ENCODING"""', '"""utf8mb4"""'], {}), "('DATABASE_URL_ENCODING', 'utf8mb4')\n", (1029, 1065), False, 'import os\n'), ((901, 931), 'os.getenv', 'os.getenv', (['"""SHOW_SQL"""', '"""False"""'], {}), "('SHOW_SQL', 'False')\n", (910, 931), False, 'import os\n'), ((959, 990), 'os.getenv', 'os.getenv', (['"""RETURN_SQL"""', '"""True"""'], {}), "('RETURN_SQL', 'True')\n", (968, 990), False, 'import os\n'), ((1097, 1131), 'os.getenv', 'os.getenv', (['"""DB_POOL_RECYCLE"""', '(3600)'], {}), "('DB_POOL_RECYCLE', 3600)\n", (1106, 1131), False, 'import os\n'), ((1163, 1195), 'os.getenv', 'os.getenv', (['"""DB_MAX_OVERFLOW"""', '(20)'], {}), "('DB_MAX_OVERFLOW', 20)\n", (1172, 1195), False, 'import os\n'), ((1224, 1252), 'os.getenv', 'os.getenv', (['"""DB_POOL_SIZE"""', '(5)'], {}), "('DB_POOL_SIZE', 5)\n", (1233, 1252), False, 'import os\n')]
|
from cryptojwt.utils import as_bytes
def get_session_status_page(service_context, looked_for_state):
"""
Constructs the session status check page
:param service_context: The relying party's service context
:param looked_for_state: Expecting state to be ? (changed/unchanged)
"""
_msg = open(service_context.add_on['status_check']['template_file']).read()
_csi = service_context.get('provider_info')['check_session_iframe']
_mod_msg = _msg.replace("{check_session_iframe}", _csi)
if looked_for_state == "changed":
_mod_msg = _mod_msg.replace(
"{status_check_iframe}",
service_context.add_on['status_check']['session_changed_iframe'])
else:
_mod_msg = _mod_msg.replace(
"{status_check_iframe}",
service_context.add_on['status_check']['session_unchanged_iframe'])
return as_bytes(_mod_msg)
def add_status_check_support(service, rp_iframe_path, template_file="",
session_changed_iframe_path="", session_unchanged_iframe_path=""):
"""
Setup status check support.
:param service: Dictionary of services
:param template_file: Name of template file
"""
# Arbitrary which service is used, just want a link to the service context
authn_service = service["authorization"]
authn_service.service_context.add_on['status_check'] = {
"template_file": template_file,
"rp_iframe_path": rp_iframe_path,
"session_changed_iframe": session_changed_iframe_path,
"session_unchanged_iframe": session_unchanged_iframe_path,
# below are functions
# "rp_iframe": rp_iframe,
"get_session_status_page": get_session_status_page
}
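# Illustrative call (the dict and all paths/file names below are placeholders;
# service_dict must map "authorization" to a service instance):
#     add_status_check_support(
#         service_dict, rp_iframe_path='/session_iframe',
#         template_file='check_session.html',
#         session_changed_iframe_path='changed.html',
#         session_unchanged_iframe_path='unchanged.html')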
|
[
"cryptojwt.utils.as_bytes"
] |
[((880, 898), 'cryptojwt.utils.as_bytes', 'as_bytes', (['_mod_msg'], {}), '(_mod_msg)\n', (888, 898), False, 'from cryptojwt.utils import as_bytes\n')]
|
from rlxp.envs import GridWorld
from rlxp.rendering import render_env2d
env = GridWorld(7, 10, walls=((2,2), (3,3)))
env.enable_rendering()
for tt in range(50):
env.step(env.action_space.sample())
render_env2d(env)
|
[
"rlxp.envs.GridWorld",
"rlxp.rendering.render_env2d"
] |
[((80, 120), 'rlxp.envs.GridWorld', 'GridWorld', (['(7)', '(10)'], {'walls': '((2, 2), (3, 3))'}), '(7, 10, walls=((2, 2), (3, 3)))\n', (89, 120), False, 'from rlxp.envs import GridWorld\n'), ((203, 220), 'rlxp.rendering.render_env2d', 'render_env2d', (['env'], {}), '(env)\n', (215, 220), False, 'from rlxp.rendering import render_env2d\n')]
|
import random, pylab
random.seed(1)
def getMeanAndStd(X):
mean = sum(X)/float(len(X))
tot = 0.0
for x in X:
tot += (x - mean)**2
std = (tot/len(X))**0.5
return mean, std
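# Note: getMeanAndStd computes the population standard deviation (it divides
# by len(X), not len(X) - 1), i.e. it matches numpy's np.std(X, ddof=0).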
# GENERATING NORMALLY DISTRIBUTED DATA
#==============================================================================
# dist, numSamples = [], 1000000
#
# for i in range(numSamples):
# dist.append(random.gauss(0, 100))
# # 0 is the mean, and 100 is the standard deviation
#
#
# weights = [1/numSamples]*len(dist)
# v = pylab.hist(dist, bins = 100,
# weights = [1/numSamples]*len(dist))
#
# pylab.xlabel('x')
# pylab.ylabel('Relative Frequency')
#
# print('Fraction within ~200 of mean =', sum(v[0][30:70]))
#
#==============================================================================
def gaussian(x, mu, sigma):
factor1 = (1.0/(sigma*((2*pylab.pi)**0.5)))
factor2 = pylab.e**-(((x-mu)**2)/(2*sigma**2))
return factor1*factor2
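# For reference, gaussian(x, mu, sigma) above evaluates the normal density
#     f(x) = 1/(sigma*sqrt(2*pi)) * exp(-(x - mu)**2 / (2*sigma**2));
# e.g. gaussian(0, 0, 1) is about 0.3989, the peak of the standard normal.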
#==============================================================================
# xVals, yVals = [], []
# mu, sigma = 0, 1
# x = -4
# while x <= 4:
# xVals.append(x)
# yVals.append(gaussian(x, mu, sigma))
# x += 0.05
# pylab.plot(xVals, yVals)
# pylab.title('Normal Distribution, mu = ' + str(mu) + ', sigma = ' + str(sigma))
# # As a result we obtain the density curve, i.e. the derivative of the cumulative distribution function
#==============================================================================
import scipy.integrate
def checkEmpirical(numTrials):
for t in range(numTrials):
mu = random.randint(-10, 10)
sigma = random.randint(1, 10)
print('For mu =', mu, 'and sigma =', sigma)
for numStd in (1, 1.96, 3):
area = scipy.integrate.quad(gaussian,
mu-numStd*sigma,
mu+numStd*sigma,
(mu, sigma))[0]
print(' Fraction within', numStd, 'std =', round(area, 4))
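# a hedged usage sketch, kept commented out like the blocks above:
# checkEmpirical(3)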
# TEST CENTRAL LIMIT THEOREM
def plotMeans(numDice, numRolls, numBins, legend, color, style):
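    # average numDice uniform [0, 5) "dice" per trial; by the central limit theorem the distribution of the means narrows toward a normal as numDice grows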
means = []
for i in range(numRolls//numDice):
vals = 0
for j in range(numDice):
vals += 5*random.random()
means.append(vals/float(numDice))
pylab.hist(means, numBins, color = color, label = legend,
weights = [1/len(means)]*len(means),
hatch = style)
return getMeanAndStd(means)
mean, std = plotMeans(1, 1000000, 19, '1 die', 'b', '*')
print('Mean of rolling 1 die =', str(mean) + ',', 'Std =', std)
mean, std = plotMeans(50, 1000000, 19, 'Mean of 50 dice', 'r', '//')
print('Mean of rolling 50 dice =', str(mean) + ',', 'Std =', std)
pylab.title('Rolling Continuous Dice')
pylab.xlabel('Value')
pylab.ylabel('Probability')
pylab.legend()
|
[
"pylab.title",
"random.randint",
"pylab.ylabel",
"random.random",
"random.seed",
"pylab.xlabel",
"pylab.legend"
] |
[((21, 35), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (32, 35), False, 'import random, pylab\n'), ((2776, 2814), 'pylab.title', 'pylab.title', (['"""Rolling Continuous Dice"""'], {}), "('Rolling Continuous Dice')\n", (2787, 2814), False, 'import random, pylab\n'), ((2815, 2836), 'pylab.xlabel', 'pylab.xlabel', (['"""Value"""'], {}), "('Value')\n", (2827, 2836), False, 'import random, pylab\n'), ((2837, 2864), 'pylab.ylabel', 'pylab.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (2849, 2864), False, 'import random, pylab\n'), ((2865, 2879), 'pylab.legend', 'pylab.legend', ([], {}), '()\n', (2877, 2879), False, 'import random, pylab\n'), ((1599, 1622), 'random.randint', 'random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (1613, 1622), False, 'import random, pylab\n'), ((1641, 1662), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1655, 1662), False, 'import random, pylab\n'), ((2283, 2298), 'random.random', 'random.random', ([], {}), '()\n', (2296, 2298), False, 'import random, pylab\n')]
|
import discord
import youtube_dl
import os
from discord.ext import commands
from discord.utils import get
from discord import FFmpegPCMAudio
bot = commands.Bot(command_prefix='.')
vol = 100
@bot.event
async def on_ready():
print("Logged in as: " + bot.user.name + "\n")
    game = discord.Game("searching for a home")
await bot.change_presence(activity=game)
@bot.command(name='ping', help='Check ping')
async def ping(ctx):
await ctx.send(f'{round(bot.latency * 1000)}ms')
@bot.command(pass_context=True, brief="Пригласить бота в канал", aliases=['jo', 'joi'])
async def join(ctx):
try:
channel = ctx.message.author.voice.channel
except AttributeError:
await ctx.send("Вы должны быть в голосовом канале")
return
voice = get(bot.voice_clients, guild=ctx.guild)
if voice and voice.is_connected():
await voice.move_to(channel)
else:
await channel.connect()
await ctx.send(f"Подключен к каналу: {channel}")
@bot.command(pass_context=True, brief="Отключить бота от канала", aliases=['le', 'lea'])
async def leave(ctx):
voice = get(bot.voice_clients, guild=ctx.guild)
if voice and voice.is_connected():
await voice.disconnect()
await ctx.send("Бот отключен от канала")
else:
await ctx.send("Бот не подключен к голосовому каналу")
@bot.command(pass_context=True, brief="Включить проигрывание 'play [url]'", aliases=['pl', 'pla'])
async def play(ctx, *, url: str):
global vol
song_there = os.path.isfile("song.mp3")
try:
if song_there:
os.remove("song.mp3")
except PermissionError:
await ctx.send("Подождите завершения песни или воспользуйтесь командой <skip>")
return
await ctx.send("Loading...")
voice = get(bot.voice_clients, guild=ctx.guild)
if not voice:
await ctx.send("Не в голосовом канале")
return
print(url)
if "spotify" in url and "playlist" in url:
pass
if "spotify" in url:
os.system(f"spotdl {url}")
else:
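        # youtube-dl options: download the best available audio and convert it to mp3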
ydl_opts = {
'default_search': 'ytsearch',
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
print(str(url))
for file in os.listdir("./"):
print(file)
if file.endswith(".mp3"):
os.rename(file, 'song.mp3')
voice.play(discord.FFmpegPCMAudio("song.mp3"))
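    # note: a plain FFmpegPCMAudio source ignores the volume attribute set below; discord.py needs a PCMVolumeTransformer wrapper for real volume control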
voice.volume = vol
voice.is_playing()
await ctx.send(f"Проигрывание запущено")
@bot.command(pass_context=True, brief="Поставить проигрывание на паузу", aliases=['pa', 'pau'])
async def pause(ctx):
voice = get(bot.voice_clients, guild=ctx.guild)
if voice and voice.is_playing():
print("Music paused")
voice.pause()
await ctx.send("Проигрывание приостановлено")
else:
await ctx.send("В данный момент ничего не проигрывается")
@bot.command(pass_context=True, brief="Продолжить воспроизведение", aliases=['r', 'res'])
async def resume(ctx):
voice = get(bot.voice_clients, guild=ctx.guild)
if voice and voice.is_paused():
print("Resumed music")
voice.resume()
await ctx.send("Воспроизведение продолжено")
else:
await ctx.send("В данный момент нет приостановленного трека")
@bot.command(pass_context=True, brief="Скипнуть трек", aliases=['sk', 'ski'])
async def skip(ctx):
voice = get(bot.voice_clients, guild=ctx.guild)
if voice and voice.is_playing():
voice.stop()
await ctx.send("Трек пропущен, а ты попущен")
else:
await ctx.send("Нечего скипать")
b_token = os.environ.get('TOKEN')
bot.run(str(b_token))
|
[
"discord.utils.get",
"os.remove",
"os.rename",
"os.system",
"os.environ.get",
"os.path.isfile",
"discord.Game",
"youtube_dl.YoutubeDL",
"discord.FFmpegPCMAudio",
"discord.ext.commands.Bot",
"os.listdir"
] |
[((148, 180), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""."""'}), "(command_prefix='.')\n", (160, 180), False, 'from discord.ext import commands\n'), ((3829, 3852), 'os.environ.get', 'os.environ.get', (['"""TOKEN"""'], {}), "('TOKEN')\n", (3843, 3852), False, 'import os\n'), ((288, 314), 'discord.Game', 'discord.Game', (['"""поиск дома"""'], {}), "('поиск дома')\n", (300, 314), False, 'import discord\n'), ((770, 809), 'discord.utils.get', 'get', (['bot.voice_clients'], {'guild': 'ctx.guild'}), '(bot.voice_clients, guild=ctx.guild)\n', (773, 809), False, 'from discord.utils import get\n'), ((1106, 1145), 'discord.utils.get', 'get', (['bot.voice_clients'], {'guild': 'ctx.guild'}), '(bot.voice_clients, guild=ctx.guild)\n', (1109, 1145), False, 'from discord.utils import get\n'), ((1507, 1533), 'os.path.isfile', 'os.path.isfile', (['"""song.mp3"""'], {}), "('song.mp3')\n", (1521, 1533), False, 'import os\n'), ((1777, 1816), 'discord.utils.get', 'get', (['bot.voice_clients'], {'guild': 'ctx.guild'}), '(bot.voice_clients, guild=ctx.guild)\n', (1780, 1816), False, 'from discord.utils import get\n'), ((2463, 2479), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (2473, 2479), False, 'import os\n'), ((2849, 2888), 'discord.utils.get', 'get', (['bot.voice_clients'], {'guild': 'ctx.guild'}), '(bot.voice_clients, guild=ctx.guild)\n', (2852, 2888), False, 'from discord.utils import get\n'), ((3236, 3275), 'discord.utils.get', 'get', (['bot.voice_clients'], {'guild': 'ctx.guild'}), '(bot.voice_clients, guild=ctx.guild)\n', (3239, 3275), False, 'from discord.utils import get\n'), ((3613, 3652), 'discord.utils.get', 'get', (['bot.voice_clients'], {'guild': 'ctx.guild'}), '(bot.voice_clients, guild=ctx.guild)\n', (3616, 3652), False, 'from discord.utils import get\n'), ((2006, 2032), 'os.system', 'os.system', (['f"""spotdl {url}"""'], {}), "(f'spotdl {url}')\n", (2015, 2032), False, 'import os\n'), ((2590, 2624), 'discord.FFmpegPCMAudio', 'discord.FFmpegPCMAudio', (['"""song.mp3"""'], {}), "('song.mp3')\n", (2612, 2624), False, 'import discord\n'), ((1578, 1599), 'os.remove', 'os.remove', (['"""song.mp3"""'], {}), "('song.mp3')\n", (1587, 1599), False, 'import os\n'), ((2348, 2378), 'youtube_dl.YoutubeDL', 'youtube_dl.YoutubeDL', (['ydl_opts'], {}), '(ydl_opts)\n', (2368, 2378), False, 'import youtube_dl\n'), ((2547, 2574), 'os.rename', 'os.rename', (['file', '"""song.mp3"""'], {}), "(file, 'song.mp3')\n", (2556, 2574), False, 'import os\n')]
|
# Copyright (c) 2016, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gzip
import os
import unittest
from datetime import datetime, timedelta
from typing import List
import numpy as np
import pytz
from cerebralcortex.data_processor.signalprocessing.alignment import timestamp_correct
from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, \
generate_peak_valley, \
remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, \
filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position
from cerebralcortex.data_processor.signalprocessing.vector import smooth, moving_average_curve
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
class TestPeakValleyComputation(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestPeakValleyComputation, cls).setUpClass()
tz = pytz.timezone('US/Eastern')
data = []
cls._sample_frequency = 21.33
cls._smoothing_factor = 5
cls._time_window = 8
cls._expiration_amplitude_threshold_perc = 0.10
cls._threshold_expiration_duration = 0.312
cls._max_amplitude_change_peak_correction = 30
cls._inspiration_amplitude_threshold_perc = 0.10
cls._min_neg_slope_count_peak_correction = 4
cls._minimum_peak_to_valley_time_diff = 0.31
cls._window_length = int(round(cls._time_window * cls._sample_frequency))
with gzip.open(os.path.join(os.path.dirname(__file__), 'res/rip.csv.gz'), 'rt') as f:
for l in f:
values = list(map(int, l.split(',')))
data.append(
DataPoint.from_tuple(datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
cls._data_start_time_to_index = get_data_start_time_to_index_dic(data=data)
cls.rip_datastream = DataStream(None, None)
cls.rip_datastream.data = data
def test_smooth(self):
ds = DataStream(None, None)
ds.datapoints = self.rip_datastream.data
result_smooth = smooth(ds.datapoints, self._smoothing_factor)
sample_smooth_python = [i.sample for i in result_smooth[:5000]]
sample_smooth_matlab = np.genfromtxt(os.path.join(os.path.dirname(__file__), 'res/testmatlab_rip_smooth.csv'),
delimiter=',', )
self.assertTrue(np.alltrue(np.round(sample_smooth_matlab) == np.round(sample_smooth_python)))
def test_moving_average_curve(self):
ds = DataStream(None, None)
ds.datapoints = self.rip_datastream.data
data_smooth = smooth(ds.datapoints, self._smoothing_factor)
result = moving_average_curve(data_smooth, self._window_length)
sample_mac_python = [i.sample for i in result[:5000]]
sample_mac_matlab = np.genfromtxt(os.path.join(os.path.dirname(__file__), 'res/testmatlab_mac_sample.csv'),
delimiter=',', )
for i in range(0, len(sample_mac_matlab)):
self.assertAlmostEqual(sample_mac_matlab[i], sample_mac_python[i], delta=0.1)
def test_up_down_intercepts(self):
data_start_time_list = [0, 1, 2, 3, 4]
mac_start_time_list = [0, 1, 2, 3, 4]
data_sample_list = [10, 20, 30, 40, 50]
mac_sample_list = [11, 12, 31, 32, 52]
expected_up_intercepts_sample = [12, 32]
expected_down_intercepts_sample = [31, 52]
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_time_list,
sample_list=data_sample_list)
mac_input = form_data_point_list_from_start_time_sample(start_time_list=mac_start_time_list,
sample_list=mac_sample_list)
data_start_time_to_index = get_data_start_time_to_index_dic(data=data_input)
up_intercepts, down_intercepts = up_down_intercepts(data=data_input,
mac=mac_input,
data_start_time_to_index=data_start_time_to_index)
output_up_intercepts_sample = [i.sample for i in up_intercepts]
output_down_intercepts_sample = [i.sample for i in down_intercepts]
self.assertTrue(np.array_equal(expected_up_intercepts_sample, output_up_intercepts_sample))
self.assertTrue(np.array_equal(expected_down_intercepts_sample, output_down_intercepts_sample))
def test_filter_intercept_outlier(self):
# test cases
up_intercepts_case_list = []
down_intercepts_case_list = []
up_intercepts_expected_case_list = []
down_intercepts_expected_case_list = []
# first case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 20, 30, 40, 50]))
down_intercepts_case_list.append(form_data_point_from_start_time_array([9, 11, 21, 31, 41]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 50])
down_intercepts_expected_case_list.append([9, 11, 21, 31, 41])
# second case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 20, 30, 40, 50]))
down_intercepts_case_list.append(form_data_point_from_start_time_array([8, 9, 11, 21, 31, 41, 42]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 50])
down_intercepts_expected_case_list.append([9, 11, 21, 31, 42])
# third case
up_intercepts_case_list.append(
form_data_point_from_start_time_array([10, 20, 22, 23, 30, 32, 33, 40, 42, 43, 50, 52, 53]))
down_intercepts_case_list.append(form_data_point_from_start_time_array([9, 11, 21, 31, 41]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 53])
down_intercepts_expected_case_list.append([9, 11, 21, 31, 41])
# fourth case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 20, 30, 40, 50]))
down_intercepts_case_list.append(form_data_point_from_start_time_array(
[7, 8, 9, 11, 12, 13, 21, 22, 23, 31, 32, 33, 41, 42, 43, 51, 52, 53]))
up_intercepts_expected_case_list.append([10, 20, 30, 40, 50])
down_intercepts_expected_case_list.append([9, 13, 23, 33, 43])
# fifth case
up_intercepts_case_list.append(form_data_point_from_start_time_array([10, 11, 12, 16, 17, 18, 22, 23, 24]))
down_intercepts_case_list.append(
form_data_point_from_start_time_array([7, 8, 9, 13, 14, 15, 19, 20, 21, 25, 26, 27]))
up_intercepts_expected_case_list.append([12, 18, 24])
down_intercepts_expected_case_list.append([9, 15, 21])
for i, up_intercepts_case in enumerate(up_intercepts_case_list):
up_intercepts = up_intercepts_case
down_intercepts = down_intercepts_case_list[i]
up_intercepts_output, down_intercepts_output = filter_intercept_outlier(up_intercepts, down_intercepts)
# test all are List[Datapoints]
self.assertIsInstance(up_intercepts_output, list)
self.assertIsInstance(down_intercepts_output, list)
# test output match for first case
up_intercepts_output_start_time = [i.start_time for i in up_intercepts_output]
self.assertTrue(np.array_equal(up_intercepts_output_start_time, up_intercepts_expected_case_list[i]))
down_intercepts_output_start_time = [i.start_time for i in down_intercepts_output]
self.assertTrue(np.array_equal(down_intercepts_output_start_time, down_intercepts_expected_case_list[i]))
def test_generate_peak_valley(self):
down_intercepts_start_time = [10, 20, 30, 40, 50]
up_intercepts_start_time = [15, 25, 35, 45, 55]
data_start_times = [11, 12, 13, 16, 17, 18, 21, 22, 23, 26, 27, 28, 31, 32, 33, 36, 37, 38, 41, 42, 43, 46, 47,
48, 51, 52, 53, 56, 57, 58]
data_samples = [1, 2, 3, 10, 11, 12, 1, 2, 3, 10, 11, 12, 1, 2, 3, 10, 11, 12, 1, 2, 3, 10, 11, 12, 1, 2, 3, 10,
11, 12]
expected_valley_samples = [1, 1, 1, 1]
expected_peak_samples = [12, 12, 12, 12]
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_times,
sample_list=data_samples)
down_intercepts_input = form_data_point_from_start_time_array(start_time_list=down_intercepts_start_time)
up_intercepts_inpput = form_data_point_from_start_time_array(start_time_list=up_intercepts_start_time)
peaks_output, valleys_output = generate_peak_valley(up_intercepts=up_intercepts_inpput,
down_intercepts=down_intercepts_input, data=data_input)
output_peaks_sample = [i.sample for i in peaks_output]
output_valleys_sample = [i.sample for i in valleys_output]
self.assertTrue(np.array_equal(output_peaks_sample, expected_peak_samples))
self.assertTrue(np.array_equal(output_valleys_sample, expected_valley_samples))
def test_correct_valley_position(self):
valleys_start_time = [1, 21]
up_intercepts_start_time = [10, 30]
peaks_start_time = [20, 40]
valleys_samples = [100, 100]
up_intercepts_samples = [500, 500]
peaks_samples = [1000, 1000]
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
data_samples = [100, 110, 120, 130, 140, 100, 200, 300, 400, 500] + [100, 110, 120, 130, 140, 150, 160, 170,
180, 500]
        expected_valleys_start_time = [6,
                                       21]  # the data is not monotonically increasing from start time 1 to 10, so the first valley moves right to start time 6, where the data becomes monotonically increasing; the second valley is fine, since the data is monotonically increasing from start time 21 to 30.
expected_valleys_samples = [100, 100]
peaks_input = form_data_point_list_from_start_time_sample(start_time_list=peaks_start_time,
sample_list=peaks_samples)
valleys_input = form_data_point_list_from_start_time_sample(start_time_list=valleys_start_time,
sample_list=valleys_samples)
up_intercepts_input = form_data_point_list_from_start_time_sample(start_time_list=up_intercepts_start_time,
sample_list=up_intercepts_samples)
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_time,
sample_list=data_samples)
data_start_time_to_index = get_data_start_time_to_index_dic(data=data_input)
valleys_corrected_ouput = correct_valley_position(peaks=peaks_input,
valleys=valleys_input,
up_intercepts=up_intercepts_input,
data=data_input,
data_start_time_to_index=data_start_time_to_index)
valleys_corrected_ouput_start_time = [i.start_time for i in valleys_corrected_ouput]
valleys_corrected_ouput_samples = [i.sample for i in valleys_corrected_ouput]
self.assertTrue(np.array_equal(valleys_corrected_ouput_start_time, expected_valleys_start_time))
self.assertTrue(np.array_equal(valleys_corrected_ouput_samples, expected_valleys_samples))
def test_correct_peak_position(self):
test_cases = []
        # test case - 0: monotonically decreasing from the peak back to the up intercept, so the peak position will not be changed.
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
valleys_start_time = [1]
up_intercepts_start_time = [5]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [50]
peaks_samples = [100]
expected_peaks_start_time = [10]
expected_peaks_samples = [100]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
        # test case - 1: from the up intercept to the peak, the data increases from 50 to 90, then decreases from 90 to 60 over 3 points,
        # which is fewer than 4 (self._min_neg_slope_count_peak_correction = 4), so the peak position will not be updated.
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 40, 50, 90, 80, 70, 60, 100]
valleys_start_time = [1]
up_intercepts_start_time = [5]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [50]
peaks_samples = [100]
expected_peaks_start_time = [10]
expected_peaks_samples = [100]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
        # test case - 2: from the up intercept to the peak, the data increases from 30 to 60, then decreases from 60 to 10 over 5 points,
        # which is greater than 4 (self._min_neg_slope_count_peak_correction = 4).
        # The new peak sample value is 60 and the previous peak sample is 100, so the amplitude change from the new peak to the previous peak is 80%.
        # 80% is not less than 30% (self._max_amplitude_change_peak_correction = 30),
        # so the peak position will not be updated.
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 60, 50, 40, 30, 20, 10, 100]
valleys_start_time = [1]
up_intercepts_start_time = [3]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [30]
peaks_samples = [100]
expected_peaks_start_time = [10]
expected_peaks_samples = [100]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
        # test case - 3: from the up intercept to the peak, the data increases from 30 to 90, then decreases from 90 to 10 over 5 points,
        # which is greater than 4 (self._min_neg_slope_count_peak_correction = 4).
        # The new peak sample value is 90 and the previous peak sample is 100, so the amplitude change from the new peak to the previous peak is 12.5%.
        # 12.5% is less than 30% (self._max_amplitude_change_peak_correction = 30),
        # so the peak position will be updated to the new peak (sample = 90, start_time = 4).
data_start_time = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
data_samples = [10, 20, 30, 90, 50, 40, 30, 20, 10, 100]
valleys_start_time = [1]
up_intercepts_start_time = [3]
peaks_start_time = [10]
valleys_samples = [10]
up_intercepts_samples = [30]
peaks_samples = [100]
expected_peaks_start_time = [4]
expected_peaks_samples = [90]
test_cases.append({
'data_start_time': data_start_time,
'data_samples': data_samples,
'valleys_start_time': valleys_start_time,
'valleys_samples': valleys_samples,
'up_intercepts_start_time': up_intercepts_start_time,
'up_intercepts_samples': up_intercepts_samples,
'peaks_start_time': peaks_start_time,
'peaks_samples': peaks_samples,
'expected_peaks_start_time': expected_peaks_start_time,
'expected_peaks_samples': expected_peaks_samples
})
for i, item in enumerate(test_cases):
data_start_time = item['data_start_time']
data_samples = item['data_samples']
valleys_start_time = item['valleys_start_time']
up_intercepts_start_time = item['up_intercepts_start_time']
peaks_start_time = item['peaks_start_time']
valleys_samples = item['valleys_samples']
up_intercepts_samples = item['up_intercepts_samples']
peaks_samples = item['peaks_samples']
expected_peaks_start_time = item['expected_peaks_start_time']
expected_peaks_samples = item['expected_peaks_samples']
valleys_input = form_data_point_list_from_start_time_sample(start_time_list=valleys_start_time,
sample_list=valleys_samples)
up_intercepts_input = form_data_point_list_from_start_time_sample(start_time_list=up_intercepts_start_time,
sample_list=up_intercepts_samples)
peaks_input = form_data_point_list_from_start_time_sample(start_time_list=peaks_start_time,
sample_list=peaks_samples)
data_input = form_data_point_list_from_start_time_sample(start_time_list=data_start_time,
sample_list=data_samples)
data_start_time_to_index = get_data_start_time_to_index_dic(data=data_input)
peaks_output = correct_peak_position(peaks=peaks_input,
valleys=valleys_input,
up_intercepts=up_intercepts_input,
data=data_input,
max_amplitude_change_peak_correction=self._max_amplitude_change_peak_correction,
min_neg_slope_count_peak_correction=self._min_neg_slope_count_peak_correction,
data_start_time_to_index=data_start_time_to_index)
peaks_output_samples = [i.sample for i in peaks_output]
peaks_output_start_time = [i.start_time for i in peaks_output]
self.assertTrue(np.array_equal(expected_peaks_start_time, peaks_output_start_time),
msg='Test failed for test case ' + str(i))
self.assertTrue(np.array_equal(expected_peaks_samples, peaks_output_samples),
msg='Test failed for test case ' + str(i))
def test_remove_close_valley_peak_pair(self):
valleys_start_time = form_time_delta_list_from_start_time_in_seconds([1, 2]) # time in seconds
peaks_start_time = form_time_delta_list_from_start_time_in_seconds(
[1 + self._minimum_peak_to_valley_time_diff + 0.1,
2 + self._minimum_peak_to_valley_time_diff - 0.1]) # time in seconds
expected_valleys_start_time = form_time_delta_list_from_start_time_in_seconds([1])
expected_peaks_start_time = form_time_delta_list_from_start_time_in_seconds(
[1 + self._minimum_peak_to_valley_time_diff + 0.1])
input_peaks = form_data_point_from_start_time_array(peaks_start_time)
input_valleys = form_data_point_from_start_time_array(valleys_start_time)
output_peaks, output_valleys = remove_close_valley_peak_pair(peaks=input_peaks, valleys=input_valleys,
minimum_peak_to_valley_time_diff=self._minimum_peak_to_valley_time_diff)
output_peaks_start_time = [i.start_time for i in output_peaks]
output_valleys_start_time = [i.start_time for i in output_valleys]
self.assertTrue(np.array_equal(expected_peaks_start_time, output_peaks_start_time))
self.assertTrue(np.array_equal(expected_valleys_start_time, output_valleys_start_time))
def test_filter_expiration_duration_outlier(self):
peaks_start_time = form_time_delta_list_from_start_time_in_seconds([1, 2, 3, 4, 5])
valleys_start_time = form_time_delta_list_from_start_time_in_seconds(
[0, 1 + self._threshold_expiration_duration + .1, 2 + self._threshold_expiration_duration - .1,
3 + self._threshold_expiration_duration + .1, 4 + self._threshold_expiration_duration - .1])
expected_peaks_start_time = form_time_delta_list_from_start_time_in_seconds([1, 3, 5])
expected_valleys_start_time = form_time_delta_list_from_start_time_in_seconds(
[0, 1 + self._threshold_expiration_duration + .1, 3 + self._threshold_expiration_duration + .1])
input_peaks = form_data_point_from_start_time_array(peaks_start_time)
input_valleys = form_data_point_from_start_time_array(valleys_start_time)
output_peaks, output_valleys = filter_expiration_duration_outlier(peaks=input_peaks, valleys=input_valleys,
threshold_expiration_duration=self._threshold_expiration_duration)
output_peaks_start_time = [i.start_time for i in output_peaks]
output_valleys_start_time = [i.start_time for i in output_valleys]
self.assertTrue(np.array_equal(expected_peaks_start_time, output_peaks_start_time))
self.assertTrue(np.array_equal(expected_valleys_start_time, output_valleys_start_time))
def test_filter_small_amp_inspiration_peak_valley(self):
valleys_sample = [1, 2, 3, 4, 5]
peak_sample = [21, 22, 23, 24, 5.5]
        # self._inspiration_amplitude_threshold_perc is 0.10 of the average inspiration amplitude. Here the average inspiration is 16.1, so the threshold is 1.61. inspiration[4] = peak[4] - valley[4] = 0.5 < 1.61, so the last peak/valley pair is expected to be filtered out.
expected_valleys_sample = [1, 2, 3, 4]
expected_peaks_sample = [21, 22, 23, 24]
input_valleys = form_data_point_from_sample_array(sample_list=valleys_sample)
input_peaks = form_data_point_from_sample_array(sample_list=peak_sample)
output_peaks, output_valleys = filter_small_amp_inspiration_peak_valley(peaks=input_peaks,
valleys=input_valleys,
inspiration_amplitude_threshold_perc=0.1)
output_valleys_sample = [i.sample for i in output_valleys]
output_peaks_sample = [i.sample for i in output_peaks]
self.assertTrue(np.array_equal(expected_peaks_sample, output_peaks_sample))
self.assertTrue(np.array_equal(expected_valleys_sample, output_valleys_sample))
def test_filter_small_amp_expiration_peak_valley(self):
valleys_sample = [1, 2, 3, 4, 5]
peak_sample = [22, 23, 24, 5.5, 26]
        # self._expiration_amplitude_threshold_perc is 0.10 of the average expiration amplitude. Here the average expiration is 15.125, so the threshold is 1.51. The expiration abs(valley 5 - peak 5.5) = 0.5 < 1.51, so the pair peak = 5.5, valley = 5 is expected to be filtered out.
expected_valleys_sample = [1, 2, 3, 4]
expected_peaks_sample = [22, 23, 24, 26]
input_valleys = form_data_point_from_sample_array(sample_list=valleys_sample)
input_peaks = form_data_point_from_sample_array(sample_list=peak_sample)
output_peaks, output_valleys = filter_small_amp_expiration_peak_valley(peaks=input_peaks, valleys=input_valleys,
expiration_amplitude_threshold_perc=0.1)
output_valleys_sample = [i.sample for i in output_valleys]
output_peaks_sample = [i.sample for i in output_peaks]
self.assertTrue(np.array_equal(expected_peaks_sample, output_peaks_sample))
self.assertTrue(np.array_equal(expected_valleys_sample, output_valleys_sample))
def test_timestamp_correct(self):
rip_corrected = timestamp_correct(datastream=self.rip_datastream, sampling_frequency=self._sample_frequency)
timestamp_corrected_rip_data_unique_start_time_count = len(set([i.start_time for i in rip_corrected.data]))
raw_rip_data_unique_start_time_count = len(set([i.start_time for i in self.rip_datastream.data]))
self.assertGreaterEqual(timestamp_corrected_rip_data_unique_start_time_count,
raw_rip_data_unique_start_time_count,
                                msg='Timestamp corrected rip data has duplicate start times. '
                                    'Check whether the raw rip data sample frequency mismatches the provided default rip sample frequency.')
def get_data_start_time_to_index_dic(data: List[DataPoint]) -> dict:
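    """Map each DataPoint's start_time to its index in the data list."""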
data_start_time_to_index = {}
for index, d in enumerate(data):
data_start_time_to_index[d.start_time] = index
return data_start_time_to_index
def form_data_point_from_start_time_array(start_time_list):
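    """Build DataPoints from the given start times, each with sample value 0."""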
datapoints = []
for i in start_time_list:
datapoints.append(DataPoint.from_tuple(i, 0))
return datapoints
def form_data_point_list_from_start_time_sample(start_time_list,
sample_list):
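    """Build DataPoints pairing each start time with the corresponding sample."""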
datapoints = []
if len(start_time_list) == len(sample_list):
for i, start_time in enumerate(start_time_list):
datapoints.append(DataPoint.from_tuple(start_time, sample_list[i]))
else:
        raise Exception('Lengths of the start_time list and the sample list do not match.')
return datapoints
def form_time_delta_list_from_start_time_in_seconds(start_time_list):
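    """Convert start times given in seconds into timedelta objects."""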
start_time_time_delta_list = []
for i in start_time_list:
start_time_time_delta_list.append(timedelta(seconds=i))
return start_time_time_delta_list
def form_data_point_from_sample_array(sample_list):
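    """Build DataPoints from the given samples, stamped with the current time."""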
datapoints = []
for i in sample_list:
datapoints.append(DataPoint.from_tuple(start_time=datetime.now(), sample=i))
return datapoints
if __name__ == '__main__':
unittest.main()
|
[
"cerebralcortex.data_processor.signalprocessing.rip.correct_valley_position",
"cerebralcortex.data_processor.signalprocessing.rip.filter_intercept_outlier",
"cerebralcortex.data_processor.signalprocessing.rip.remove_close_valley_peak_pair",
"cerebralcortex.data_processor.signalprocessing.rip.filter_small_amp_inspiration_peak_valley",
"numpy.round",
"unittest.main",
"cerebralcortex.data_processor.signalprocessing.rip.generate_peak_valley",
"os.path.dirname",
"cerebralcortex.data_processor.signalprocessing.alignment.timestamp_correct",
"datetime.timedelta",
"datetime.datetime.now",
"cerebralcortex.data_processor.signalprocessing.rip.up_down_intercepts",
"cerebralcortex.data_processor.signalprocessing.vector.smooth",
"cerebralcortex.data_processor.signalprocessing.rip.filter_expiration_duration_outlier",
"datetime.datetime.fromtimestamp",
"cerebralcortex.data_processor.signalprocessing.rip.filter_small_amp_expiration_peak_valley",
"cerebralcortex.data_processor.signalprocessing.rip.correct_peak_position",
"cerebralcortex.data_processor.signalprocessing.vector.moving_average_curve",
"cerebralcortex.kernel.datatypes.datapoint.DataPoint.from_tuple",
"cerebralcortex.kernel.datatypes.datastream.DataStream",
"pytz.timezone",
"numpy.array_equal"
] |
[((28847, 28862), 'unittest.main', 'unittest.main', ([], {}), '()\n', (28860, 28862), False, 'import unittest\n'), ((2307, 2334), 'pytz.timezone', 'pytz.timezone', (['"""US/Eastern"""'], {}), "('US/Eastern')\n", (2320, 2334), False, 'import pytz\n'), ((3283, 3305), 'cerebralcortex.kernel.datatypes.datastream.DataStream', 'DataStream', (['None', 'None'], {}), '(None, None)\n', (3293, 3305), False, 'from cerebralcortex.kernel.datatypes.datastream import DataStream\n'), ((3386, 3408), 'cerebralcortex.kernel.datatypes.datastream.DataStream', 'DataStream', (['None', 'None'], {}), '(None, None)\n', (3396, 3408), False, 'from cerebralcortex.kernel.datatypes.datastream import DataStream\n'), ((3483, 3528), 'cerebralcortex.data_processor.signalprocessing.vector.smooth', 'smooth', (['ds.datapoints', 'self._smoothing_factor'], {}), '(ds.datapoints, self._smoothing_factor)\n', (3489, 3528), False, 'from cerebralcortex.data_processor.signalprocessing.vector import smooth, moving_average_curve\n'), ((3940, 3962), 'cerebralcortex.kernel.datatypes.datastream.DataStream', 'DataStream', (['None', 'None'], {}), '(None, None)\n', (3950, 3962), False, 'from cerebralcortex.kernel.datatypes.datastream import DataStream\n'), ((4035, 4080), 'cerebralcortex.data_processor.signalprocessing.vector.smooth', 'smooth', (['ds.datapoints', 'self._smoothing_factor'], {}), '(ds.datapoints, self._smoothing_factor)\n', (4041, 4080), False, 'from cerebralcortex.data_processor.signalprocessing.vector import smooth, moving_average_curve\n'), ((4098, 4152), 'cerebralcortex.data_processor.signalprocessing.vector.moving_average_curve', 'moving_average_curve', (['data_smooth', 'self._window_length'], {}), '(data_smooth, self._window_length)\n', (4118, 4152), False, 'from cerebralcortex.data_processor.signalprocessing.vector import smooth, moving_average_curve\n'), ((5383, 5489), 'cerebralcortex.data_processor.signalprocessing.rip.up_down_intercepts', 'up_down_intercepts', ([], {'data': 'data_input', 'mac': 'mac_input', 'data_start_time_to_index': 'data_start_time_to_index'}), '(data=data_input, mac=mac_input, data_start_time_to_index\n =data_start_time_to_index)\n', (5401, 5489), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((10143, 10260), 'cerebralcortex.data_processor.signalprocessing.rip.generate_peak_valley', 'generate_peak_valley', ([], {'up_intercepts': 'up_intercepts_inpput', 'down_intercepts': 'down_intercepts_input', 'data': 'data_input'}), '(up_intercepts=up_intercepts_inpput, down_intercepts=\n down_intercepts_input, data=data_input)\n', (10163, 10260), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((12517, 12693), 'cerebralcortex.data_processor.signalprocessing.rip.correct_valley_position', 'correct_valley_position', ([], {'peaks': 'peaks_input', 'valleys': 'valleys_input', 'up_intercepts': 'up_intercepts_input', 'data': 'data_input', 'data_start_time_to_index': 'data_start_time_to_index'}), '(peaks=peaks_input, valleys=valleys_input,\n 
up_intercepts=up_intercepts_input, data=data_input,\n data_start_time_to_index=data_start_time_to_index)\n', (12540, 12693), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((22193, 22341), 'cerebralcortex.data_processor.signalprocessing.rip.remove_close_valley_peak_pair', 'remove_close_valley_peak_pair', ([], {'peaks': 'input_peaks', 'valleys': 'input_valleys', 'minimum_peak_to_valley_time_diff': 'self._minimum_peak_to_valley_time_diff'}), '(peaks=input_peaks, valleys=input_valleys,\n minimum_peak_to_valley_time_diff=self._minimum_peak_to_valley_time_diff)\n', (22222, 22341), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((23676, 23823), 'cerebralcortex.data_processor.signalprocessing.rip.filter_expiration_duration_outlier', 'filter_expiration_duration_outlier', ([], {'peaks': 'input_peaks', 'valleys': 'input_valleys', 'threshold_expiration_duration': 'self._threshold_expiration_duration'}), '(peaks=input_peaks, valleys=input_valleys,\n threshold_expiration_duration=self._threshold_expiration_duration)\n', (23710, 23823), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((24938, 25067), 'cerebralcortex.data_processor.signalprocessing.rip.filter_small_amp_inspiration_peak_valley', 'filter_small_amp_inspiration_peak_valley', ([], {'peaks': 'input_peaks', 'valleys': 'input_valleys', 'inspiration_amplitude_threshold_perc': '(0.1)'}), '(peaks=input_peaks, valleys=\n input_valleys, inspiration_amplitude_threshold_perc=0.1)\n', (24978, 25067), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((26219, 26346), 'cerebralcortex.data_processor.signalprocessing.rip.filter_small_amp_expiration_peak_valley', 'filter_small_amp_expiration_peak_valley', ([], {'peaks': 'input_peaks', 'valleys': 'input_valleys', 'expiration_amplitude_threshold_perc': '(0.1)'}), '(peaks=input_peaks, valleys=\n input_valleys, expiration_amplitude_threshold_perc=0.1)\n', (26258, 26346), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((26788, 26885), 'cerebralcortex.data_processor.signalprocessing.alignment.timestamp_correct', 'timestamp_correct', ([], {'datastream': 'self.rip_datastream', 
'sampling_frequency': 'self._sample_frequency'}), '(datastream=self.rip_datastream, sampling_frequency=self.\n _sample_frequency)\n', (26805, 26885), False, 'from cerebralcortex.data_processor.signalprocessing.alignment import timestamp_correct\n'), ((5779, 5853), 'numpy.array_equal', 'np.array_equal', (['expected_up_intercepts_sample', 'output_up_intercepts_sample'], {}), '(expected_up_intercepts_sample, output_up_intercepts_sample)\n', (5793, 5853), True, 'import numpy as np\n'), ((5879, 5957), 'numpy.array_equal', 'np.array_equal', (['expected_down_intercepts_sample', 'output_down_intercepts_sample'], {}), '(expected_down_intercepts_sample, output_down_intercepts_sample)\n', (5893, 5957), True, 'import numpy as np\n'), ((8411, 8467), 'cerebralcortex.data_processor.signalprocessing.rip.filter_intercept_outlier', 'filter_intercept_outlier', (['up_intercepts', 'down_intercepts'], {}), '(up_intercepts, down_intercepts)\n', (8435, 8467), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((10472, 10530), 'numpy.array_equal', 'np.array_equal', (['output_peaks_sample', 'expected_peak_samples'], {}), '(output_peaks_sample, expected_peak_samples)\n', (10486, 10530), True, 'import numpy as np\n'), ((10556, 10618), 'numpy.array_equal', 'np.array_equal', (['output_valleys_sample', 'expected_valley_samples'], {}), '(output_valleys_sample, expected_valley_samples)\n', (10570, 10618), True, 'import numpy as np\n'), ((13123, 13202), 'numpy.array_equal', 'np.array_equal', (['valleys_corrected_ouput_start_time', 'expected_valleys_start_time'], {}), '(valleys_corrected_ouput_start_time, expected_valleys_start_time)\n', (13137, 13202), True, 'import numpy as np\n'), ((13228, 13301), 'numpy.array_equal', 'np.array_equal', (['valleys_corrected_ouput_samples', 'expected_valleys_samples'], {}), '(valleys_corrected_ouput_samples, expected_valleys_samples)\n', (13242, 13301), True, 'import numpy as np\n'), ((20280, 20633), 'cerebralcortex.data_processor.signalprocessing.rip.correct_peak_position', 'correct_peak_position', ([], {'peaks': 'peaks_input', 'valleys': 'valleys_input', 'up_intercepts': 'up_intercepts_input', 'data': 'data_input', 'max_amplitude_change_peak_correction': 'self._max_amplitude_change_peak_correction', 'min_neg_slope_count_peak_correction': 'self._min_neg_slope_count_peak_correction', 'data_start_time_to_index': 'data_start_time_to_index'}), '(peaks=peaks_input, valleys=valleys_input,\n up_intercepts=up_intercepts_input, data=data_input,\n max_amplitude_change_peak_correction=self.\n _max_amplitude_change_peak_correction,\n min_neg_slope_count_peak_correction=self.\n _min_neg_slope_count_peak_correction, data_start_time_to_index=\n data_start_time_to_index)\n', (20301, 20633), False, 'from cerebralcortex.data_processor.signalprocessing.rip import up_down_intercepts, filter_intercept_outlier, generate_peak_valley, remove_close_valley_peak_pair, filter_expiration_duration_outlier, filter_small_amp_expiration_peak_valley, filter_small_amp_inspiration_peak_valley, correct_peak_position, correct_valley_position\n'), ((22579, 22645), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_start_time', 'output_peaks_start_time'], {}), '(expected_peaks_start_time, output_peaks_start_time)\n', (22593, 22645), 
True, 'import numpy as np\n'), ((22671, 22741), 'numpy.array_equal', 'np.array_equal', (['expected_valleys_start_time', 'output_valleys_start_time'], {}), '(expected_valleys_start_time, output_valleys_start_time)\n', (22685, 22741), True, 'import numpy as np\n'), ((24066, 24132), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_start_time', 'output_peaks_start_time'], {}), '(expected_peaks_start_time, output_peaks_start_time)\n', (24080, 24132), True, 'import numpy as np\n'), ((24158, 24228), 'numpy.array_equal', 'np.array_equal', (['expected_valleys_start_time', 'output_valleys_start_time'], {}), '(expected_valleys_start_time, output_valleys_start_time)\n', (24172, 24228), True, 'import numpy as np\n'), ((25379, 25437), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_sample', 'output_peaks_sample'], {}), '(expected_peaks_sample, output_peaks_sample)\n', (25393, 25437), True, 'import numpy as np\n'), ((25463, 25525), 'numpy.array_equal', 'np.array_equal', (['expected_valleys_sample', 'output_valleys_sample'], {}), '(expected_valleys_sample, output_valleys_sample)\n', (25477, 25525), True, 'import numpy as np\n'), ((26577, 26635), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_sample', 'output_peaks_sample'], {}), '(expected_peaks_sample, output_peaks_sample)\n', (26591, 26635), True, 'import numpy as np\n'), ((26661, 26723), 'numpy.array_equal', 'np.array_equal', (['expected_valleys_sample', 'output_valleys_sample'], {}), '(expected_valleys_sample, output_valleys_sample)\n', (26675, 26723), True, 'import numpy as np\n'), ((27866, 27892), 'cerebralcortex.kernel.datatypes.datapoint.DataPoint.from_tuple', 'DataPoint.from_tuple', (['i', '(0)'], {}), '(i, 0)\n', (27886, 27892), False, 'from cerebralcortex.kernel.datatypes.datapoint import DataPoint\n'), ((28545, 28565), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'i'}), '(seconds=i)\n', (28554, 28565), False, 'from datetime import datetime, timedelta\n'), ((3660, 3685), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3675, 3685), False, 'import os\n'), ((4271, 4296), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4286, 4296), False, 'import os\n'), ((8806, 8894), 'numpy.array_equal', 'np.array_equal', (['up_intercepts_output_start_time', 'up_intercepts_expected_case_list[i]'], {}), '(up_intercepts_output_start_time,\n up_intercepts_expected_case_list[i])\n', (8820, 8894), True, 'import numpy as np\n'), ((9015, 9107), 'numpy.array_equal', 'np.array_equal', (['down_intercepts_output_start_time', 'down_intercepts_expected_case_list[i]'], {}), '(down_intercepts_output_start_time,\n down_intercepts_expected_case_list[i])\n', (9029, 9107), True, 'import numpy as np\n'), ((21074, 21140), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_start_time', 'peaks_output_start_time'], {}), '(expected_peaks_start_time, peaks_output_start_time)\n', (21088, 21140), True, 'import numpy as np\n'), ((21241, 21301), 'numpy.array_equal', 'np.array_equal', (['expected_peaks_samples', 'peaks_output_samples'], {}), '(expected_peaks_samples, peaks_output_samples)\n', (21255, 21301), True, 'import numpy as np\n'), ((28202, 28250), 'cerebralcortex.kernel.datatypes.datapoint.DataPoint.from_tuple', 'DataPoint.from_tuple', (['start_time', 'sample_list[i]'], {}), '(start_time, sample_list[i])\n', (28222, 28250), False, 'from cerebralcortex.kernel.datatypes.datapoint import DataPoint\n'), ((2898, 2923), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2913, 
2923), False, 'import os\n'), ((3818, 3848), 'numpy.round', 'np.round', (['sample_smooth_matlab'], {}), '(sample_smooth_matlab)\n', (3826, 3848), True, 'import numpy as np\n'), ((3852, 3882), 'numpy.round', 'np.round', (['sample_smooth_python'], {}), '(sample_smooth_python)\n', (3860, 3882), True, 'import numpy as np\n'), ((28764, 28778), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28776, 28778), False, 'from datetime import datetime, timedelta\n'), ((3104, 3156), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(values[0] / 1000000.0)'], {'tz': 'tz'}), '(values[0] / 1000000.0, tz=tz)\n', (3126, 3156), False, 'from datetime import datetime, timedelta\n')]
|
'''
Notice how this file does not import syshub or know about it
in any way.
'''
import sys
def say_something():
print('hello')
def input_something():
print('prompt: ', end='')
print(sys.stdin.readline())
def raise_something():
raise ValueError
|
[
"sys.stdin.readline"
] |
[((196, 216), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (214, 216), False, 'import sys\n')]
|
import logging
import flask
from flask_wtf import FlaskForm as Form
from pydantic import SecretStr
from werkzeug import Response
from wtforms import PasswordField, StringField, validators
from overhave.authorization import IAdminAuthorizationManager
from overhave.entities import SystemUserModel
logger = logging.getLogger(__name__)
_INVALID_AUTH_MSG = "Specified username '{username}' and password pair is invalid!"
class LoginForm(Form):
""" Form for user authorization. """
username: StringField = StringField(
"Username",
validators=[validators.input_required(message="Field required!")],
render_kw={"placeholder": "Username", "icon": "glyphicon-user"},
)
password: PasswordField = PasswordField(
"Password", render_kw={"placeholder": "Password", "icon": "glyphicon-certificate"},
)
def __init__(self, auth_manager: IAdminAuthorizationManager) -> None:
super().__init__()
self._auth_manager = auth_manager
def get_user(self) -> SystemUserModel:
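        # delegate the username/password check to the injected authorization manager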
authorized_user = self._auth_manager.authorize_user(
username=self.username.data, password=SecretStr(self.password.data)
)
if authorized_user is None:
raise validators.ValidationError(_INVALID_AUTH_MSG.format(username=self.username.data))
return authorized_user
@staticmethod
def flash_and_redirect(flash_msg: str) -> Response:
flask.flash(flash_msg, category="error")
return flask.redirect(flask.url_for("admin.login"))
|
[
"wtforms.validators.input_required",
"flask.flash",
"pydantic.SecretStr",
"flask.url_for",
"wtforms.PasswordField",
"logging.getLogger"
] |
[((308, 335), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (325, 335), False, 'import logging\n'), ((733, 834), 'wtforms.PasswordField', 'PasswordField', (['"""Password"""'], {'render_kw': "{'placeholder': 'Password', 'icon': 'glyphicon-certificate'}"}), "('Password', render_kw={'placeholder': 'Password', 'icon':\n 'glyphicon-certificate'})\n", (746, 834), False, 'from wtforms import PasswordField, StringField, validators\n'), ((1435, 1475), 'flask.flash', 'flask.flash', (['flash_msg'], {'category': '"""error"""'}), "(flash_msg, category='error')\n", (1446, 1475), False, 'import flask\n'), ((1506, 1534), 'flask.url_for', 'flask.url_for', (['"""admin.login"""'], {}), "('admin.login')\n", (1519, 1534), False, 'import flask\n'), ((569, 621), 'wtforms.validators.input_required', 'validators.input_required', ([], {'message': '"""Field required!"""'}), "(message='Field required!')\n", (594, 621), False, 'from wtforms import PasswordField, StringField, validators\n'), ((1145, 1174), 'pydantic.SecretStr', 'SecretStr', (['self.password.data'], {}), '(self.password.data)\n', (1154, 1174), False, 'from pydantic import SecretStr\n')]
|
import unittest
import random
from skiplist import SkipList
class SkipListTest(unittest.TestCase):
def setUp(self):
self.sl = SkipList()
def test_insert(self):
key, data = random.randint(0, 1 << 20), 'SkipList'
self.sl[key] = data
self.assertEqual(self.sl[key], data)
def test_remove(self):
key, data = random.randint(0, 1 << 20), 'SkipList'
self.sl[key] = data
self.assertEqual(self.sl[key], data)
del self.sl[key]
self.assertRaises(KeyError, self.sl.__getitem__, key)
def test_update(self):
key, data = random.randint(0, 1 << 20), 'SkipList'
self.sl[key] = data
self.assertEqual(self.sl[key], data)
self.sl[key] = 'SkyMemory'
self.assertEqual(self.sl[key], 'SkyMemory')
def test_search(self):
key, data = random.randint(0, 1 << 20), 'SkipList'
self.sl[key] = data
self.assertEqual(self.sl[key], data)
self.assertRaises(KeyError, self.sl.__getitem__, key + 1)
def test_len(self):
keys = random.sample(range(10000), 50)
for k in keys:
self.sl[k] = f"data_{k}"
self.assertEqual(len(self.sl), len(keys))
def test_contain(self):
key, data = 1, 'SkipList'
self.sl[key] = data
self.assertIn(1, self.sl)
self.assertNotIn(2, self.sl)
def test_iterable(self):
keys = random.sample(range(10000), 50)
for k in keys:
self.sl[k] = f"data_{k}"
self.assertListEqual(list(self.sl), sorted(keys))
def test_rangekey(self):
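        # rangekey should yield (key, value) pairs for keys between the two bounds, inclusive, in sorted order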
keys = random.sample(range(10000), 50)
for k in keys:
self.sl[k] = f"data_{k}"
skeys = sorted(keys)
r1 = self.sl.rangekey(skeys[5], skeys[20])
r2 = []
for k in skeys[5:21]:
r2.append((k, f"data_{k}"))
self.assertListEqual(list(r1), r2)
def test_verbose(self):
keys = random.sample(range(10000), 15)
for k in keys:
self.sl[k] = f"data_{k}"
print()
self.sl._verbose()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"skiplist.SkipList",
"random.randint"
] |
[((2141, 2156), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2154, 2156), False, 'import unittest\n'), ((141, 151), 'skiplist.SkipList', 'SkipList', ([], {}), '()\n', (149, 151), False, 'from skiplist import SkipList\n'), ((200, 226), 'random.randint', 'random.randint', (['(0)', '(1 << 20)'], {}), '(0, 1 << 20)\n', (214, 226), False, 'import random\n'), ((361, 387), 'random.randint', 'random.randint', (['(0)', '(1 << 20)'], {}), '(0, 1 << 20)\n', (375, 387), False, 'import random\n'), ((611, 637), 'random.randint', 'random.randint', (['(0)', '(1 << 20)'], {}), '(0, 1 << 20)\n', (625, 637), False, 'import random\n'), ((860, 886), 'random.randint', 'random.randint', (['(0)', '(1 << 20)'], {}), '(0, 1 << 20)\n', (874, 886), False, 'import random\n')]
|
# evaluate a decision tree on the entire small dataset
from numpy import mean
from numpy import std
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.tree import DecisionTreeClassifier
# define dataset
X, y = make_classification(n_samples=1000, n_features=3, n_informative=2, n_redundant=1, random_state=1)
# define model
model = DecisionTreeClassifier()
# define evaluation procedure
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model
scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
# report result
print('Mean Accuracy: %.3f (%.3f)' % (mean(scores), std(scores)))
|
[
"numpy.std",
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.RepeatedStratifiedKFold",
"sklearn.datasets.make_classification",
"sklearn.tree.DecisionTreeClassifier",
"numpy.mean"
] |
[((333, 434), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)', 'n_features': '(3)', 'n_informative': '(2)', 'n_redundant': '(1)', 'random_state': '(1)'}), '(n_samples=1000, n_features=3, n_informative=2,\n n_redundant=1, random_state=1)\n', (352, 434), False, 'from sklearn.datasets import make_classification\n'), ((454, 478), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (476, 478), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((514, 579), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': '(10)', 'n_repeats': '(3)', 'random_state': '(1)'}), '(n_splits=10, n_repeats=3, random_state=1)\n', (537, 579), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((606, 672), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X', 'y'], {'scoring': '"""accuracy"""', 'cv': 'cv', 'n_jobs': '(-1)'}), "(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n", (621, 672), False, 'from sklearn.model_selection import cross_val_score\n'), ((727, 739), 'numpy.mean', 'mean', (['scores'], {}), '(scores)\n', (731, 739), False, 'from numpy import mean\n'), ((741, 752), 'numpy.std', 'std', (['scores'], {}), '(scores)\n', (744, 752), False, 'from numpy import std\n')]
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import functools
from sqlalchemy.orm import sessionmaker
from aiida.backends.sqlalchemy.models.base import Base
from aiida.backends.sqlalchemy.models.computer import DbComputer
from aiida.backends.sqlalchemy.utils import install_tc
from aiida.backends.testimplbase import AiidaTestImplementation
from aiida.orm.implementation.sqlalchemy.backend import SqlaBackend
# Automatically querying for expired objects doesn't seem to work.
# That's why expire_on_commit=False resolves many issues with objects
# being obsolete
expire_on_commit = True
Session = sessionmaker(expire_on_commit=expire_on_commit)
# This contains the codebase for the setUpClass and tearDown methods used
# internally by the AiidaTestCase. This inherits only from 'object' so that
# it is not picked up by the automatic discovery of tests
# (it shouldn't be, as it risks destroying the DB if the safety checks are
# not in place; those checks are implemented in the AiidaTestCase).
class SqlAlchemyTests(AiidaTestImplementation):
    # Specifies whether to drop the tables at the beginning of a test case.
    # If True, completely drops the tables and recreates the schema,
    # but this is usually unnecessary and pretty slow.
    # Also, if the tests are interrupted, there is the risk that the
    # DB remains dropped, so you would have to run 'verdi -p test_xxx setup'
    # again to reinstall the schema
drop_all = False
test_session = None
connection = None
def setUpClass_method(self):
from aiida.backends.sqlalchemy import get_scoped_session
if self.test_session is None:
# Should we use reset_session?
self.test_session = get_scoped_session()
if self.drop_all:
Base.metadata.drop_all(self.test_session.connection)
Base.metadata.create_all(self.test_session.connection)
install_tc(self.test_session.connection)
else:
self.clean_db()
self.backend = SqlaBackend()
def setUp_method(self):
pass
def tearDown_method(self):
pass
@staticmethod
def inject_computer(f):
@functools.wraps(f)
def dec(*args, **kwargs):
computer = DbComputer.query.filter_by(name="localhost").first()
args = list(args)
args.insert(1, computer)
return f(*args, **kwargs)
return dec
def clean_db(self):
from sqlalchemy.sql import table
DbGroupNodes = table('db_dbgroup_dbnodes')
DbGroup = table('db_dbgroup')
DbLink = table('db_dblink')
DbNode = table('db_dbnode')
DbLog = table('db_dblog')
DbAuthInfo = table('db_dbauthinfo')
DbUser = table('db_dbuser')
DbComputer = table('db_dbcomputer')
self.test_session.execute(DbGroupNodes.delete())
self.test_session.execute(DbGroup.delete())
self.test_session.execute(DbLog.delete())
self.test_session.execute(DbLink.delete())
self.test_session.execute(DbNode.delete())
self.test_session.execute(DbAuthInfo.delete())
self.test_session.execute(DbComputer.delete())
self.test_session.execute(DbUser.delete())
self.test_session.commit()
def tearDownClass_method(self):
"""
Backend-specific tasks for tearing down the test environment.
"""
self.test_session.close()
self.test_session = None
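# Hedged illustration (not part of AiiDA; the engine URL and model instance
# below are made up): in plain SQLAlchemy, expire_on_commit=False keeps
# already-loaded attributes readable after a commit instead of expiring them:
#
#     engine = create_engine('sqlite://')
#     Session = sessionmaker(bind=engine, expire_on_commit=False)
#     session = Session()
#     session.add(instance)
#     session.commit()
#     instance.some_attr  # still populated; with expire_on_commit=True this
#                         # access would trigger a refresh query instead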
|
[
"aiida.backends.sqlalchemy.models.computer.DbComputer.query.filter_by",
"aiida.backends.sqlalchemy.models.base.Base.metadata.create_all",
"aiida.backends.sqlalchemy.get_scoped_session",
"aiida.backends.sqlalchemy.models.computer.DbComputer.delete",
"aiida.backends.sqlalchemy.utils.install_tc",
"aiida.backends.sqlalchemy.models.base.Base.metadata.drop_all",
"functools.wraps",
"aiida.orm.implementation.sqlalchemy.backend.SqlaBackend",
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.sql.table"
] |
[((1297, 1344), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'expire_on_commit': 'expire_on_commit'}), '(expire_on_commit=expire_on_commit)\n', (1309, 1344), False, 'from sqlalchemy.orm import sessionmaker\n'), ((2694, 2707), 'aiida.orm.implementation.sqlalchemy.backend.SqlaBackend', 'SqlaBackend', ([], {}), '()\n', (2705, 2707), False, 'from aiida.orm.implementation.sqlalchemy.backend import SqlaBackend\n'), ((2851, 2869), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (2866, 2869), False, 'import functools\n'), ((3195, 3222), 'sqlalchemy.sql.table', 'table', (['"""db_dbgroup_dbnodes"""'], {}), "('db_dbgroup_dbnodes')\n", (3200, 3222), False, 'from sqlalchemy.sql import table\n'), ((3241, 3260), 'sqlalchemy.sql.table', 'table', (['"""db_dbgroup"""'], {}), "('db_dbgroup')\n", (3246, 3260), False, 'from sqlalchemy.sql import table\n'), ((3278, 3296), 'sqlalchemy.sql.table', 'table', (['"""db_dblink"""'], {}), "('db_dblink')\n", (3283, 3296), False, 'from sqlalchemy.sql import table\n'), ((3314, 3332), 'sqlalchemy.sql.table', 'table', (['"""db_dbnode"""'], {}), "('db_dbnode')\n", (3319, 3332), False, 'from sqlalchemy.sql import table\n'), ((3349, 3366), 'sqlalchemy.sql.table', 'table', (['"""db_dblog"""'], {}), "('db_dblog')\n", (3354, 3366), False, 'from sqlalchemy.sql import table\n'), ((3388, 3410), 'sqlalchemy.sql.table', 'table', (['"""db_dbauthinfo"""'], {}), "('db_dbauthinfo')\n", (3393, 3410), False, 'from sqlalchemy.sql import table\n'), ((3428, 3446), 'sqlalchemy.sql.table', 'table', (['"""db_dbuser"""'], {}), "('db_dbuser')\n", (3433, 3446), False, 'from sqlalchemy.sql import table\n'), ((3468, 3490), 'sqlalchemy.sql.table', 'table', (['"""db_dbcomputer"""'], {}), "('db_dbcomputer')\n", (3473, 3490), False, 'from sqlalchemy.sql import table\n'), ((2396, 2416), 'aiida.backends.sqlalchemy.get_scoped_session', 'get_scoped_session', ([], {}), '()\n', (2414, 2416), False, 'from aiida.backends.sqlalchemy import get_scoped_session\n'), ((2456, 2508), 'aiida.backends.sqlalchemy.models.base.Base.metadata.drop_all', 'Base.metadata.drop_all', (['self.test_session.connection'], {}), '(self.test_session.connection)\n', (2478, 2508), False, 'from aiida.backends.sqlalchemy.models.base import Base\n'), ((2521, 2575), 'aiida.backends.sqlalchemy.models.base.Base.metadata.create_all', 'Base.metadata.create_all', (['self.test_session.connection'], {}), '(self.test_session.connection)\n', (2545, 2575), False, 'from aiida.backends.sqlalchemy.models.base import Base\n'), ((2588, 2628), 'aiida.backends.sqlalchemy.utils.install_tc', 'install_tc', (['self.test_session.connection'], {}), '(self.test_session.connection)\n', (2598, 2628), False, 'from aiida.backends.sqlalchemy.utils import install_tc\n'), ((3842, 3861), 'aiida.backends.sqlalchemy.models.computer.DbComputer.delete', 'DbComputer.delete', ([], {}), '()\n', (3859, 3861), False, 'from aiida.backends.sqlalchemy.models.computer import DbComputer\n'), ((2927, 2971), 'aiida.backends.sqlalchemy.models.computer.DbComputer.query.filter_by', 'DbComputer.query.filter_by', ([], {'name': '"""localhost"""'}), "(name='localhost')\n", (2953, 2971), False, 'from aiida.backends.sqlalchemy.models.computer import DbComputer\n')]
|
import os
import tensorflow as tf
from PIL import Image
import numpy as np
import cv2
from preprocessing import preprocessing_factory
from google.protobuf import text_format
def main(_):
labels = []
'''
# Let's read our pbtxt file into a Graph protobuf
f = open("C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pbtxt", "r")
graph_protobuf = text_format.Parse(f.read(), tf.GraphDef())
# Import the graph protobuf into our new graph.
graph_clone = tf.Graph()
with graph_clone.as_default():
tf.import_graph_def(graph_def=graph_protobuf, name="")
# Display the graph inline.
graph_clone.as_graph_def()
'''
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile("C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pb", 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
# Create a list of labels.
with open('C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/classes.txt', 'rt') as lf:
for l in lf:
labels.append(l.strip())
# Load from a file
image = Image.open('C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/input_images/test/001.jpg')
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
'resnet_v1_50',
is_training=False)
eval_image_size = 72
image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
output_layer = 'prefetch_queue/fifo_queue:0'
input_node = 'prefetch_queue/fifo_queue:0'
'''
output_layer = 'resnet_v1_50/conv1/Relu:0' OR 'resnet_v1_50/block4/unit_3/bottleneck_v1/Relu:0'
input_node = 'resnet_v1_50/SpatialSqueeze:0'
'''
with tf.Session() as sess:
try:
prob_tensor = sess.graph.get_tensor_by_name(output_layer)
predictions, = sess.run(prob_tensor, {input_node: image})
except KeyError:
print("Couldn't find classification output layer: " + output_layer + ".")
exit(-1)
# Print the highest probability label
highest_probability_index = np.argmax(predictions)
print('Classified as: ' + labels[highest_probability_index])
print()
# Or you can print out all of the results mapping labels to probabilities.
label_index = 0
for p in predictions:
        truncated_probability = np.float64(np.round(p, 8))
        print(labels[label_index], truncated_probability)
label_index += 1
if __name__ == '__main__':
tf.app.run()
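# Hedged porting note (assumption, not part of the original script):
# tf.Session and tf.app.run are TF1 graph-mode APIs; under TF2 the
# equivalents are tf.compat.v1.Session and tf.compat.v1.app.run, after
# calling tf.compat.v1.disable_eager_execution(). The PIL image above
# would also typically need converting first (e.g. np.array(image))
# before being fed through a graph-mode preprocessing function.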
|
[
"numpy.argmax",
"tensorflow.Session",
"PIL.Image.open",
"preprocessing.preprocessing_factory.get_preprocessing",
"numpy.round",
"tensorflow.import_graph_def",
"tensorflow.compat.v1.GraphDef",
"tensorflow.app.run",
"tensorflow.io.gfile.GFile"
] |
[((707, 730), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (728, 730), True, 'import tensorflow as tf\n'), ((1159, 1268), 'PIL.Image.open', 'Image.open', (['"""C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/input_images/test/001.jpg"""'], {}), "(\n 'C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/input_images/test/001.jpg'\n )\n", (1169, 1268), False, 'from PIL import Image\n'), ((1289, 1363), 'preprocessing.preprocessing_factory.get_preprocessing', 'preprocessing_factory.get_preprocessing', (['"""resnet_v1_50"""'], {'is_training': '(False)'}), "('resnet_v1_50', is_training=False)\n", (1328, 1363), False, 'from preprocessing import preprocessing_factory\n'), ((2200, 2222), 'numpy.argmax', 'np.argmax', (['predictions'], {}), '(predictions)\n', (2209, 2222), True, 'import numpy as np\n'), ((2597, 2609), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2607, 2609), True, 'import tensorflow as tf\n'), ((741, 847), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['"""C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pb"""', '"""rb"""'], {}), "(\n 'C:/Users/turnt/OneDrive/Desktop/Rob0Workspace/Scene_labeler/data/gr.pb',\n 'rb')\n", (758, 847), True, 'import tensorflow as tf\n'), ((897, 936), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (916, 936), True, 'import tensorflow as tf\n'), ((1817, 1829), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1827, 1829), True, 'import tensorflow as tf\n'), ((2468, 2482), 'numpy.round', 'np.round', (['p', '(8)'], {}), '(p, 8)\n', (2476, 2482), True, 'import numpy as np\n')]
|
# <NAME> <<EMAIL>>
import math
from .Round import Round
##__________________________________________________________________||
class RoundLog(object):
"""Binning with equal width in log scale
Parameters
----------
    width : float or int, default 0.1
The common logarithm (log10) of the width.
aboundary : float or int, optional
A boundary. If not given, ``width/2`` will be used.
min : float or int, optional
The lowest bin will be the bin that ``min`` falls in. It must be a
positive value. If given, ``__call__(val)`` returns ``underflow_bin``
if the ``val`` is less than the lower edge of the lowest bin.
underflow_bin : optional
The underflow bin. When ``min`` is given, the ``__call__(val)`` returns
``underflow_bin`` if the ``val`` is less than the lower edge of the
lowest bin.
max : float or int, optional
The highest bin will be the bin that ``max`` falls in except when
``max`` is one of boundaries. It must be a positive value. When ``max``
is one of boundaries, the highest bin is the bin whose upper edge is
``max``. If given, ``__call__(val)`` returns the overflow bin if the
``val`` is greater than or equal to the upper edge of the highest bin.
overflow_bin : optional
The overflow bin if ``overflow_bin`` is any value other than ``True``.
If ``overflow_bin`` is ``True``, the overflow bin will be the upper
edge of the highest bin. When ``max`` is given, the ``__call__(val)``
returns the overflow bin if the ``val`` is greater than or equal to the
upper edge of the highest bin.
valid : function, optional
Boolean function to test if value is valid
"""
def __init__(self, width=0.1, aboundary=1,
min=None, underflow_bin=None,
max=None, overflow_bin=None,
valid=None):
self._round = Round(width=width, aboundary=math.log10(aboundary))
self.width = width
self.aboundary = aboundary
self.min = min
self.max = max
self.valid = valid
if self.min is None:
self.min_bin_log10_lowedge = None
self.underflow_bin = None
else:
self.min_bin_log10_lowedge = self._round(math.log10(self.min))
self.underflow_bin = underflow_bin
if self.max is None:
self.max_bin_log10_upedge = None
self.overflow_bin = None
else:
self._round(math.log10(self.max)) # = self._round.boundaries[-2]
self.max_bin_log10_upedge = self._round.boundaries[-1]
if overflow_bin is True:
self.overflow_bin = 10**self.max_bin_log10_upedge
else:
self.overflow_bin = overflow_bin
def __repr__(self):
return '{}(width={!r}, aboundary={!r}, min={!r}, underflow_bin={!r}, max={!r}, overflow_bin={!r}, valid={!r})'.format(
self.__class__.__name__,
self.width,
self.aboundary,
self.min,
self.underflow_bin,
self.max,
self.overflow_bin,
self.valid
)
def __call__(self, val):
if self.valid:
if not self.valid(val):
return None
if val <= 0.0:
if self.min is not None:
return self.underflow_bin
elif val == 0.0:
return 0
else:
return None
if self.min is not None:
if math.log10(val) < self.min_bin_log10_lowedge:
return self.underflow_bin
if math.isinf(val):
if self.max is not None:
return self.overflow_bin
else:
return None
if self.max is not None:
if self.max_bin_log10_upedge <= math.log10(val):
return self.overflow_bin
val = math.log10(val)
val = self._round(val)
if val is None:
return None
return 10**val
def next(self, bin):
if bin is None:
return None
if bin == self.underflow_bin:
return self.__call__(self.min)
if bin < 0:
return None
if bin == 0:
return 0
if bin == self.overflow_bin:
return self.overflow_bin
log10_bin = self._round(math.log10(bin))
if log10_bin is None:
return None
log10_next = self._round.next(log10_bin)
if self.max is not None:
if log10_next == self.max_bin_log10_upedge:
return self.overflow_bin
return 10**log10_next
##__________________________________________________________________||
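# Hedged usage sketch (values are illustrative only, not from this module):
#
#     binning = RoundLog(width=0.1, aboundary=1)
#     b = binning(150.0)   # 10**edge of the log10 bin that 150.0 falls in
#     binning.next(b)      # the bin immediately above it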
|
[
"math.log10",
"math.isinf"
] |
[((3688, 3703), 'math.isinf', 'math.isinf', (['val'], {}), '(val)\n', (3698, 3703), False, 'import math\n'), ((3980, 3995), 'math.log10', 'math.log10', (['val'], {}), '(val)\n', (3990, 3995), False, 'import math\n'), ((4453, 4468), 'math.log10', 'math.log10', (['bin'], {}), '(bin)\n', (4463, 4468), False, 'import math\n'), ((1989, 2010), 'math.log10', 'math.log10', (['aboundary'], {}), '(aboundary)\n', (1999, 2010), False, 'import math\n'), ((2328, 2348), 'math.log10', 'math.log10', (['self.min'], {}), '(self.min)\n', (2338, 2348), False, 'import math\n'), ((2547, 2567), 'math.log10', 'math.log10', (['self.max'], {}), '(self.max)\n', (2557, 2567), False, 'import math\n'), ((3588, 3603), 'math.log10', 'math.log10', (['val'], {}), '(val)\n', (3598, 3603), False, 'import math\n'), ((3907, 3922), 'math.log10', 'math.log10', (['val'], {}), '(val)\n', (3917, 3922), False, 'import math\n')]
|
import numpy as np
import pyqtgraph as pg
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton,\
QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem
from EGGS_labrad.clients.Widgets import TextChangingButton, QCustomGroupBox
_SHELL_FONT = 'MS Shell Dlg 2'
# todo: clean up display pyqtgraph
class stability_gui(QFrame):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.setFrameStyle(0x0001 | 0x0030)
self.setFixedSize(400, 875)
self.makeLayout()
self.setWindowTitle("Stability Client")
def _makeStabilityTab(self):
"""
This tab displays the trap parameters and the resultant secular frequencies.
This is independent of ion number.
Part of the Parameters QTabWidget.
"""
# parameter box
stability_widget = QWidget()
stability_widget_layout = QGridLayout(stability_widget)
# l0_distance
l0_distance_label = QLabel("Length Scale (\u03BCm)")
self.l0_distance = QLabel("00.00")
self.l0_distance.setStyleSheet('color: blue')
# # record button
# self.record_button = TextChangingButton(('Stop Recording', 'Start Recording'))
# self.record_button.setMaximumHeight(25)
# a parameter
aparam_display_label = QLabel('a-parameter')
self.aparam_display = QLabel('0.0000')
# q parameter
qparam_display_label = QLabel('q-parameter')
self.qparam_display = QLabel('0.000')
# wsecr - radial
wsecr_display_label = QLabel('\u03C9 Radial (x2\u03C0 MHz)')
self.wsecr_display = QLabel('0.000')
# wsecz - radial
wsecz_display_label = QLabel('\u03C9 Axial (x2\u03C0 MHz)')
self.wsecz_display = QLabel('0.000')
# anharmonic_limit
anharmonic_limit_label = QLabel("Anharmonic Limit (%)")
self.anharmonic_limit = QLabel("00.00")
# configure display elements
for display in (self.l0_distance, self.aparam_display, self.qparam_display, self.wsecr_display,
self.wsecz_display, self.anharmonic_limit):
display.setFont(QFont(_SHELL_FONT, pointSize=22))
display.setAlignment(Qt.AlignRight)
display.setStyleSheet('color: blue')
for display_label in (l0_distance_label, aparam_display_label, qparam_display_label,
wsecr_display_label, wsecz_display_label, anharmonic_limit_label):
display_label.setAlignment(Qt.AlignRight)
# layout parameter box elements
stability_widget_layout.addWidget(anharmonic_limit_label, 1, 0, 1, 1)
stability_widget_layout.addWidget(self.anharmonic_limit, 2, 0, 1, 1)
stability_widget_layout.addWidget(aparam_display_label, 1, 1, 1, 1)
stability_widget_layout.addWidget(self.aparam_display, 2, 1, 1, 1)
stability_widget_layout.addWidget(qparam_display_label, 1, 2, 1, 1)
stability_widget_layout.addWidget(self.qparam_display, 2, 2, 1, 1)
stability_widget_layout.addWidget(wsecr_display_label, 3, 1, 1, 1)
stability_widget_layout.addWidget(self.wsecr_display, 4, 1, 1, 1)
stability_widget_layout.addWidget(wsecz_display_label, 3, 2, 1, 1)
stability_widget_layout.addWidget(self.wsecz_display, 4, 2, 1, 1)
stability_widget_layout.addWidget(l0_distance_label, 3, 0, 1, 1)
stability_widget_layout.addWidget(self.l0_distance, 4, 0, 1, 1)
return stability_widget
def _makeIonTab(self):
"""
This tab allows configuration of ion chain data to retrieve
mode values (i.e. eigenvector components and mode frequencies).
"""
# create holders
iontab_widget = QWidget()
iontab_widget_layout = QGridLayout(iontab_widget)
# total_ions
total_ion_label = QLabel("# of ions")
self.total_ions = QDoubleSpinBox()
self.total_ions.setRange(1, 10)
self.total_ions.setDecimals(0)
self.total_ions.setSingleStep(1)
self.total_ions.setKeyboardTracking(False)
# ion_num
ion_num_label = QLabel("Ion #")
self.ion_num = QComboBox()
# ion_mass
ion_mass_label = QLabel("Ion Mass (amu)")
self.ion_mass = QDoubleSpinBox()
self.ion_mass.setRange(1, 200)
self.ion_mass.setDecimals(1)
self.ion_mass.setSingleStep(1)
self.ion_mass.setKeyboardTracking(False)
# configure display elements
for display in (self.total_ions, self.ion_num, self.ion_mass):
try:
display.setFont(QFont(_SHELL_FONT, pointSize=18))
display.setAlignment(Qt.AlignRight)
except AttributeError:
pass
for display_label in (total_ion_label, ion_num_label, ion_mass_label):
display_label.setAlignment(Qt.AlignRight)
# lay out
iontab_widget_layout.addWidget(total_ion_label, 0, 0, 1, 1)
iontab_widget_layout.addWidget(self.total_ions, 1, 0, 1, 1)
iontab_widget_layout.addWidget(ion_num_label, 0, 1, 1, 1)
iontab_widget_layout.addWidget(self.ion_num, 1, 1, 1, 1)
iontab_widget_layout.addWidget(ion_mass_label, 0, 2, 1, 1)
iontab_widget_layout.addWidget(self.ion_mass, 1, 2, 1, 1)
# todo: integrate with andor
return iontab_widget
def _makeTrapTab(self):
"""
This tab allows configuration of dynamic trap parameters.
Part of the Parameters QTabWidget.
"""
# create holders
trap_widget = QWidget()
trap_widget_layout = QGridLayout(trap_widget)
# vrf
vrf_display_label = QLabel('VRF (Vpp)')
self.vrf_display = QDoubleSpinBox()
# vrf - offset
voff_display_label = QLabel('V_off (V)')
self.voff_display = QDoubleSpinBox()
# wrf
wrf_display_label = QLabel('\u03C9RF (x2\u03C0 MHz)')
self.wrf_display = QDoubleSpinBox()
# vdc
vdc_display_label = QLabel('VDC (V)')
self.vdc_display = QDoubleSpinBox()
# configure display elements
for display in (self.vrf_display, self.voff_display, self.wrf_display, self.vdc_display):
display.setFont(QFont(_SHELL_FONT, pointSize=12))
display.setAlignment(Qt.AlignRight)
display.setDecimals(3)
display.setSingleStep(1)
display.setRange(-100, 1000)
display.setKeyboardTracking(False)
for display_label in (vrf_display_label, voff_display_label,
wrf_display_label, vdc_display_label):
display_label.setAlignment(Qt.AlignRight)
# create radio buttons
radio_widget = QWidget()
radio_widget_layout = QHBoxLayout(radio_widget)
self.values_get = QRadioButton("Get Values from System")
self.values_set = QRadioButton("Manually Set Values")
radio_widget_layout.addWidget(self.values_get)
radio_widget_layout.addWidget(self.values_set)
self.values_set.setChecked(True)
# lay out
trap_widget_layout.addWidget(radio_widget, 0, 0, 1, 2)
trap_widget_layout.addWidget(vrf_display_label, 1, 0, 1, 1)
trap_widget_layout.addWidget(self.vrf_display, 2, 0, 1, 1)
trap_widget_layout.addWidget(wrf_display_label, 1, 1, 1, 1)
trap_widget_layout.addWidget(self.wrf_display, 2, 1, 1, 1)
trap_widget_layout.addWidget(vdc_display_label, 3, 0, 1, 1)
trap_widget_layout.addWidget(self.vdc_display, 4, 0, 1, 1)
trap_widget_layout.addWidget(voff_display_label, 3, 1, 1, 1)
trap_widget_layout.addWidget(self.voff_display, 4, 1, 1, 1)
return trap_widget
def _makeGeometryTab(self):
"""
This tab allows configuration of trap geometry parameters.
Part of the Parameters QTabWidget.
"""
# r0, kr, z0, kz
# create holders
geometry_widget = QWidget()
geometry_widget_layout = QGridLayout(geometry_widget)
# display labels
r0_display_label = QLabel('r0 (\u03BCm)')
kr_display_label = QLabel('\u03BAr')
z0_display_label = QLabel('z0 (\u03BCm)')
kz_display_label = QLabel('\u03BAz')
# spin boxes
self.r0_display = QDoubleSpinBox()
self.kr_display = QDoubleSpinBox()
self.z0_display = QDoubleSpinBox()
self.kz_display = QDoubleSpinBox()
# configure display elements
for spinbox in (self.r0_display, self.kr_display, self.z0_display, self.kz_display):
spinbox.setFont(QFont(_SHELL_FONT, pointSize=12))
spinbox.setAlignment(Qt.AlignRight)
for spinbox in (self.r0_display, self.z0_display):
spinbox.setRange(0, 10000)
spinbox.setDecimals(0)
spinbox.setSingleStep(1)
for spinbox in (self.kr_display, self.kz_display):
spinbox.setRange(0, 1)
spinbox.setDecimals(3)
spinbox.setSingleStep(1)
for display_label in (r0_display_label, kr_display_label, z0_display_label, kz_display_label):
display_label.setAlignment(Qt.AlignRight)
# lay out
geometry_widget_layout.addWidget(r0_display_label, 0, 0, 1, 1)
geometry_widget_layout.addWidget(self.r0_display, 1, 0, 1, 1)
geometry_widget_layout.addWidget(kr_display_label, 0, 1, 1, 1)
geometry_widget_layout.addWidget(self.kr_display, 1, 1, 1, 1)
geometry_widget_layout.addWidget(z0_display_label, 2, 0, 1, 1)
geometry_widget_layout.addWidget(self.z0_display, 3, 0, 1, 1)
geometry_widget_layout.addWidget(kz_display_label, 2, 1, 1, 1)
geometry_widget_layout.addWidget(self.kz_display, 3, 1, 1, 1)
return geometry_widget
def _makeMathieuDisplayTab(self):
"""
This tab draws the stability plot display.
Part of the Display QTabWidget
"""
# create holder widget
mathieu_widget = QWidget()
mathieu_widget_display = QGridLayout(mathieu_widget)
# create plotwidget for display
pg.setConfigOption('background', 'k')
self.stability_display = pg.PlotWidget(name='Mathieu Stability Display', border=True)
self.stability_display.showGrid(x=True, y=True, alpha=0.5)
self.stability_display.setRange(xRange=[0, 1], yRange=[0, 0.1])
self.stability_display.setLimits(xMin=-0.1, xMax=1, yMin=-0.1, yMax=0.1)
self.stability_display.setMaximumSize(400, 400)
self.stability_display.setMinimumSize(300, 300)
self.stability_display.setLabel('left', 'a')
self.stability_display.setLabel('bottom', 'q')
self.stability_point = self.stability_display.plot(symbol='o', symbolBrush=QColor(Qt.white))
# create stability boundaries for mathieu
# todo: cut off after intersection; also do negative
xarr = np.linspace(0, 1, 100)
yarr = 0.5 * np.power(xarr, 2)
self.stability_region = self.stability_display.plot(symbol=None, pen=QColor(Qt.red))
self.stability_region2 = self.stability_display.plot(xarr, yarr, symbol=None, pen=QColor(Qt.red))
# beta setting
beta_setting_display = QLabel('\u03B2')
beta_setting_display.setAlignment(Qt.AlignRight)
self.beta_setting = QDoubleSpinBox()
self.beta_setting.setFont(QFont('MS Shell Dlg 2', pointSize=14))
self.beta_setting.setDecimals(1)
self.beta_setting.setSingleStep(1)
self.beta_setting.setRange(0, 5)
self.beta_setting.setKeyboardTracking(False)
self.beta_setting.setAlignment(Qt.AlignRight)
# autoscale button
self.autoscale = QPushButton("Autoscale")
# lay out
mathieu_widget_display.addWidget(beta_setting_display, 0, 0, 1, 1)
mathieu_widget_display.addWidget(self.beta_setting, 1, 0, 1, 1)
mathieu_widget_display.addWidget(self.autoscale, 1, 1, 1, 1)
mathieu_widget_display.addWidget(self.stability_display, 2, 0, 3, 3)
return mathieu_widget
def _makeEigenTab(self):
"""
This tab displays the ion chain mode data.
Part of the Display QTabWidget.
"""
# create holders
eigen_widget = QWidget()
eigen_widget_layout = QGridLayout(eigen_widget)
# create widgets
self.eigenmode_axial_display = QTreeWidget()
self.eigenmode_axial_display.setHeaderLabels(["Mode Frequency (x2\u03C0 MHz)", "Ion Number", "Mode Amplitude"])
self.eigenmode_radial_display = QTreeWidget()
self.eigenmode_radial_display.setHeaderLabels(["Mode Frequency (x2\u03C0 MHz)", "Ion Number", "Mode Amplitude"])
# lay out
eigen_widget_layout.addWidget(QCustomGroupBox(self.eigenmode_axial_display, "Axial Modes"))
eigen_widget_layout.addWidget(QCustomGroupBox(self.eigenmode_radial_display, "Radial Modes"))
return eigen_widget
def makeLayout(self):
# create parameter tab widget
parameterTabWidget = QTabWidget()
chain_widget = QWidget()
chain_widget_layout = QVBoxLayout(chain_widget)
chain_widget_layout.addWidget(QCustomGroupBox(self._makeIonTab(), "Ion Chain"))
chain_widget_layout.addWidget(QCustomGroupBox(self._makeStabilityTab(), "Ion Stability"))
trap_widget = QWidget()
trap_widget_layout = QVBoxLayout(trap_widget)
trap_widget_layout.addWidget(QCustomGroupBox(self._makeTrapTab(), "Trap Parameter"))
trap_widget_layout.addWidget(QCustomGroupBox(self._makeGeometryTab(), "Trap Geometry"))
parameterTabWidget.addTab(chain_widget, "Ion Chain")
parameterTabWidget.addTab(trap_widget, "Trap")
# create display tab widget
display_tabs = {
'Mathieu': self._makeMathieuDisplayTab(),
'Eigenmode Data': self._makeEigenTab(),
}
displayTabWidget = QTabWidget()
for tab_name, tab_widget in display_tabs.items():
displayTabWidget.addTab(tab_widget, tab_name)
# title
title = QLabel('Stability Client')
title.setFont(QFont(_SHELL_FONT, pointSize=18))
title.setAlignment(Qt.AlignCenter)
# lay out
layout = QGridLayout(self)
layout.addWidget(title, 0, 0, 1, 4)
layout.addWidget(parameterTabWidget, 1, 0, 2, 4)
layout.addWidget(displayTabWidget, 4, 0, 3, 4)
def drawStability(self, beta=0.4):
xarr = np.linspace(0, 1, 100)
yarr = np.power(beta, 2) - 0.5 * np.power(xarr, 2)
self.stability_region.setData(xarr, yarr)
if __name__ == "__main__":
from EGGS_labrad.clients import runGUI
runGUI(stability_gui)
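# Note (hedged, not from the original): drawStability() traces the
# lowest-order Mathieu approximation beta**2 ~ a + q**2/2, i.e. the
# boundary curve a = beta**2 - q**2/2; higher-order terms in q are
# neglected, so the curve is only accurate for small q.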
|
[
"EGGS_labrad.clients.Widgets.QCustomGroupBox",
"PyQt5.QtGui.QColor",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QTabWidget",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QRadioButton",
"numpy.power",
"PyQt5.QtWidgets.QWidget.__init__",
"numpy.linspace",
"EGGS_labrad.clients.runGUI",
"PyQt5.QtWidgets.QComboBox",
"pyqtgraph.setConfigOption",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QTreeWidget",
"PyQt5.QtWidgets.QDoubleSpinBox",
"PyQt5.QtGui.QFont",
"pyqtgraph.PlotWidget"
] |
[((15426, 15447), 'EGGS_labrad.clients.runGUI', 'runGUI', (['stability_gui'], {}), '(stability_gui)\n', (15432, 15447), False, 'from EGGS_labrad.clients import runGUI\n'), ((533, 563), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (549, 563), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((998, 1007), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (1005, 1007), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1042, 1071), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['stability_widget'], {}), '(stability_widget)\n', (1053, 1071), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1122, 1149), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Length Scale (μm)"""'], {}), "('Length Scale (μm)')\n", (1128, 1149), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1182, 1197), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""00.00"""'], {}), "('00.00')\n", (1188, 1197), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1470, 1491), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""a-parameter"""'], {}), "('a-parameter')\n", (1476, 1491), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1522, 1538), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""0.0000"""'], {}), "('0.0000')\n", (1528, 1538), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1592, 1613), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""q-parameter"""'], {}), "('q-parameter')\n", (1598, 1613), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1644, 1659), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""0.000"""'], {}), "('0.000')\n", (1650, 1659), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1715, 1743), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""ω Radial (x2π MHz)"""'], {}), "('ω Radial (x2π MHz)')\n", (1721, 1743), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1783, 1798), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""0.000"""'], {}), "('0.000')\n", (1789, 1798), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1854, 1881), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""ω Axial (x2π MHz)"""'], {}), "('ω Axial (x2π MHz)')\n", (1860, 1881), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1921, 1936), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""0.000"""'], {}), "('0.000')\n", (1927, 1936), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((1997, 2027), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Anharmonic Limit (%)"""'], {}), "('Anharmonic Limit (%)')\n", (2003, 2027), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((2060, 2075), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""00.00"""'], {}), "('00.00')\n", (2066, 2075), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4034, 4043), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (4041, 4043), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4075, 4101), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['iontab_widget'], {}), '(iontab_widget)\n', (4086, 4101), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4149, 4168), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""# of ions"""'], {}), "('# of ions')\n", (4155, 4168), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4195, 4211), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (4209, 4211), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4425, 4440), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Ion #"""'], {}), "('Ion #')\n", (4431, 4440), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4464, 4475), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', ([], {}), '()\n', (4473, 4475), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4520, 4544), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Ion Mass (amu)"""'], {}), "('Ion Mass (amu)')\n", (4526, 4544), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((4569, 4585), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (4583, 4585), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((5955, 5964), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (5962, 5964), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((5994, 6018), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['trap_widget'], {}), '(trap_widget)\n', (6005, 6018), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6061, 6080), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""VRF (Vpp)"""'], {}), "('VRF (Vpp)')\n", (6067, 6080), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6108, 6124), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (6122, 6124), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6177, 6196), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""V_off (V)"""'], {}), "('V_off (V)')\n", (6183, 6196), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6225, 6241), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (6239, 6241), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6284, 6307), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""ωRF (x2π MHz)"""'], {}), "('ωRF (x2π MHz)')\n", (6290, 6307), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6345, 6361), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (6359, 6361), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6404, 6421), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""VDC (V)"""'], {}), "('VDC (V)')\n", (6410, 6421), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((6449, 6465), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (6463, 6465), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((7119, 7128), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (7126, 7128), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((7159, 7184), 'PyQt5.QtWidgets.QHBoxLayout', 'QHBoxLayout', (['radio_widget'], {}), '(radio_widget)\n', (7170, 7184), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((7211, 7249), 'PyQt5.QtWidgets.QRadioButton', 'QRadioButton', (['"""Get Values from System"""'], {}), "('Get Values from System')\n", (7223, 7249), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((7276, 7311), 'PyQt5.QtWidgets.QRadioButton', 'QRadioButton', (['"""Manually Set Values"""'], {}), "('Manually Set Values')\n", (7288, 7311), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8476, 8485), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (8483, 8485), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8519, 8547), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['geometry_widget'], {}), '(geometry_widget)\n', (8530, 8547), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8601, 8618), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""r0 (μm)"""'], {}), "('r0 (μm)')\n", (8607, 8618), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8651, 8663), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""κr"""'], {}), "('κr')\n", (8657, 8663), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8696, 8713), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""z0 (μm)"""'], {}), "('z0 (μm)')\n", (8702, 8713), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8746, 8758), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""κz"""'], {}), "('κz')\n", (8752, 8758), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8812, 8828), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (8826, 8828), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8855, 8871), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (8869, 8871), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8898, 8914), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (8912, 8914), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((8941, 8957), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (8955, 8957), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((10626, 10635), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (10633, 10635), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((10669, 10696), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['mathieu_widget'], {}), '(mathieu_widget)\n', (10680, 10696), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((10745, 10782), 'pyqtgraph.setConfigOption', 'pg.setConfigOption', (['"""background"""', '"""k"""'], {}), "('background', 'k')\n", (10763, 10782), True, 'import pyqtgraph as pg\n'), ((10816, 10876), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {'name': '"""Mathieu Stability Display"""', 'border': '(True)'}), "(name='Mathieu Stability Display', border=True)\n", (10829, 10876), True, 'import pyqtgraph as pg\n'), ((11544, 11566), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (11555, 11566), True, 'import numpy as np\n'), ((11859, 11870), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""β"""'], {}), "('β')\n", (11865, 11870), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((11961, 11977), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QDoubleSpinBox', ([], {}), '()\n', (11975, 11977), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((12335, 12359), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['"""Autoscale"""'], {}), "('Autoscale')\n", (12346, 12359), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((12937, 12946), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (12944, 12946), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((12977, 13002), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['eigen_widget'], {}), '(eigen_widget)\n', (12988, 13002), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13067, 13080), 'PyQt5.QtWidgets.QTreeWidget', 'QTreeWidget', ([], {}), '()\n', (13078, 13080), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13241, 13254), 'PyQt5.QtWidgets.QTreeWidget', 'QTreeWidget', ([], {}), '()\n', (13252, 13254), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13718, 13730), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (13728, 13730), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13755, 13764), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (13762, 13764), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((13795, 13820), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['chain_widget'], {}), '(chain_widget)\n', (13806, 13820), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14030, 14039), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (14037, 14039), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14069, 14093), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['trap_widget'], {}), '(trap_widget)\n', (14080, 14093), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14605, 14617), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (14615, 14617), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14767, 14793), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Stability Client"""'], {}), "('Stability Client')\n", (14773, 14793), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((14929, 14946), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['self'], {}), '(self)\n', (14940, 14946), False, 'from PyQt5.QtWidgets import QFrame, QWidget, QLabel, QGridLayout, QGroupBox, QDoubleSpinBox, QPushButton, QTabWidget, QComboBox, QRadioButton, QHBoxLayout, QVBoxLayout, QTreeWidget, QTreeWidgetItem\n'), ((15218, 15240), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (15229, 15240), True, 'import numpy as np\n'), ((11588, 11605), 'numpy.power', 'np.power', (['xarr', '(2)'], {}), '(xarr, 2)\n', (11596, 11605), True, 'import numpy as np\n'), ((12012, 12049), 'PyQt5.QtGui.QFont', 'QFont', (['"""MS Shell Dlg 2"""'], {'pointSize': '(14)'}), "('MS Shell Dlg 2', pointSize=14)\n", (12017, 12049), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((13432, 13492), 'EGGS_labrad.clients.Widgets.QCustomGroupBox', 'QCustomGroupBox', (['self.eigenmode_axial_display', '"""Axial Modes"""'], {}), "(self.eigenmode_axial_display, 'Axial Modes')\n", (13447, 13492), False, 'from EGGS_labrad.clients.Widgets import TextChangingButton, QCustomGroupBox\n'), ((13532, 13594), 'EGGS_labrad.clients.Widgets.QCustomGroupBox', 'QCustomGroupBox', (['self.eigenmode_radial_display', '"""Radial Modes"""'], {}), "(self.eigenmode_radial_display, 'Radial Modes')\n", (13547, 13594), False, 'from EGGS_labrad.clients.Widgets import TextChangingButton, QCustomGroupBox\n'), ((14816, 14848), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(18)'}), '(_SHELL_FONT, pointSize=18)\n', (14821, 14848), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((15256, 15273), 'numpy.power', 'np.power', (['beta', '(2)'], {}), '(beta, 2)\n', (15264, 15273), True, 'import numpy as np\n'), ((2314, 2346), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(22)'}), '(_SHELL_FONT, pointSize=22)\n', (2319, 2346), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((6630, 6662), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(12)'}), '(_SHELL_FONT, pointSize=12)\n', (6635, 6662), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((9117, 9149), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(12)'}), '(_SHELL_FONT, pointSize=12)\n', (9122, 9149), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((11400, 11416), 'PyQt5.QtGui.QColor', 'QColor', (['Qt.white'], {}), '(Qt.white)\n', (11406, 11416), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((11683, 11697), 'PyQt5.QtGui.QColor', 'QColor', (['Qt.red'], {}), '(Qt.red)\n', (11689, 11697), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((11789, 11803), 'PyQt5.QtGui.QColor', 'QColor', (['Qt.red'], {}), '(Qt.red)\n', (11795, 11803), False, 'from PyQt5.QtGui import QFont, QColor\n'), ((15282, 15299), 'numpy.power', 'np.power', (['xarr', '(2)'], {}), '(xarr, 2)\n', (15290, 15299), True, 'import numpy as np\n'), ((4908, 4940), 'PyQt5.QtGui.QFont', 'QFont', (['_SHELL_FONT'], {'pointSize': '(18)'}), '(_SHELL_FONT, pointSize=18)\n', (4913, 4940), False, 'from PyQt5.QtGui import QFont, QColor\n')]
|
"""
Dev: <NAME>
Date: 11/17/19
Program: Cpu vs Cpu War game
"""
import random
def victoryScreen(player, hands):
"""Prints a personalized victory screen to the terminal"""
print('~~~~~~~~~~~~~~~~~~~~~~~~~~')
print(f'~~~~~ {player} wins!! ~~~~~')
print(f'~~~~~~ In {hands} hands ~~~~~~~')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~')
def drawCard(player):
"""Generates and returns a random number that represents a card's value"""
card = random.randint(0, 14)
print(f"{player} drew a {card}")
return card
def main():
"""Contains the game's loop and all conditional checks"""
playerOneScore = 0
playerTwoScore = 0
numOfHands = 0
    while playerOneScore < 10 and playerTwoScore < 10:  # game continues until one of the players scores 10
        playerOneCard = drawCard(player='player1')  # call drawCard() passing 'player1' as the player argument
playerTwoCard = drawCard(player='player2')
if playerOneCard > playerTwoCard:
print('Player1 wins the round\n')
playerOneScore += 1 # increment playerone's score
numOfHands += 1
elif playerTwoCard > playerOneCard:
print('Player2 wins the round\n')
playerTwoScore += 1
numOfHands += 1
else:
print("A tie! no one increases\n")
if playerOneScore == 10:
victoryScreen(player='player1', hands=numOfHands)
elif playerTwoScore == 10:
victoryScreen(player='player2', hands=numOfHands)
if __name__ == '__main__':
    main()
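# Optional (assumption, not in the original): for reproducible games, seed
# the generator first, e.g. random.seed(42), before calling main().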
|
[
"random.randint"
] |
[((485, 506), 'random.randint', 'random.randint', (['(0)', '(14)'], {}), '(0, 14)\n', (499, 506), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.urls import re_path
from . import api
app_name = "connector"
urlpatterns = [
re_path(r"^types/?$", api.get_types, name="types"),
re_path(
r"^instances/?$",
api.get_instances,
name="instances",
),
# re_path(r'^api/instance/new/(?P<dialect>[\w\-]+)/(?P<interface>[\w\-]+)$', api.new_connector, name='connectors.api.new_connector'),
# re_path(r'^api/instance/get/(?P<id>\d+)$', api.get_connector, name='connectors.api.get_connector'),
# re_path(r'^api/instance/delete/?$', api.delete_connector, name='connectors.api.delete_connector'),
# re_path(r'^api/instance/update/?$', api.update_connector, name='connectors.api.update_connector'),
# re_path(r'^api/instance/test/?$', api.test_connector, name='connectors.api.test_connector'),
]
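# Hedged wiring sketch (the URL prefix below is an assumption, not defined in
# this file; app_name above lets include() pick up the 'connector' namespace):
#
#     from django.urls import include, re_path
#     urlpatterns = [
#         re_path(r"^connector/", include("connector.urls")),
#     ]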
|
[
"django.urls.re_path"
] |
[((888, 937), 'django.urls.re_path', 're_path', (['"""^types/?$"""', 'api.get_types'], {'name': '"""types"""'}), "('^types/?$', api.get_types, name='types')\n", (895, 937), False, 'from django.urls import re_path\n'), ((944, 1005), 'django.urls.re_path', 're_path', (['"""^instances/?$"""', 'api.get_instances'], {'name': '"""instances"""'}), "('^instances/?$', api.get_instances, name='instances')\n", (951, 1005), False, 'from django.urls import re_path\n')]
|
from django.shortcuts import render
from assignment.forms import CalculationForm
from assignment.logic import calculate
def calculator(request):
ctx = {}
if request.method == 'POST':
form = CalculationForm(request.POST)
if form.is_valid():
ctx.update(calculate(**form.cleaned_data))
else:
form = CalculationForm()
ctx['form'] = form
return render(request, 'index.html', ctx)
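# Hedged contract note (inferred from the imports above, not verified):
# CalculationForm validates the POSTed operands, and calculate(**cleaned_data)
# returns a dict of results that is merged into the context rendered by
# index.html.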
|
[
"django.shortcuts.render",
"assignment.logic.calculate",
"assignment.forms.CalculationForm"
] |
[((401, 435), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'ctx'], {}), "(request, 'index.html', ctx)\n", (407, 435), False, 'from django.shortcuts import render\n'), ((210, 239), 'assignment.forms.CalculationForm', 'CalculationForm', (['request.POST'], {}), '(request.POST)\n', (225, 239), False, 'from assignment.forms import CalculationForm\n'), ((348, 365), 'assignment.forms.CalculationForm', 'CalculationForm', ([], {}), '()\n', (363, 365), False, 'from assignment.forms import CalculationForm\n'), ((291, 321), 'assignment.logic.calculate', 'calculate', ([], {}), '(**form.cleaned_data)\n', (300, 321), False, 'from assignment.logic import calculate\n')]
|
"""Views for imager_images."""
from django.urls import reverse_lazy
from django.views.generic import CreateView, DetailView, ListView, TemplateView, UpdateView
from imager_images.forms import AlbumForm, PhotoForm
from imager_images.models import Album, Photo
class LibraryView(TemplateView):
"""View for library view."""
template_name = "imager_images/library.html"
def get_context_data(self, **kwargs):
"""Return album and photos for requested user."""
context = super(LibraryView, self).get_context_data(**kwargs)
user = self.request.user.profile
context['photos'] = user.photo.all()
context['albums'] = user.album.all()
return context
class AlbumsView(ListView):
"""View for the user's albums."""
template_name = 'imager_images/albums.html'
model = Album
context_object_name = 'albums'
def get_queryset(self):
"""Overwrite queryset to get all albums."""
user = self.request.user.profile
return user.album.all()
class PhotosView(ListView):
"""View all photos for a user."""
template_name = 'imager_images/photos.html'
model = Photo
context_object_name = 'photos'
def get_queryset(self):
"""Overwrite queryset to get all photos."""
user = self.request.user.profile
return user.photo.all()
class AlbumInfo(DetailView):
"""View for specific photo info."""
template_name = 'imager_images/album_info.html'
model = Album
class PhotoInfo(DetailView):
"""View for specific photo info."""
template_name = 'imager_images/photo_info.html'
model = Photo
class CreatePhoto(CreateView):
"""View to create photo."""
template_name = 'imager_images/photo_form.html'
model = Photo
form_class = PhotoForm
success_url = reverse_lazy('library')
def form_valid(self, form):
"""Validate if form submission successful."""
form.instance.user = self.request.user.profile
return super(CreatePhoto, self).form_valid(form)
class CreateAlbum(CreateView):
"""View to create album."""
template_name = 'imager_images/album_form.html'
model = Album
form_class = AlbumForm
success_url = reverse_lazy('library')
def form_valid(self, form):
"""Validate if form submission successful."""
form.instance.user = self.request.user.profile
return super(CreateAlbum, self).form_valid(form)
class EditPhoto(UpdateView):
"""Edit existing photos."""
template_name = 'imager_images/photo_edit.html'
model = Photo
form_class = PhotoForm
success_url = reverse_lazy('library')
def form_valid(self, form):
"""Validate if form submission successful."""
form.instance.user = self.request.user.profile
return super(EditPhoto, self).form_valid(form)
class EditAlbum(UpdateView):
"""Edit existing albums."""
template_name = 'imager_images/album_edit.html'
model = Album
form_class = AlbumForm
success_url = reverse_lazy('library')
def form_valid(self, form):
"""Validate if form submission successful."""
form.instance.user = self.request.user.profile
return super(EditAlbum, self).form_valid(form)
|
[
"django.urls.reverse_lazy"
] |
[((1818, 1841), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""library"""'], {}), "('library')\n", (1830, 1841), False, 'from django.urls import reverse_lazy\n'), ((2222, 2245), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""library"""'], {}), "('library')\n", (2234, 2245), False, 'from django.urls import reverse_lazy\n'), ((2624, 2647), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""library"""'], {}), "('library')\n", (2636, 2647), False, 'from django.urls import reverse_lazy\n'), ((3024, 3047), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""library"""'], {}), "('library')\n", (3036, 3047), False, 'from django.urls import reverse_lazy\n')]
|
from setuptools import setup, find_packages
import os
import uwsgiit
CLASSIFIERS = [
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
setup(
author="<NAME>",
author_email="<EMAIL>",
name='uwsgiit-py',
version=uwsgiit.__version__,
description='Library for uwsgi.it api',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
url="https://github.com/xrmx/uwsgiit-py",
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'requests>=2',
],
test_suite='uwsgiit.tests',
packages=find_packages(exclude=["test_project", "example.*"]),
include_package_data=True,
    zip_safe=False,
)
|
[
"os.path.dirname",
"setuptools.find_packages"
] |
[((880, 932), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['test_project', 'example.*']"}), "(exclude=['test_project', 'example.*'])\n", (893, 932), False, 'from setuptools import setup, find_packages\n'), ((597, 622), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (612, 622), False, 'import os\n')]
|
import torch
import tensorflow as tf
import tensorboard as tb
# workaround for a known torch/TensorBoard issue: add_embedding fails unless tf.io.gfile points at TensorBoard's stub
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
def log_representation(net, inputs, metadata, writer, step, tag='representation', metadata_header=None,
inputs_are_images=False):
r"""
Computes representations and logs them to tensorboard.
Args:
net (torch.nn.Module): Encoder.
inputs (torch.Tensor): Inputs.
        metadata (torch.Tensor or list): A list of labels; each element will be converted to a string.
        writer (torch.utils.tensorboard.SummaryWriter): Summary writer.
step (int): Global step value to record.
tag (string, optional): Name for the embedding. (default: :obj:`representation`)
metadata_header (list, optional): Metadata header. (default: :obj:`None`)
inputs_are_images (boolean, optional): Set to :obj:`True` if inputs are images. (default: :obj:`False`)
"""
with torch.no_grad():
representation = net(inputs)
representation = representation.view(representation.shape[0], -1).detach()
label_img = inputs if inputs_are_images else None
writer.add_embedding(representation, metadata, tag=tag, global_step=step, metadata_header=metadata_header,
label_img=label_img)
|
[
"torch.no_grad"
] |
[((963, 978), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (976, 978), False, 'import torch\n')]
|
import unittest
import paramak
class TestPortCutterRectangular(unittest.TestCase):
def test_creation(self):
"""Checks a PortCutterRectangular creation."""
test_component = paramak.PortCutterRectangular(
distance=3,
z_pos=0,
height=0.2,
width=0.4,
fillet_radius=0.02,
azimuth_placement_angle=[0, 45, 90, 180]
)
assert test_component.solid is not None
|
[
"paramak.PortCutterRectangular"
] |
[((198, 337), 'paramak.PortCutterRectangular', 'paramak.PortCutterRectangular', ([], {'distance': '(3)', 'z_pos': '(0)', 'height': '(0.2)', 'width': '(0.4)', 'fillet_radius': '(0.02)', 'azimuth_placement_angle': '[0, 45, 90, 180]'}), '(distance=3, z_pos=0, height=0.2, width=0.4,\n fillet_radius=0.02, azimuth_placement_angle=[0, 45, 90, 180])\n', (227, 337), False, 'import paramak\n')]
|
# This Python file uses the following encoding: utf-8
import os, sys
import traceback
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui, QtWidgets  # bind the QtCore/QtWidgets names used below
import PyQt5.uic
from PyQt5.uic import *
from xml.dom import minidom
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
from common import lang
import common.common, common.files, common.dialog, common.qt
from common.vars import *
from common.books import *
class IndexNameWindow(QDialog):
def __init__(self, parent):
super(IndexNameWindow, self).__init__(parent, QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowCloseButtonHint)
PyQt5.uic.loadUi(os.path.dirname(os.path.realpath(__file__)) + os.sep + 'files_name.ui'.replace('/', os.sep), self) # Load the .ui file
lng = parent.lang
self.setWindowTitle(lng['Editor']['ContentTableWindow']['NameWindowTitle'])
self.label.setText(lng['Editor']['ContentTableWindow']['NameWindowLabel'])
self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setText(lng['Editor']['ContentTableWindow']['btnOk'])
self.button_box.button(QtWidgets.QDialogButtonBox.Cancel).setText(lng['Editor']['ContentTableWindow']['btnCancel'])
def open_exec(self, text: str = None):
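        """Show the dialog, optionally pre-filled with `text`; return the entered name, or None if cancelled."""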
try:
if text is not None:
self.line_edit.setText(text)
ret = self.exec_()
if ret == 1:
print('name = ', self.line_edit.text())
return self.line_edit.text()
else:
return None
except Exception:
traceback.print_exc()
class ContentTableWindow(QDialog):
def __init__(self, parent, folder: str):
super(ContentTableWindow, self).__init__(parent, QtCore.Qt.WindowTitleHint | QtCore.Qt.WindowCloseButtonHint)
PyQt5.uic.loadUi(os.path.dirname(os.path.realpath(__file__)) + os.sep + 'content_table_editor.ui'.replace('/', os.sep), self) # Load the .ui file
self.BDD = parent.BDD
self.style = self.BDD.get_param('style')
lng = lang.Lang()
lng.set_lang(self.BDD.get_param('lang'))
self.lang = lng
self.setStyleSheet(get_style_var(self.style,'QDialog'))
self.setWindowTitle(lng['Editor']['ContentTableWindow']['WindowTitle'])
self.list_label.setText(lng['Editor']['ContentTableWindow']['ListLabel'])
self.addindex_label.setText(lng['Editor']['ContentTableWindow']['AddIndexLabel'])
self.addindex_line_edit.setPlaceholderText(lng['Editor']['ContentTableWindow']['AddIndexPlaceholder'])
self.modify_index_label.setText(lng['Editor']['ContentTableWindow']['ModifyIndexLabel'])
self.btn_rename.setText(lng['Editor']['ContentTableWindow']['BtnRename'])
self.btn_delete.setText(lng['Editor']['ContentTableWindow']['BtnDelete'])
self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setText(lng['Editor']['ContentTableWindow']['btnOk'])
self.button_box.button(QtWidgets.QDialogButtonBox.Cancel).setText(lng['Editor']['ContentTableWindow']['btnCancel'])
self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setStyleSheet(get_style_var(self.style, 'fullAltButton'))
self.button_box.button(QtWidgets.QDialogButtonBox.Cancel).setStyleSheet(get_style_var(self.style, 'fullAltButton'))
# self.list_content = QtWidgets.QListWidget()
self.addindex_btn.clicked.connect(self.new_index)
self.btn_rename.clicked.connect(self.rename)
self.btn_delete.clicked.connect(self.delete)
self.folder = folder
self.selected_folder = ''
self.list_data = dict()
self.files = []
def open_exec(self, text: str = None, url: str = None):
try:
self.list_content.clear()
self.addindex_combobox.clear()
self.files = common.files.list_directory_tree(self.folder, 'html|xhtml')
files = common.files.list_directory(self.folder, 'html|xhtml')
self.addindex_combobox.addItem("")
print(self.files)
for file in files:
self.addindex_combobox.addItem(file.replace(self.folder, ""))
li = common.files.list_directory(self.folder, "opf")
data = ''
with open(li[0]) as myfile:
data = myfile.read()
toc_type, chapters = parse_content_table(
data,
li[0].replace(self.folder, '').replace(li[0][li[0].rindex(os.sep) + 1:], '').replace(os.sep, '/'),
self.folder
)
for chapter in chapters:
try:
item = QtWidgets.QListWidgetItem()
item.setText(chapter['name'] + " (" + chapter['src'] + ")")
item.setData(97, chapter['name'])
item.setData(98, chapter['src'])
self.list_content.addItem(item)
except Exception:
traceback.print_exc()
ret = self.exec_()
content_table = []
            total = self.list_content.count()
            i = 0
            while i < total:
child = self.list_content.item(i)
content_table.append({'name': child.data(97), 'url': child.data(98).replace("\\", "/")})
i += 1
print(content_table)
if ret == 1:
return content_table
else:
return None
except Exception:
traceback.print_exc()
def new_index(self):
# self.addindex_line_edit = QLineEdit()
# self.addindex_combobox = QComboBox()
name = self.addindex_line_edit.text().strip()
url = self.addindex_combobox.currentText().strip()
if name == "" or name is None or url == "" or url is None:
return
item = QListWidgetItem()
item.setData(97, name)
item.setData(98, url)
item.setText(name + " (" + url + ")")
# self.list_content = QListWidget()
self.list_content.insertItem(self.list_content.count(), item)
self.addindex_combobox.setCurrentIndex(0)
self.addindex_line_edit.setText("")
def rename(self):
try:
if self.list_content.currentIndex().row() == -1:
return
# self.list_content = QListWidget()
wn = IndexNameWindow(self)
url = self.list_content.item(self.list_content.currentIndex().row()).data(98)
tx = self.list_content.item(self.list_content.currentIndex().row()).data(97)
name = wn.open_exec(tx)
if name is not None:
self.list_content.item(self.list_content.currentIndex().row()).setData(97, name)
self.list_content.item(self.list_content.currentIndex().row()).setText(name + " (" + url + ")")
except Exception:
traceback.print_exc()
def delete(self):
try:
if self.list_content.currentIndex().row() == -1:
return
# self.list_content = QListWidget()
self.list_content.takeItem(self.list_content.currentIndex().row())
# self.list_content.removeItemWidget(self.list_content.item(self.list_content.currentIndex().row()))
except Exception:
traceback.print_exc()
|
[
"os.path.realpath",
"traceback.print_exc",
"common.lang.Lang"
] |
[((260, 286), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (276, 286), False, 'import os, sys\n'), ((2033, 2044), 'common.lang.Lang', 'lang.Lang', ([], {}), '()\n', (2042, 2044), False, 'from common import lang\n'), ((1563, 1584), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1582, 1584), False, 'import traceback\n'), ((5481, 5502), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5500, 5502), False, 'import traceback\n'), ((6877, 6898), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6896, 6898), False, 'import traceback\n'), ((7298, 7319), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7317, 7319), False, 'import traceback\n'), ((649, 675), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (665, 675), False, 'import os, sys\n'), ((1826, 1852), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1842, 1852), False, 'import os, sys\n'), ((4948, 4969), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4967, 4969), False, 'import traceback\n')]
|
from django.contrib import admin
from .models import employees
admin.site.register(employees)
|
[
"django.contrib.admin.site.register"
] |
[((65, 95), 'django.contrib.admin.site.register', 'admin.site.register', (['employees'], {}), '(employees)\n', (84, 95), False, 'from django.contrib import admin\n')]
|
import json
from django.http import HttpResponse
from admin.decorators import superuser_only
from instances.models import Instance
from logs.models import Logs
def addlogmsg(user, instance, message):
"""
:param user:
:param instance:
:param message:
:return:
"""
add_log_msg = Logs(user=user, instance=instance, message=message)
add_log_msg.save()
@superuser_only
def vm_logs(request, vname):
"""
:param request:
:param vname:
:return:
"""
vm = Instance.objects.get(name=vname)
logs_ = Logs.objects.filter(instance=vm.name, date__gte=vm.created).order_by("-date")
logs = []
    for entry in logs_:
        log = dict()
        log["user"] = entry.user
        log["instance"] = entry.instance
        log["message"] = entry.message
        log["date"] = entry.date.strftime("%x %X")
logs.append(log)
return HttpResponse(json.dumps(logs))
|
[
"logs.models.Logs.objects.filter",
"json.dumps",
"instances.models.Instance.objects.get",
"logs.models.Logs"
] |
[((309, 360), 'logs.models.Logs', 'Logs', ([], {'user': 'user', 'instance': 'instance', 'message': 'message'}), '(user=user, instance=instance, message=message)\n', (313, 360), False, 'from logs.models import Logs\n'), ((508, 540), 'instances.models.Instance.objects.get', 'Instance.objects.get', ([], {'name': 'vname'}), '(name=vname)\n', (528, 540), False, 'from instances.models import Instance\n'), ((884, 900), 'json.dumps', 'json.dumps', (['logs'], {}), '(logs)\n', (894, 900), False, 'import json\n'), ((553, 612), 'logs.models.Logs.objects.filter', 'Logs.objects.filter', ([], {'instance': 'vm.name', 'date__gte': 'vm.created'}), '(instance=vm.name, date__gte=vm.created)\n', (572, 612), False, 'from logs.models import Logs\n')]
|
# Copyright (c) 2021 MobileCoin. All rights reserved.
from decimal import Decimal
import factory
import pytz
from django.utils import timezone
from datetime import timedelta
from faker.factory import Factory
from faker.providers import date_time, internet, phone_number, lorem
Faker = Factory.create
fake = Faker()
fake.add_provider(date_time)
fake.add_provider(internet)
fake.add_provider(phone_number)
fake.add_provider(lorem)
fake.seed(0)
dt = fake.date_time_between(start_date='+5d', end_date='+10d', tzinfo=pytz.utc)
from mobot_client.models import (
Drop,
Store,
DropSession,
SessionState,
Customer,
CustomerStorePreferences,
Order,
BonusCoin,
DropType,
Item,
Sku,
)
class StoreFactory(factory.django.DjangoModelFactory):
class Meta:
model = Store
django_get_or_create = ('phone_number',)
id = factory.Faker('pyint')
name = factory.Sequence(lambda n: f"Mobot Store #{n}")
phone_number = factory.Sequence(lambda n: "+448211" + "%06d" % (n + 100000))
description = fake.paragraph(nb_sentences=10)
privacy_policy_url = factory.Sequence(lambda n: f"https://example.com/privacy_{n}")
class DropFactory(factory.django.DjangoModelFactory):
class Meta:
model = Drop
drop_type = DropType.AIRDROP
store = factory.SubFactory(StoreFactory)
id = factory.Sequence(lambda n: n)
pre_drop_description = factory.Sequence(lambda n: f"Item drop {n}")
advertisment_start_time = fake.date_time_between(start_date='-2d', end_date='+10d', tzinfo=pytz.utc)
start_time = timezone.now() - timedelta(days=2)
end_time = timezone.now() + timedelta(days=2)
number_restriction = factory.Iterator(['+44', '+1'])
timezone = 'PST'
initial_coin_amount_mob = Decimal(f"{float(0.2):4f}")
@factory.lazy_attribute
def store_id(self):
return self.store.pk
@factory.lazy_attribute
def item_id(self):
if hasattr(self, 'item'):
return self.item.pk
class OldDropFactory(DropFactory):
start_time = timezone.now() - timedelta(days=3)
end_time = timezone.now() - timedelta(days=1)
advertisment_start_time = fake.date_time_between(start_date='-15d', end_date='-1d', tzinfo=pytz.UTC)
class ItemFactory(factory.django.DjangoModelFactory):
class Meta:
model = Item
@factory.post_generation
def add_items_to_store(obj, created, *args, **kwargs):
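        # post-generation hook: register the freshly built item with its store's item set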
obj.store.items.add(obj)
id = factory.Faker('pyint')
name = f"{factory.Faker('name')} {factory.Faker('sentence', nb_words=5)}"
price_in_mob = factory.Faker('pydecimal', positive=True, left_digits=3, right_digits=6)
description = factory.Faker('sentence', nb_words=50)
short_description = factory.Faker('sentence', nb_words=10)
image_link = factory.Sequence(lambda n: f"https://img.com/image{n}")
store = factory.SubFactory(StoreFactory)
@factory.lazy_attribute
def store_id(self):
return self.store.id
class SkuFactory(factory.django.DjangoModelFactory):
class Meta:
model = Sku
identifier = factory.Faker('pystr')
class CustomerFactory(factory.django.DjangoModelFactory):
class Meta:
model = Customer
django_get_or_create = ('phone_number',)
phone_number = factory.Sequence(lambda n: f"+447911" + "%06d" % (n + 100000))
class BonusCoinFactory(factory.django.DjangoModelFactory):
class Meta:
model = BonusCoin
drop = factory.SubFactory(DropFactory, drop_type=DropType.AIRDROP)
amount_mob = factory.Faker('pydecimal', positive=True, left_digits=3, right_digits=6)
number_available_at_start = 10
class ItemDropFactory(DropFactory):
drop_type = DropType.ITEM
class AirDropFactory(DropFactory):
drop_type = DropType.AIRDROP
class DropSessionFactory(factory.django.DjangoModelFactory):
class Meta:
model = DropSession
customer = factory.SubFactory(CustomerFactory)
drop = factory.SubFactory(DropFactory)
class OldDropSessionFactory(DropSessionFactory):
drop = factory.SubFactory(OldDropFactory)
class OrderFactory(factory.django.DjangoModelFactory):
class Meta:
model = Order
inline_args = ('sku',)
drop_session = factory.SubFactory(DropSessionFactory)
@factory.lazy_attribute
def customer(self):
return self.drop_session.customer
class GenericItemDropFactory(factory.django.DjangoModelFactory):
pass
|
[
"factory.Faker",
"django.utils.timezone.now",
"factory.SubFactory",
"factory.Sequence",
"factory.Iterator",
"datetime.timedelta"
] |
[((876, 898), 'factory.Faker', 'factory.Faker', (['"""pyint"""'], {}), "('pyint')\n", (889, 898), False, 'import factory\n'), ((910, 957), 'factory.Sequence', 'factory.Sequence', (["(lambda n: f'Mobot Store #{n}')"], {}), "(lambda n: f'Mobot Store #{n}')\n", (926, 957), False, 'import factory\n'), ((977, 1038), 'factory.Sequence', 'factory.Sequence', (["(lambda n: '+448211' + '%06d' % (n + 100000))"], {}), "(lambda n: '+448211' + '%06d' % (n + 100000))\n", (993, 1038), False, 'import factory\n'), ((1114, 1176), 'factory.Sequence', 'factory.Sequence', (["(lambda n: f'https://example.com/privacy_{n}')"], {}), "(lambda n: f'https://example.com/privacy_{n}')\n", (1130, 1176), False, 'import factory\n'), ((1316, 1348), 'factory.SubFactory', 'factory.SubFactory', (['StoreFactory'], {}), '(StoreFactory)\n', (1334, 1348), False, 'import factory\n'), ((1358, 1387), 'factory.Sequence', 'factory.Sequence', (['(lambda n: n)'], {}), '(lambda n: n)\n', (1374, 1387), False, 'import factory\n'), ((1415, 1459), 'factory.Sequence', 'factory.Sequence', (["(lambda n: f'Item drop {n}')"], {}), "(lambda n: f'Item drop {n}')\n", (1431, 1459), False, 'import factory\n'), ((1692, 1723), 'factory.Iterator', 'factory.Iterator', (["['+44', '+1']"], {}), "(['+44', '+1'])\n", (1708, 1723), False, 'import factory\n'), ((2472, 2494), 'factory.Faker', 'factory.Faker', (['"""pyint"""'], {}), "('pyint')\n", (2485, 2494), False, 'import factory\n'), ((2593, 2665), 'factory.Faker', 'factory.Faker', (['"""pydecimal"""'], {'positive': '(True)', 'left_digits': '(3)', 'right_digits': '(6)'}), "('pydecimal', positive=True, left_digits=3, right_digits=6)\n", (2606, 2665), False, 'import factory\n'), ((2685, 2723), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(50)'}), "('sentence', nb_words=50)\n", (2698, 2723), False, 'import factory\n'), ((2748, 2786), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(10)'}), "('sentence', nb_words=10)\n", (2761, 2786), False, 'import factory\n'), ((2804, 2859), 'factory.Sequence', 'factory.Sequence', (["(lambda n: f'https://img.com/image{n}')"], {}), "(lambda n: f'https://img.com/image{n}')\n", (2820, 2859), False, 'import factory\n'), ((2872, 2904), 'factory.SubFactory', 'factory.SubFactory', (['StoreFactory'], {}), '(StoreFactory)\n', (2890, 2904), False, 'import factory\n'), ((3097, 3119), 'factory.Faker', 'factory.Faker', (['"""pystr"""'], {}), "('pystr')\n", (3110, 3119), False, 'import factory\n'), ((3290, 3352), 'factory.Sequence', 'factory.Sequence', (["(lambda n: f'+447911' + '%06d' % (n + 100000))"], {}), "(lambda n: f'+447911' + '%06d' % (n + 100000))\n", (3306, 3352), False, 'import factory\n'), ((3468, 3527), 'factory.SubFactory', 'factory.SubFactory', (['DropFactory'], {'drop_type': 'DropType.AIRDROP'}), '(DropFactory, drop_type=DropType.AIRDROP)\n', (3486, 3527), False, 'import factory\n'), ((3545, 3617), 'factory.Faker', 'factory.Faker', (['"""pydecimal"""'], {'positive': '(True)', 'left_digits': '(3)', 'right_digits': '(6)'}), "('pydecimal', positive=True, left_digits=3, right_digits=6)\n", (3558, 3617), False, 'import factory\n'), ((3913, 3948), 'factory.SubFactory', 'factory.SubFactory', (['CustomerFactory'], {}), '(CustomerFactory)\n', (3931, 3948), False, 'import factory\n'), ((3960, 3991), 'factory.SubFactory', 'factory.SubFactory', (['DropFactory'], {}), '(DropFactory)\n', (3978, 3991), False, 'import factory\n'), ((4054, 4088), 'factory.SubFactory', 'factory.SubFactory', (['OldDropFactory'], {}), '(OldDropFactory)\n', (4072, 4088), False, 'import factory\n'), ((4235, 4273), 'factory.SubFactory', 'factory.SubFactory', (['DropSessionFactory'], {}), '(DropSessionFactory)\n', (4253, 4273), False, 'import factory\n'), ((1582, 1596), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1594, 1596), False, 'from django.utils import timezone\n'), ((1599, 1616), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (1608, 1616), False, 'from datetime import timedelta\n'), ((1632, 1646), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1644, 1646), False, 'from django.utils import timezone\n'), ((1649, 1666), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (1658, 1666), False, 'from datetime import timedelta\n'), ((2057, 2071), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2069, 2071), False, 'from django.utils import timezone\n'), ((2074, 2091), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (2083, 2091), False, 'from datetime import timedelta\n'), ((2107, 2121), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2119, 2121), False, 'from django.utils import timezone\n'), ((2124, 2141), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2133, 2141), False, 'from datetime import timedelta\n'), ((2509, 2530), 'factory.Faker', 'factory.Faker', (['"""name"""'], {}), "('name')\n", (2522, 2530), False, 'import factory\n'), ((2534, 2571), 'factory.Faker', 'factory.Faker', (['"""sentence"""'], {'nb_words': '(5)'}), "('sentence', nb_words=5)\n", (2547, 2571), False, 'import factory\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..dct2d import Dct2d
EPS = 1e-10
def softmax(a, b, factor=1):
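    # differentiable "soft maximum": blend a and b elementwise, weighted by a
    # softmax over the stacked pair (factor sharpens the selection)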
concat = torch.cat([a.unsqueeze(-1), b.unsqueeze(-1)], dim=-1)
softmax_factors = F.softmax(concat * factor, dim=-1)
return a * softmax_factors[:,:,:,:,0] + b * softmax_factors[:,:,:,:,1]
class WatsonDistance(nn.Module):
"""
    Loss function based on Watson's perceptual distance.
Based on DCT quantization
"""
def __init__(self, blocksize=8, trainable=False, reduction='sum'):
"""
Parameters:
blocksize: int, size of the Blocks for discrete cosine transform
trainable: bool, if True parameters of the loss are trained and dropout is enabled.
reduction: 'sum' or 'none', determines return format
"""
super().__init__()
# input mapping
blocksize = torch.as_tensor(blocksize)
# module to perform 2D blockwise DCT
self.add_module('dct', Dct2d(blocksize=blocksize.item(), interleaving=False))
# parameters, initialized with values from watson paper
self.blocksize = nn.Parameter(blocksize, requires_grad=False)
if self.blocksize == 8:
# init with Jpeg QM
self.t_tild = nn.Parameter(torch.log(torch.tensor( # log-scaled weights
[[1.40, 1.01, 1.16, 1.66, 2.40, 3.43, 4.79, 6.56],
[1.01, 1.45, 1.32, 1.52, 2.00, 2.71, 3.67, 4.93],
[1.16, 1.32, 2.24, 2.59, 2.98, 3.64, 4.60, 5.88],
[1.66, 1.52, 2.59, 3.77, 4.55, 5.30, 6.28, 7.60],
[2.40, 2.00, 2.98, 4.55, 6.15, 7.46, 8.71, 10.17],
[3.43, 2.71, 3.64, 5.30, 7.46, 9.62, 11.58, 13.51],
[4.79, 3.67, 4.60, 6.28, 8.71, 11.58, 14.50, 17.29],
[6.56, 4.93, 5.88, 7.60, 10.17, 13.51, 17.29, 21.15]]
)), requires_grad=trainable)
else:
# init with uniform QM
self.t_tild = nn.Parameter(torch.zeros((self.blocksize, self.blocksize)), requires_grad=trainable)
# other default parameters
self.alpha = nn.Parameter(torch.tensor(0.649), requires_grad=trainable) # luminance masking
w = torch.tensor(0.7) # contrast masking
        self.w_tild = nn.Parameter(torch.log(w / (1 - w)), requires_grad=trainable) # inverse of sigmoid
self.beta = nn.Parameter(torch.tensor(4.), requires_grad=trainable) # pooling
# dropout for training
self.dropout = nn.Dropout(0.5 if trainable else 0)
# reduction
self.reduction = reduction
if reduction not in ['sum', 'none']:
raise Exception('Reduction "{}" not supported. Valid values are: "sum", "none".'.format(reduction))
@property
def t(self):
# returns QM
qm = torch.exp(self.t_tild)
return qm
@property
def w(self):
# return luminance masking parameter
return torch.sigmoid(self.w_tild)
def forward(self, input, target):
# dct
c0 = self.dct(target)
c1 = self.dct(input)
        N, K, B, _ = c0.shape  # N images, K blocks, each B x B
# luminance masking
avg_lum = torch.mean(c0[:,:,0,0])
t_l = self.t.view(1, 1, B, B).expand(N, K, B, B)
t_l = t_l * (((c0[:,:,0,0] + EPS) / (avg_lum + EPS)) ** self.alpha).view(N, K, 1, 1)
# contrast masking
s = softmax(t_l, (c0.abs() + EPS)**self.w * t_l**(1 - self.w))
# pooling
watson_dist = (((c0 - c1) / s).abs() + EPS) ** self.beta
watson_dist = self.dropout(watson_dist) + EPS
watson_dist = torch.sum(watson_dist, dim=(1,2,3))
watson_dist = watson_dist ** (1 / self.beta)
# reduction
if self.reduction == 'sum':
watson_dist = torch.sum(watson_dist)
return watson_dist
|
[
"torch.nn.Parameter",
"torch.nn.Dropout",
"torch.mean",
"torch.nn.functional.softmax",
"torch.exp",
"torch.sigmoid",
"torch.zeros",
"torch.as_tensor",
"torch.sum",
"torch.log",
"torch.tensor"
] |
[((225, 259), 'torch.nn.functional.softmax', 'F.softmax', (['(concat * factor)'], {'dim': '(-1)'}), '(concat * factor, dim=-1)\n', (234, 259), True, 'import torch.nn.functional as F\n'), ((893, 919), 'torch.as_tensor', 'torch.as_tensor', (['blocksize'], {}), '(blocksize)\n', (908, 919), False, 'import torch\n'), ((1154, 1198), 'torch.nn.Parameter', 'nn.Parameter', (['blocksize'], {'requires_grad': '(False)'}), '(blocksize, requires_grad=False)\n', (1166, 1198), True, 'import torch.nn as nn\n'), ((2317, 2334), 'torch.tensor', 'torch.tensor', (['(0.7)'], {}), '(0.7)\n', (2329, 2334), False, 'import torch\n'), ((2607, 2642), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5 if trainable else 0)'], {}), '(0.5 if trainable else 0)\n', (2617, 2642), True, 'import torch.nn as nn\n'), ((2930, 2952), 'torch.exp', 'torch.exp', (['self.t_tild'], {}), '(self.t_tild)\n', (2939, 2952), False, 'import torch\n'), ((3067, 3093), 'torch.sigmoid', 'torch.sigmoid', (['self.w_tild'], {}), '(self.w_tild)\n', (3080, 3093), False, 'import torch\n'), ((3304, 3330), 'torch.mean', 'torch.mean', (['c0[:, :, 0, 0]'], {}), '(c0[:, :, 0, 0])\n', (3314, 3330), False, 'import torch\n'), ((3753, 3790), 'torch.sum', 'torch.sum', (['watson_dist'], {'dim': '(1, 2, 3)'}), '(watson_dist, dim=(1, 2, 3))\n', (3762, 3790), False, 'import torch\n'), ((2239, 2258), 'torch.tensor', 'torch.tensor', (['(0.649)'], {}), '(0.649)\n', (2251, 2258), False, 'import torch\n'), ((2389, 2411), 'torch.log', 'torch.log', (['(w / (1 - w))'], {}), '(w / (1 - w))\n', (2398, 2411), False, 'import torch\n'), ((2491, 2508), 'torch.tensor', 'torch.tensor', (['(4.0)'], {}), '(4.0)\n', (2503, 2508), False, 'import torch\n'), ((3925, 3947), 'torch.sum', 'torch.sum', (['watson_dist'], {}), '(watson_dist)\n', (3934, 3947), False, 'import torch\n'), ((2085, 2130), 'torch.zeros', 'torch.zeros', (['(self.blocksize, self.blocksize)'], {}), '((self.blocksize, self.blocksize))\n', (2096, 2130), False, 'import torch\n'), ((1312, 1747), 'torch.tensor', 'torch.tensor', (['[[1.4, 1.01, 1.16, 1.66, 2.4, 3.43, 4.79, 6.56], [1.01, 1.45, 1.32, 1.52, \n 2.0, 2.71, 3.67, 4.93], [1.16, 1.32, 2.24, 2.59, 2.98, 3.64, 4.6, 5.88],\n [1.66, 1.52, 2.59, 3.77, 4.55, 5.3, 6.28, 7.6], [2.4, 2.0, 2.98, 4.55, \n 6.15, 7.46, 8.71, 10.17], [3.43, 2.71, 3.64, 5.3, 7.46, 9.62, 11.58, \n 13.51], [4.79, 3.67, 4.6, 6.28, 8.71, 11.58, 14.5, 17.29], [6.56, 4.93,\n 5.88, 7.6, 10.17, 13.51, 17.29, 21.15]]'], {}), '([[1.4, 1.01, 1.16, 1.66, 2.4, 3.43, 4.79, 6.56], [1.01, 1.45, \n 1.32, 1.52, 2.0, 2.71, 3.67, 4.93], [1.16, 1.32, 2.24, 2.59, 2.98, 3.64,\n 4.6, 5.88], [1.66, 1.52, 2.59, 3.77, 4.55, 5.3, 6.28, 7.6], [2.4, 2.0, \n 2.98, 4.55, 6.15, 7.46, 8.71, 10.17], [3.43, 2.71, 3.64, 5.3, 7.46, \n 9.62, 11.58, 13.51], [4.79, 3.67, 4.6, 6.28, 8.71, 11.58, 14.5, 17.29],\n [6.56, 4.93, 5.88, 7.6, 10.17, 13.51, 17.29, 21.15]])\n', (1324, 1747), False, 'import torch\n')]
|
import os, platform
import dapt
config = dapt.Config(path='config.json')
db = dapt.db.Delimited_file('parameters.csv', delimiter=',')
params = dapt.Param(db, config=config)
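# process parameter sets one at a time; next_parameters() returns None once the table is exhausted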
p = params.next_parameters()
while p is not None:
dapt.tools.create_XML(p, default_settings="PhysiCell_settings_default.xml", save_settings="PhysiCell_settings.xml")
params.update_status(p["id"], 'running simulation')
if platform.system() == 'Windows':
os.system("biorobots.exe")
else:
os.system("./biorobots")
params.successful(p["id"])
p = params.next_parameters()
|
[
"dapt.db.Delimited_file",
"dapt.Param",
"os.system",
"platform.system",
"dapt.Config",
"dapt.tools.create_XML"
] |
[((42, 73), 'dapt.Config', 'dapt.Config', ([], {'path': '"""config.json"""'}), "(path='config.json')\n", (53, 73), False, 'import dapt\n'), ((79, 134), 'dapt.db.Delimited_file', 'dapt.db.Delimited_file', (['"""parameters.csv"""'], {'delimiter': '""","""'}), "('parameters.csv', delimiter=',')\n", (101, 134), False, 'import dapt\n'), ((144, 173), 'dapt.Param', 'dapt.Param', (['db'], {'config': 'config'}), '(db, config=config)\n', (154, 173), False, 'import dapt\n'), ((230, 349), 'dapt.tools.create_XML', 'dapt.tools.create_XML', (['p'], {'default_settings': '"""PhysiCell_settings_default.xml"""', 'save_settings': '"""PhysiCell_settings.xml"""'}), "(p, default_settings='PhysiCell_settings_default.xml',\n save_settings='PhysiCell_settings.xml')\n", (251, 349), False, 'import dapt\n'), ((411, 428), 'platform.system', 'platform.system', ([], {}), '()\n', (426, 428), False, 'import os, platform\n'), ((451, 477), 'os.system', 'os.system', (['"""biorobots.exe"""'], {}), "('biorobots.exe')\n", (460, 477), False, 'import os, platform\n'), ((496, 520), 'os.system', 'os.system', (['"""./biorobots"""'], {}), "('./biorobots')\n", (505, 520), False, 'import os, platform\n')]
|
from django.template import Library
from django.forms.models import model_to_dict
from django.contrib.auth.models import User
from books.models import Book
from libraries.models import BookCopy, Lending, Reading
register = Library()
@register.inclusion_tag('books/tags/book_tag.html')
def render_book(book: Book):
return model_to_dict(book)
@register.inclusion_tag('books/tags/google_book_tag.html')
def render_google_book(book: dict):
return book
@register.inclusion_tag('books/tags/book_copy_tag.html')
def render_book_copy(copy: BookCopy, user: User, **kwargs):
context = book_copy_to_dict(copy)
clean = kwargs.get('clean', False)
context['clean'] = clean
if clean:
context['only_description'] = True
else:
context['only_description'] = kwargs.get('only_description', False)
library = kwargs.get('library')
context['user_library'] = user.userprofile.home_library.pk
context['is_owner'] = library == user.userprofile.home_library
is_book_owner = copy.library == user.userprofile.home_library
context['is_book_owner'] = is_book_owner
    is_kept_by_user = copy.is_kept_by(user.userprofile)
    context['is_kept_by_user'] = is_kept_by_user
    context['is_read'] = Reading.objects.filter(copy=copy, reader=user.userprofile, is_completed=False).exists()
try:
lending = copy.lending_set.get(is_completed=False)
context['lending'] = lending
if library == lending.borrower:
context['borrowed'] = True
context['lender'] = copy.library.owner.user.username if copy.library else None
else:
context['lent'] = True
context['borrower'] = lending.borrower.owner.user.username if lending.borrower else None
context['is_return_available'] = is_book_owner or (lending.borrower and user == lending.borrower.owner.user)
except Lending.DoesNotExist:
context['is_lending_available'] = is_book_owner
return context
def book_copy_to_dict(copy: BookCopy):
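    """Flatten a BookCopy and its Book into one dict; the book's id is dropped so it cannot clobber the copy's."""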
book_dict = model_to_dict(copy.book)
book_dict.pop('id')
copy_dict = model_to_dict(copy)
copy_dict.update(book_dict)
return copy_dict
|
[
"django.forms.models.model_to_dict",
"django.template.Library",
"libraries.models.Reading.objects.filter"
] |
[((225, 234), 'django.template.Library', 'Library', ([], {}), '()\n', (232, 234), False, 'from django.template import Library\n'), ((329, 348), 'django.forms.models.model_to_dict', 'model_to_dict', (['book'], {}), '(book)\n', (342, 348), False, 'from django.forms.models import model_to_dict\n'), ((1132, 1165), 'libraries.models.Reading.objects.filter', 'Reading.objects.filter', ([], {'copy': 'copy'}), '(copy=copy)\n', (1154, 1165), False, 'from libraries.models import BookCopy, Lending, Reading\n'), ((2274, 2298), 'django.forms.models.model_to_dict', 'model_to_dict', (['copy.book'], {}), '(copy.book)\n', (2287, 2298), False, 'from django.forms.models import model_to_dict\n'), ((2339, 2358), 'django.forms.models.model_to_dict', 'model_to_dict', (['copy'], {}), '(copy)\n', (2352, 2358), False, 'from django.forms.models import model_to_dict\n'), ((1296, 1374), 'libraries.models.Reading.objects.filter', 'Reading.objects.filter', ([], {'copy': 'copy', 'reader': 'user.userprofile', 'is_completed': '(False)'}), '(copy=copy, reader=user.userprofile, is_completed=False)\n', (1318, 1374), False, 'from libraries.models import BookCopy, Lending, Reading\n'), ((1437, 1515), 'libraries.models.Reading.objects.filter', 'Reading.objects.filter', ([], {'copy': 'copy', 'reader': 'user.userprofile', 'is_completed': '(False)'}), '(copy=copy, reader=user.userprofile, is_completed=False)\n', (1459, 1515), False, 'from libraries.models import BookCopy, Lending, Reading\n')]
|
# -*- coding: utf-8 -*-
import cPickle
from common import json
try:
import yaml
has_yaml = True
except ImportError:
has_yaml = False
from py2xml import PythonToXML
from sajson import SimpleAPIEncoder, SimpleAPIDecoder
__all__ = ('formatters', 'Formatter')
class FormattersSingleton(object):
"""This singleton takes care of all registered formatters. You can easily
register your own formatter for use in both the Namespace and python client.
"""
_formatters = {}
def __new__(cls):
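        # classic singleton: the one instance is cached on the class as __it__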
it = cls.__dict__.get("__it__")
if it is not None:
return it
cls.__it__ = it = object.__new__(cls)
return it
def register(self, name, formatter, override=False):
"""Register the given formatter. If there's already an formatter with
the given `name`, you can override by setting `override` to ``True``.
"""
if not isinstance(formatter(None, None), Formatter):
raise TypeError(u"You can only register a Formatter not a %s" % formatter)
if name in self._formatters and not override:
raise AttributeError(u"%s is already a valid format type, try a new name" % name)
self._formatters[name] = formatter
def get_defaults(self):
result = filter(lambda item: getattr(item[1], '__active_by_default__', True),
self._formatters.items())
return dict(result).keys()
def copy(self):
return dict(**self._formatters)
def __contains__(self, value):
return value in self._formatters
def __getitem__(self, name):
return self._formatters.get(name)
def __setitem__(self, *args):
raise AttributeError
formatters = FormattersSingleton()
class Formatter(object):
"""Baseclass for Formatter-implementations"""
def __init__(self, sapi_request, callback):
"""A Formatter takes the original http request (Django's one) and a
callback name, e. g. for JSONP."""
self.sapi_request = sapi_request
self.callback = callback
def build(self, value):
"""Takes care of the building process and returns the encoded data."""
raise NotImplementedError
def kwargs(self, value, action='build'):
"""Is called within ``simpleapi``. This method invokes both the parse
and build function when needed."""
raise NotImplementedError
def parse(self, value):
"""Takes care of the parsing proccess and returns the decoded data."""
raise NotImplementedError
class JSONFormatter(Formatter):
"""Formatter for the JSON-format. Used by default by the python client and
by many Javascript-Frameworks."""
__mime__ = "application/json"
def build(self, value):
return json.dumps(value, cls=SimpleAPIEncoder)
def kwargs(self, value, action='build'):
if action == 'build':
return self.build(value)
elif action == 'parse':
return self.parse(value)
def parse(self, value):
return json.loads(value, cls=SimpleAPIDecoder)
class JSONPFormatter(Formatter):
"""Formatter for JSONP-format. Used for cross-domain requests. If `callback`
isn't provided, `simpleapiCallback` is used."""
__mime__ = "application/javascript"
def build(self, value):
func = self.callback or 'simpleapiCallback'
result = u'%(func)s(%(data)s)' % {'func': func.decode("utf-8"), 'data': json.dumps(value)}
return result.encode("utf-8")
    def kwargs(self, value, action='build'):
        if action == 'build':
            return self.build(value)
        elif action == 'parse':
            return self.parse(value)
def parse(self, value):
return json.loads(value, cls=SimpleAPIDecoder)
class ValueFormatter(Formatter):
"""Basic formatter for simple, fast and tiny transports (it has a lot of
limitations, though)."""
__mime__ = "text/html"
def build(self, value):
return value
def kwargs(self, value, action='build'):
if action == 'build':
return self.build(value)
elif action == 'parse':
return self.parse(value)
def parse(self, value):
return unicode(value)
class PickleFormatter(Formatter):
"""Formatter for use the cPickle python module which supports python object
serialization. It has the fewest limitations (ie. it can also serialize
datetime objects), but is a security risk and should only be used in a
trusted environment. It's strongly recommended that you use authentication
mechanismen to protect your namespace. The formatter is not activated by
default and can be enabled by putting 'pickle' into Namespace's ``__input__``
and ``__output__`` configuration. """
__mime__ = "application/octet-stream"
__active_by_default__ = False
def build(self, value):
return cPickle.dumps(value)
def kwargs(self, value, action='build'):
if action == 'build':
return self.build(value)
elif action == 'parse':
return self.parse(value)
def parse(self, value):
if isinstance(value, unicode):
value = value.encode("utf-8")
return cPickle.loads(value)
class XMLFormatter(Formatter):
__mime__ = "text/xml"
def build(self, value):
return PythonToXML().build(value)
def kwargs(self, value, action='build'):
if action == 'build':
return self.build(value)
elif action == 'parse':
return self.parse(value)
def parse(self, value):
return PythonToXML().parse(value)
class YAMLFormatter(Formatter):
__mime__ = "application/x-yaml"
def build(self, value):
return yaml.safe_dump(value)
def kwargs(self, value, action='build'):
if action == 'build':
return self.build(value)
elif action == 'parse':
return self.parse(value)
def parse(self, value):
return yaml.safe_load(value)
formatters.register('json', JSONFormatter)
formatters.register('jsonp', JSONPFormatter)
formatters.register('value', ValueFormatter)
formatters.register('pickle', PickleFormatter)
formatters.register('xml', XMLFormatter)
if has_yaml:
formatters.register('yaml', YAMLFormatter)
|
[
"cPickle.loads",
"yaml.safe_dump",
"py2xml.PythonToXML",
"common.json.dumps",
"cPickle.dumps",
"yaml.safe_load",
"common.json.loads"
] |
[((2781, 2820), 'common.json.dumps', 'json.dumps', (['value'], {'cls': 'SimpleAPIEncoder'}), '(value, cls=SimpleAPIEncoder)\n', (2791, 2820), False, 'from common import json\n'), ((3047, 3086), 'common.json.loads', 'json.loads', (['value'], {'cls': 'SimpleAPIDecoder'}), '(value, cls=SimpleAPIDecoder)\n', (3057, 3086), False, 'from common import json\n'), ((3749, 3788), 'common.json.loads', 'json.loads', (['value'], {'cls': 'SimpleAPIDecoder'}), '(value, cls=SimpleAPIDecoder)\n', (3759, 3788), False, 'from common import json\n'), ((4925, 4945), 'cPickle.dumps', 'cPickle.dumps', (['value'], {}), '(value)\n', (4938, 4945), False, 'import cPickle\n'), ((5253, 5273), 'cPickle.loads', 'cPickle.loads', (['value'], {}), '(value)\n', (5266, 5273), False, 'import cPickle\n'), ((5769, 5790), 'yaml.safe_dump', 'yaml.safe_dump', (['value'], {}), '(value)\n', (5783, 5790), False, 'import yaml\n'), ((6017, 6038), 'yaml.safe_load', 'yaml.safe_load', (['value'], {}), '(value)\n', (6031, 6038), False, 'import yaml\n'), ((3596, 3635), 'common.json.dumps', 'json.dumps', (['value'], {'cls': 'SimpleAPIEncoder'}), '(value, cls=SimpleAPIEncoder)\n', (3606, 3635), False, 'from common import json\n'), ((3460, 3477), 'common.json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (3470, 3477), False, 'from common import json\n'), ((5376, 5389), 'py2xml.PythonToXML', 'PythonToXML', ([], {}), '()\n', (5387, 5389), False, 'from py2xml import PythonToXML\n'), ((5629, 5642), 'py2xml.PythonToXML', 'PythonToXML', ([], {}), '()\n', (5640, 5642), False, 'from py2xml import PythonToXML\n')]
|
from collections import defaultdict
class Fishes(object):
def __init__(self, ages):
self.ages = defaultdict(int)
for age in ages:
self.ages[age] += 1
def next_generation(self):
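        # timer-0 fish spawn; every other timer shifts down by one day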
new_born = self.ages[0]
for i in range(8):
self.ages[i] = self.ages[i + 1]
self.ages[6] += new_born
self.ages[8] = new_born
def count(self):
return sum(self.ages.values())
if __name__ == "__main__":
little = "3,4,3,1,2"
fishes = Fishes([int(a) for a in little.split(",")])
for _ in range(256):
fishes.next_generation()
print(fishes.count())
with open("../input", "r") as f:
fishes = Fishes([int(a) for a in f.read().split(",")])
for i in range(256):
fishes.next_generation()
print(fishes.count())
|
[
"collections.defaultdict"
] |
[((111, 127), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (122, 127), False, 'from collections import defaultdict\n')]
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Core\native\animation\__init__.py
# Compiled at: 2019-04-24 01:24:31
# Size of source mod 2**32: 13189 bytes
from _math import Vector3, Quaternion, Transform
from _resourceman import Key
import collections
from native.animation.arb import NativeArb, BoundaryConditionInfo
import api_config, sims4
logger = sims4.log.Logger('Animation(Native)')
try:
from _animation import AsmBase
from _animation import _ASM_ACTORTYPE_INVALID as ASM_ACTORTYPE_INVALID
from _animation import _ASM_ACTORTYPE_OBJECT as ASM_ACTORTYPE_OBJECT
from _animation import _ASM_ACTORTYPE_SIM as ASM_ACTORTYPE_SIM
from _animation import _ASM_ACTORTYPE_PROP as ASM_ACTORTYPE_PROP
from _animation import _ASM_REQUESTRESULT_SUCCESS as ASM_REQUESTRESULT_SUCCESS
from _animation import _ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND as ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND
from _animation import _ASM_REQUESTRESULT_TARGET_JUMPED_TO_TARGET_STATE as ASM_REQUESTRESULT_TARGET_JUMPED_TO_TARGET_STATE
except ImportError:
ASM_REQUESTRESULT_SUCCESS = 0
ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND = 1
ASM_REQUESTRESULT_TARGET_JUMPED_TO_TARGET_STATE = 2
class AsmBase:
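        # pure-Python stub standing in for the native extension when _animation is unavailable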
def __init__(self, key):
pass
def _request(self, to_state, arb, request_id=0, interrupt=False):
return ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND
def _traverse(self, from_state, to_state, arb, request_id=0, from_boundary_conditions=False):
return False
def _set_actor(self, actor_name, actor_id, suffix):
return False
def _clear_actor(self, actor_name):
return False
def _add_virtual_actor(self, actor_name, actor_id, suffix):
return False
def _remove_virtual_actor(self, actor_name, actor_id, suffix):
return False
def _set_parameter(self, parameter_name, value):
return False
def _set_actor_parameter(self, actor_name, actor_id, parameter_name, value):
return False
def _set_single_actor_parameter_if_possible(self, actor_name, parameter_name, value):
return False
def _add_actor_instance_namespace_override(self, actor_name, actor_id, actor_suffix, namespace, target_id, target_suffix):
return False
def _enter(self):
return False
def _exit(self, arb, request_id=0):
return ASM_REQUESTRESULT_TARGET_STATE_NOT_FOUND
def _schedule_exit_content(self, arb):
pass
def _set_current_state(self, state_name):
return False
def _get_supported_postures_for_actor(self, actor_name):
return False
def _get_resource_key_for_actor(self, actor_name):
return False
def _get_props_in_traversal(self, from_state, to_state):
return False
def _get_actor_definition(self, actor_name):
pass
class NativeAsm(AsmBase):
_BASE_ROOT_STRING = 'b__subroot__'
class ActorDescription(collections.namedtuple('_ActorDescription', ('actor_name', 'actor_name_hash', 'actor_type',
'is_master', 'is_virtual', 'prop_resource_key'))):
slots = []
def set_actor(self, name, actor, rig_key=None, suffix=None):
if actor is not None:
return self._set_actor(name, actor.id, suffix)
return self._clear_actor(name)
def set_reaction_actor(self, name):
return self._set_reaction_actor(name)
def add_virtual_actor(self, name, actor, suffix=None):
return self._add_virtual_actor(name, actor.id, suffix)
def remove_virtual_actor(self, name, actor, suffix=None):
return self._remove_virtual_actor(name, actor.id, suffix)
def get_actor_name(self):
return '<unknown>'
def set_parameter(self, parameter, value):
return self._set_parameter(parameter, value)
def set_actor_parameter(self, actor, instance, parameter, value, suffix=None):
return self._set_actor_parameter(actor, instance.id, parameter, value, suffix)
def specialize_virtual_actor_relationship(self, actor_name, actor, actor_suffix, namespace, target, target_suffix):
return self._add_actor_instance_namespace_override(actor_name, actor.id, actor_suffix, namespace, target.id, target_suffix)
def request(self, state_name, arb_instance, request_id=0, interrupt=False):
return self._request(state_name, arb_instance, request_id, interrupt)
def traverse(self, from_state_name, to_state_name, arb_instance, request_id=0, from_boundary_conditions=False):
return self._traverse(from_state_name, to_state_name, arb_instance, request_id, from_boundary_conditions)
def set_current_state(self, state_name):
self._set_current_state(state_name)
def get_supported_postures_for_actor(self, actor_name):
postures_actor = self._get_supported_postures_for_actor(actor_name)
postures_default = self._get_supported_postures_for_actor(None)
if postures_default is not None:
if postures_actor is not None:
combined_postures = set(postures_actor)
combined_postures.update(postures_default)
return combined_postures
return postures_default
return postures_actor
def get_resource_key_for_actor(self, actor_name):
return self._get_resource_key_for_actor(actor_name)
def get_props_in_traversal(self, from_state, to_state):
return self._get_props_in_traversal(from_state, to_state)
def get_actor_definition(self, actor_name):
description_args = self._get_actor_definition(actor_name)
if not description_args:
return
return (self.ActorDescription)(*description_args)
def enter(self):
self._enter()
def exit(self, arb_instance, request_id=0):
return self._exit(arb_instance, request_id)
def schedule_exit_content(self, arb_instance):
return self._schedule_exit_content(arb_instance)
def set_param_sequence(self, param_dict):
if param_dict is not None:
for key, value in param_dict.items():
if isinstance(key, tuple):
param = key[0]
actor = key[1]
if actor is not None:
self._set_single_actor_parameter_if_possible(actor, param, value)
else:
self.set_parameter(param, value)
else:
self.set_parameter(key, value)
def get_initial_offset(self, actor, to_state_name, from_state_name='entry'):
arb = NativeArb()
self.traverse(from_state_name, to_state_name, arb, from_boundary_conditions=True)
offset = arb.get_initial_offset(actor)
return Transform(Vector3(*offset[0]), Quaternion(*offset[1]))
def get_boundary_conditions(self, actor, to_state_name, from_state_name='entry'):
arb = NativeArb()
self.traverse(from_state_name, to_state_name, arb, from_boundary_conditions=True)
return arb.get_boundary_conditions(actor)
Asm = NativeAsm
def get_joint_transform_from_rig(rig_key, joint_name):
import _animation
try:
transform = _animation.get_joint_transform_from_rig(rig_key, joint_name)
except Exception as exe:
try:
logger.error('Failed to get transform from rig: {}, {}'.format(rig_key, joint_name))
raise exe
finally:
exe = None
del exe
return transform
def get_joint_name_for_hash_from_rig(rig_key, joint_name_hash):
import _animation
return _animation.get_joint_name_for_hash_from_rig(rig_key, joint_name_hash)
def get_joint_name_for_index_from_rig(rig_key, joint_index):
import _animation
return _animation.get_joint_name_for_index_from_rig(rig_key, joint_index)
def get_mirrored_joint_name_hash(rig_key, joint_name):
import _animation
return _animation.get_mirrored_joint_name_hash(rig_key, joint_name)
def update_post_condition_arb(post_condition, content):
import _animation
return _animation.update_post_condition_arb(post_condition, content)
def enable_native_reaction_event_handling(enabled):
import _animation
return _animation.enable_native_reaction_event_handling(enabled)
|
[
"_animation.get_joint_name_for_hash_from_rig",
"_animation.update_post_condition_arb",
"_math.Vector3",
"_animation.get_joint_transform_from_rig",
"_math.Quaternion",
"_animation.get_mirrored_joint_name_hash",
"_animation.enable_native_reaction_event_handling",
"collections.namedtuple",
"native.animation.arb.NativeArb",
"_animation.get_joint_name_for_index_from_rig",
"sims4.log.Logger"
] |
[((519, 556), 'sims4.log.Logger', 'sims4.log.Logger', (['"""Animation(Native)"""'], {}), "('Animation(Native)')\n", (535, 556), False, 'import api_config, sims4\n'), ((3234, 3382), 'collections.namedtuple', 'collections.namedtuple', (['"""_ActorDescription"""', "('actor_name', 'actor_name_hash', 'actor_type', 'is_master', 'is_virtual',\n 'prop_resource_key')"], {}), "('_ActorDescription', ('actor_name',\n 'actor_name_hash', 'actor_type', 'is_master', 'is_virtual',\n 'prop_resource_key'))\n", (3256, 3382), False, 'import collections\n'), ((7902, 7971), '_animation.get_joint_name_for_hash_from_rig', '_animation.get_joint_name_for_hash_from_rig', (['rig_key', 'joint_name_hash'], {}), '(rig_key, joint_name_hash)\n', (7945, 7971), False, 'import _animation\n'), ((8068, 8134), '_animation.get_joint_name_for_index_from_rig', '_animation.get_joint_name_for_index_from_rig', (['rig_key', 'joint_index'], {}), '(rig_key, joint_index)\n', (8112, 8134), False, 'import _animation\n'), ((8225, 8285), '_animation.get_mirrored_joint_name_hash', '_animation.get_mirrored_joint_name_hash', (['rig_key', 'joint_name'], {}), '(rig_key, joint_name)\n', (8264, 8285), False, 'import _animation\n'), ((8377, 8438), '_animation.update_post_condition_arb', '_animation.update_post_condition_arb', (['post_condition', 'content'], {}), '(post_condition, content)\n', (8413, 8438), False, 'import _animation\n'), ((8526, 8583), '_animation.enable_native_reaction_event_handling', '_animation.enable_native_reaction_event_handling', (['enabled'], {}), '(enabled)\n', (8574, 8583), False, 'import _animation\n'), ((6902, 6913), 'native.animation.arb.NativeArb', 'NativeArb', ([], {}), '()\n', (6911, 6913), False, 'from native.animation.arb import NativeArb, BoundaryConditionInfo\n'), ((7222, 7233), 'native.animation.arb.NativeArb', 'NativeArb', ([], {}), '()\n', (7231, 7233), False, 'from native.animation.arb import NativeArb, BoundaryConditionInfo\n'), ((7499, 7559), '_animation.get_joint_transform_from_rig', '_animation.get_joint_transform_from_rig', (['rig_key', 'joint_name'], {}), '(rig_key, joint_name)\n', (7538, 7559), False, 'import _animation\n'), ((7076, 7095), '_math.Vector3', 'Vector3', (['*offset[0]'], {}), '(*offset[0])\n', (7083, 7095), False, 'from _math import Vector3, Quaternion, Transform\n'), ((7097, 7119), '_math.Quaternion', 'Quaternion', (['*offset[1]'], {}), '(*offset[1])\n', (7107, 7119), False, 'from _math import Vector3, Quaternion, Transform\n')]
|
import colorpennester
print ("------------------我是分割线-------------------")
movies = ["The Holy Grail", 1975, "<NAME> & <NAME>", 91, ["<NAME>", ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]]]
# call the function
# the call must be prefixed with its namespace -> the module name is the same as the package name
colorpennester.printListMethod(movies,True,0)
|
[
"colorpennester.printListMethod"
] |
[((231, 278), 'colorpennester.printListMethod', 'colorpennester.printListMethod', (['movies', '(True)', '(0)'], {}), '(movies, True, 0)\n', (261, 278), False, 'import colorpennester\n')]
|
import abc
import copy
import os
from typing import List
import torch
import wandb
from hive.utils.registry import Registrable, registry
from hive.utils.schedule import ConstantSchedule, Schedule, get_schedule
from hive.utils.utils import Chomp, create_folder
class Logger(abc.ABC, Registrable):
"""Abstract class for logging in hive."""
def __init__(self, timescales=None):
"""Constructor for base Logger class. Every Logger must call this constructor
in its own constructor
Args:
timescales (str | list(str)): The different timescales at which logger
needs to log. If only logging at one timescale, it is acceptable to
only pass a string.
"""
if timescales is None:
self._timescales = []
elif isinstance(timescales, str):
self._timescales = [timescales]
elif isinstance(timescales, list):
self._timescales = timescales
else:
raise ValueError("Need string or list of strings for timescales")
def register_timescale(self, timescale):
"""Register a new timescale with the logger.
Args:
timescale (str): Timescale to register.
"""
self._timescales.append(timescale)
@abc.abstractmethod
def log_config(self, config):
"""Log the config.
Args:
config (dict): Config parameters.
"""
pass
@abc.abstractmethod
def log_scalar(self, name, value, prefix):
"""Log a scalar variable.
Args:
name (str): Name of the metric to be logged.
value (float): Value to be logged.
prefix (str): Prefix to append to metric name.
"""
pass
@abc.abstractmethod
def log_metrics(self, metrics, prefix):
"""Log a dictionary of values.
Args:
metrics (dict): Dictionary of metrics to be logged.
prefix (str): Prefix to append to metric name.
"""
pass
@abc.abstractmethod
def save(self, dir_name):
"""Saves the current state of the log files.
Args:
dir_name (str): Name of the directory to save the log files.
"""
pass
@abc.abstractmethod
def load(self, dir_name):
"""Loads the log files from given directory.
Args:
dir_name (str): Name of the directory to load the log file from.
"""
pass
@classmethod
def type_name(cls):
return "logger"
class ScheduledLogger(Logger):
"""Abstract class that manages a schedule for logging.
The update_step method should be called for each step in the loop to update
the logger's schedule. The should_log method can be used to check whether
the logger should log anything.
This schedule is not strictly enforced! It is still possible to log something
even if should_log returns false. These functions are just for the purpose
of convenience.
"""
def __init__(self, timescales=None, logger_schedules=None):
"""
        Any timescale not assigned a schedule in logger_schedules will be
        assigned a ConstantSchedule(True).
Args:
timescales (str|list[str]): The different timescales at which logger needs
to log. If only logging at one timescale, it is acceptable to only pass
a string.
logger_schedules (Schedule|list|dict): Schedules used to keep track of when
to log. If a single schedule, it is copied for each timescale. If a
list of schedules, the schedules are matched up in order with the list
of timescales provided. If a dictionary, the keys should be the
timescale and the values should be the schedule.
"""
super().__init__(timescales)
if logger_schedules is None:
logger_schedules = ConstantSchedule(True)
if isinstance(logger_schedules, dict):
self._logger_schedules = logger_schedules
elif isinstance(logger_schedules, list):
self._logger_schedules = {
self._timescales[idx]: logger_schedules[idx]
for idx in range(min(len(logger_schedules), len(self._timescales)))
}
elif isinstance(logger_schedules, Schedule):
self._logger_schedules = {
timescale: copy.deepcopy(logger_schedules)
for timescale in self._timescales
}
else:
raise ValueError(
"logger_schedule must be a dict, list of Schedules, or Schedule object"
)
for timescale, schedule in self._logger_schedules.items():
if isinstance(schedule, dict):
self._logger_schedules[timescale] = get_schedule(
schedule["name"], schedule["kwargs"]
)
for timescale in self._timescales:
if timescale not in self._logger_schedules:
self._logger_schedules[timescale] = ConstantSchedule(True)
self._steps = {timescale: 0 for timescale in self._timescales}
def register_timescale(self, timescale, schedule=None):
"""Register a new timescale.
Args:
timescale (str): Timescale to register.
schedule (Schedule): Schedule to use for this timescale.
"""
super().register_timescale(timescale)
if schedule is None:
schedule = ConstantSchedule(True)
self._logger_schedules[timescale] = schedule
self._steps[timescale] = 0
def update_step(self, timescale):
"""Update the step and schedule for a given timescale.
Args:
timescale (str): A registered timescale.
"""
self._steps[timescale] += 1
self._logger_schedules[timescale].update()
return self.should_log(timescale)
def should_log(self, timescale):
"""Check if you should log for a given timescale.
Args:
timescale (str): A registered timescale.
"""
return self._logger_schedules[timescale].get_value()
def save(self, dir_name):
logger_state = Chomp()
logger_state.timescales = self._timescales
logger_state.schedules = self._logger_schedules
logger_state.steps = self._steps
logger_state.save(os.path.join(dir_name, "logger_state.p"))
def load(self, dir_name):
logger_state = Chomp()
logger_state.load(os.path.join(dir_name, "logger_state.p"))
self._timescales = logger_state.timescales
self._logger_schedules = logger_state.schedules
self._steps = logger_state.steps
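# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The ScheduledLogger docstring above describes calling update_step once per
# loop iteration and gating logging on its return value; a minimal usage
# sketch, assuming a hypothetical training loop and the ChompLogger defined
# further below:
def _demo_scheduled_logging(num_steps=10):
    logger = ChompLogger(
        timescales="train", logger_schedules=ConstantSchedule(True)
    )
    for _ in range(num_steps):
        loss = 0.0  # stand-in for a real training loss
        if logger.update_step("train"):  # advances the schedule, returns should_log
            logger.log_scalar("loss", loss, prefix="train")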
class NullLogger(ScheduledLogger):
"""A null logger that does not log anything.
Used if you don't want to log anything, but still want to use parts of the
framework that ask for a logger.
"""
def __init__(self, timescales=None, logger_schedules=None):
super().__init__(timescales, logger_schedules)
def log_config(self, config):
pass
def log_scalar(self, name, value, timescale):
pass
def log_metrics(self, metrics, timescale):
pass
def save(self, dir_name):
pass
def load(self, dir_name):
pass
class WandbLogger(ScheduledLogger):
"""A Wandb logger.
This logger can be used to log to wandb. It assumes that wandb is configured
locally on your system. Multiple timescales/loggers can be implemented by
instantiating multiple loggers with different logger_names. These should still
have the same project and run names.
Check the wandb documentation for more details on the parameters.
"""
def __init__(
self,
timescales=None,
logger_schedules=None,
project=None,
name=None,
dir=None,
mode=None,
id=None,
resume=None,
start_method=None,
**kwargs,
):
"""
Args:
timescales (str|list[str]): The different timescales at which logger needs
to log. If only logging at one timescale, it is acceptable to only pass
a string.
logger_schedules (Schedule|list|dict): Schedules used to keep track of when
to log. If a single schedule, it is copied for each timescale. If a
list of schedules, the schedules are matched up in order with the list
of timescales provided. If a dictionary, the keys should be the
timescale and the values should be the schedule.
project (str): Name of the project. Wandb's dash groups all runs with
the same project name together.
name (str): Name of the run. Used to identify the run on the wandb
dash.
dir (str): Local directory where wandb saves logs.
mode (str): The mode of logging. Can be "online", "offline" or "disabled".
In offline mode, writes all data to disk for later syncing to a server,
                while in disabled mode, all calls to the wandb API become
                no-ops while core functionality is maintained.
id (str, optional): A unique ID for this run, used for resuming.
It must be unique in the project, and if you delete a run you can't
reuse the ID.
resume (bool, str, optional): Sets the resuming behavior.
Options are the same as mentioned in Wandb's doc.
start_method (str): The start method to use for wandb's process. See
https://docs.wandb.ai/guides/track/launch#init-start-error.
**kwargs: You can pass any other arguments to wandb's init method as
                keyword arguments. Note, these arguments can't be overridden from the
command line.
"""
super().__init__(timescales, logger_schedules)
settings = None
if start_method is not None:
settings = wandb.Settings(start_method=start_method)
wandb.init(
project=project,
name=name,
dir=dir,
mode=mode,
id=id,
resume=resume,
settings=settings,
**kwargs,
)
def log_config(self, config):
# Convert list parameters to nested dictionary
for k, v in config.items():
if isinstance(v, list):
config[k] = {}
for idx, param in enumerate(v):
config[k][idx] = param
wandb.config.update(config)
def log_scalar(self, name, value, prefix):
metrics = {f"{prefix}/{name}": value}
metrics.update(
{
f"{timescale}_step": self._steps[timescale]
for timescale in self._timescales
}
)
wandb.log(metrics)
def log_metrics(self, metrics, prefix):
metrics = {f"{prefix}/{name}": value for (name, value) in metrics.items()}
metrics.update(
{
f"{timescale}_step": self._steps[timescale]
for timescale in self._timescales
}
)
wandb.log(metrics)
class ChompLogger(ScheduledLogger):
"""This logger uses the Chomp data structure to store all logged values which are
then directly saved to disk.
"""
def __init__(self, timescales=None, logger_schedules=None):
super().__init__(timescales, logger_schedules)
self._log_data = Chomp()
def log_config(self, config):
self._log_data["config"] = config
def log_scalar(self, name, value, prefix):
metric_name = f"{prefix}/{name}"
if metric_name not in self._log_data:
self._log_data[metric_name] = [[], []]
if isinstance(value, torch.Tensor):
self._log_data[metric_name][0].append(value.item())
else:
self._log_data[metric_name][0].append(value)
self._log_data[metric_name][1].append(
{timescale: self._steps[timescale] for timescale in self._timescales}
)
def log_metrics(self, metrics, prefix):
for name in metrics:
metric_name = f"{prefix}/{name}"
if metric_name not in self._log_data:
self._log_data[metric_name] = [[], []]
if isinstance(metrics[name], torch.Tensor):
self._log_data[metric_name][0].append(metrics[name].item())
else:
self._log_data[metric_name][0].append(metrics[name])
self._log_data[metric_name][1].append(
{timescale: self._steps[timescale] for timescale in self._timescales}
)
def save(self, dir_name):
super().save(dir_name)
self._log_data.save(os.path.join(dir_name, "log_data.p"))
def load(self, dir_name):
super().load(dir_name)
self._log_data.load(os.path.join(dir_name, "log_data.p"))
class CompositeLogger(Logger):
"""This Logger aggregates multiple loggers together.
This logger is for convenience and allows for logging using multiple loggers without
having to keep track of several loggers. When timescales are updated, this logger
updates the timescale for each one of its component loggers. When logging, logs to
each of its component loggers as long as the logger is not a ScheduledLogger that
should not be logging for the timescale.
"""
def __init__(self, logger_list: List[Logger]):
super().__init__([])
self._logger_list = logger_list
def register_timescale(self, timescale, schedule=None):
for logger in self._logger_list:
if isinstance(logger, ScheduledLogger):
logger.register_timescale(timescale, schedule)
else:
logger.register_timescale(timescale)
def log_config(self, config):
for logger in self._logger_list:
logger.log_config(config)
def log_scalar(self, name, value, prefix):
for logger in self._logger_list:
logger.log_scalar(name, value, prefix)
def log_metrics(self, metrics, prefix):
for logger in self._logger_list:
logger.log_metrics(metrics, prefix=prefix)
def update_step(self, timescale):
"""Update the step and schedule for a given timescale for every
ScheduledLogger.
Args:
timescale (str): A registered timescale.
"""
for logger in self._logger_list:
if isinstance(logger, ScheduledLogger):
logger.update_step(timescale)
return self.should_log(timescale)
def should_log(self, timescale):
"""Check if you should log for a given timescale. If any logger in the list
is scheduled to log, returns True.
Args:
timescale (str): A registered timescale.
"""
for logger in self._logger_list:
if not isinstance(logger, ScheduledLogger) or logger.should_log(timescale):
return True
return False
def save(self, dir_name):
for idx, logger in enumerate(self._logger_list):
save_dir = os.path.join(dir_name, f"logger_{idx}")
create_folder(save_dir)
logger.save(save_dir)
def load(self, dir_name):
for idx, logger in enumerate(self._logger_list):
load_dir = os.path.join(dir_name, f"logger_{idx}")
logger.load(load_dir)
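# --- Illustrative sketch (editor's addition, not part of the original module) ---
# CompositeLogger fans every call out to each component logger; a minimal
# sketch combining a ChompLogger with a NullLogger on one timescale:
def _demo_composite_logging():
    logger = CompositeLogger([ChompLogger("train"), NullLogger("train")])
    logger.update_step("train")
    logger.log_scalar("reward", 1.0, prefix="train")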
registry.register_all(
Logger,
{
"NullLogger": NullLogger,
"WandbLogger": WandbLogger,
"ChompLogger": ChompLogger,
"CompositeLogger": CompositeLogger,
},
)
get_logger = getattr(registry, f"get_{Logger.type_name()}")
|
[
"wandb.log",
"hive.utils.registry.registry.register_all",
"copy.deepcopy",
"wandb.config.update",
"wandb.Settings",
"hive.utils.utils.Chomp",
"wandb.init",
"hive.utils.utils.create_folder",
"hive.utils.schedule.get_schedule",
"hive.utils.schedule.ConstantSchedule",
"os.path.join"
] |
[((15569, 15726), 'hive.utils.registry.registry.register_all', 'registry.register_all', (['Logger', "{'NullLogger': NullLogger, 'WandbLogger': WandbLogger, 'ChompLogger':\n ChompLogger, 'CompositeLogger': CompositeLogger}"], {}), "(Logger, {'NullLogger': NullLogger, 'WandbLogger':\n WandbLogger, 'ChompLogger': ChompLogger, 'CompositeLogger':\n CompositeLogger})\n", (15590, 15726), False, 'from hive.utils.registry import Registrable, registry\n'), ((6243, 6250), 'hive.utils.utils.Chomp', 'Chomp', ([], {}), '()\n', (6248, 6250), False, 'from hive.utils.utils import Chomp, create_folder\n'), ((6521, 6528), 'hive.utils.utils.Chomp', 'Chomp', ([], {}), '()\n', (6526, 6528), False, 'from hive.utils.utils import Chomp, create_folder\n'), ((10137, 10251), 'wandb.init', 'wandb.init', ([], {'project': 'project', 'name': 'name', 'dir': 'dir', 'mode': 'mode', 'id': 'id', 'resume': 'resume', 'settings': 'settings'}), '(project=project, name=name, dir=dir, mode=mode, id=id, resume=\n resume, settings=settings, **kwargs)\n', (10147, 10251), False, 'import wandb\n'), ((10647, 10674), 'wandb.config.update', 'wandb.config.update', (['config'], {}), '(config)\n', (10666, 10674), False, 'import wandb\n'), ((10949, 10967), 'wandb.log', 'wandb.log', (['metrics'], {}), '(metrics)\n', (10958, 10967), False, 'import wandb\n'), ((11276, 11294), 'wandb.log', 'wandb.log', (['metrics'], {}), '(metrics)\n', (11285, 11294), False, 'import wandb\n'), ((11605, 11612), 'hive.utils.utils.Chomp', 'Chomp', ([], {}), '()\n', (11610, 11612), False, 'from hive.utils.utils import Chomp, create_folder\n'), ((3957, 3979), 'hive.utils.schedule.ConstantSchedule', 'ConstantSchedule', (['(True)'], {}), '(True)\n', (3973, 3979), False, 'from hive.utils.schedule import ConstantSchedule, Schedule, get_schedule\n'), ((5530, 5552), 'hive.utils.schedule.ConstantSchedule', 'ConstantSchedule', (['(True)'], {}), '(True)\n', (5546, 5552), False, 'from hive.utils.schedule import ConstantSchedule, Schedule, get_schedule\n'), ((6425, 6465), 'os.path.join', 'os.path.join', (['dir_name', '"""logger_state.p"""'], {}), "(dir_name, 'logger_state.p')\n", (6437, 6465), False, 'import os\n'), ((6555, 6595), 'os.path.join', 'os.path.join', (['dir_name', '"""logger_state.p"""'], {}), "(dir_name, 'logger_state.p')\n", (6567, 6595), False, 'import os\n'), ((10086, 10127), 'wandb.Settings', 'wandb.Settings', ([], {'start_method': 'start_method'}), '(start_method=start_method)\n', (10100, 10127), False, 'import wandb\n'), ((12878, 12914), 'os.path.join', 'os.path.join', (['dir_name', '"""log_data.p"""'], {}), "(dir_name, 'log_data.p')\n", (12890, 12914), False, 'import os\n'), ((13006, 13042), 'os.path.join', 'os.path.join', (['dir_name', '"""log_data.p"""'], {}), "(dir_name, 'log_data.p')\n", (13018, 13042), False, 'import os\n'), ((15272, 15311), 'os.path.join', 'os.path.join', (['dir_name', 'f"""logger_{idx}"""'], {}), "(dir_name, f'logger_{idx}')\n", (15284, 15311), False, 'import os\n'), ((15324, 15347), 'hive.utils.utils.create_folder', 'create_folder', (['save_dir'], {}), '(save_dir)\n', (15337, 15347), False, 'from hive.utils.utils import Chomp, create_folder\n'), ((15493, 15532), 'os.path.join', 'os.path.join', (['dir_name', 'f"""logger_{idx}"""'], {}), "(dir_name, f'logger_{idx}')\n", (15505, 15532), False, 'import os\n'), ((4851, 4901), 'hive.utils.schedule.get_schedule', 'get_schedule', (["schedule['name']", "schedule['kwargs']"], {}), "(schedule['name'], schedule['kwargs'])\n", (4863, 4901), False, 'from hive.utils.schedule import ConstantSchedule, Schedule, get_schedule\n'), ((5092, 5114), 'hive.utils.schedule.ConstantSchedule', 'ConstantSchedule', (['(True)'], {}), '(True)\n', (5108, 5114), False, 'from hive.utils.schedule import ConstantSchedule, Schedule, get_schedule\n'), ((4447, 4478), 'copy.deepcopy', 'copy.deepcopy', (['logger_schedules'], {}), '(logger_schedules)\n', (4460, 4478), False, 'import copy\n')]
|
import six
import tensorflow as tf
from functools import reduce
from operator import mul
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def dropout(x, keep_prob, is_train, noise_shape=None, seed=None, name=None):
"""
with tf.name_scope(name or "dropout"):
if is_train is None:
if keep_prob < 1.0:
return tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)
else:
if keep_prob < 1.0:
out = tf.cond(
is_train,
lambda: tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed),
lambda: x
)
return out
"""
with tf.name_scope(name or "dropout"):
if is_train is None:
if keep_prob < 1.0:
return tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)
else:
if is_train and keep_prob < 1.0:
return tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed)
return x
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
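# --- Illustrative sketch (editor's addition, not part of the original module) ---
# get_shape_list mixes static and dynamic dimensions: known dims come back as
# Python ints, unknown dims as scalar tf.Tensors (TF1 graph mode assumed):
def _demo_get_shape_list():
    x = tf.placeholder(tf.float32, shape=[None, 128])
    batch, width = get_shape_list(x, expected_rank=2)
    # batch is a scalar tf.Tensor (dynamic); width is the Python int 128 (static)
    return batch, width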
def selu(x):
with tf.name_scope('elu') as scope:
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))
def gelu(x): # read
# return 0.5*x*(1+tf.tanh(math.sqrt(2/math.pi)*(x+0.044715*tf.pow(x, 3))))
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.erf(x / tf.sqrt(2.0)))
return x * cdf
def swish(x):
return x*tf.nn.sigmoid(x)
def activation_name_to_func(activation_name):
assert isinstance(activation_name, str)
if isinstance(activation_name, str):
if activation_name == 'linear':
act_fn = tf.identity
elif activation_name == 'relu':
act_fn = tf.nn.relu
elif activation_name == 'elu':
act_fn = tf.nn.elu
elif activation_name == 'selu':
act_fn = selu
elif activation_name == 'sigmoid':
act_fn = tf.nn.sigmoid
elif activation_name == 'tanh':
act_fn = tf.nn.tanh
elif activation_name == 'exp':
act_fn = tf.exp
elif activation_name == 'log':
act_fn = tf.log
elif activation_name == 'gelu':
act_fn = gelu
elif activation_name == 'swish':
act_fn = swish
elif activation_name == 'lrelu':
act_fn = tf.nn.leaky_relu
else:
raise AttributeError('no activation function named as %s' % activation_name)
elif hasattr(activation_name, '__call__'): # callable
act_fn = activation_name
else:
raise AttributeError
return act_fn
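# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Resolving an activation by name and applying it to a tensor:
def _demo_activation_lookup():
    act = activation_name_to_func('gelu')
    x = tf.constant([[0.5, -0.5]])
    return act(x)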
def act_name2fn(afn):
return activation_name_to_func(afn)
def bn_dense_layer_v2(
input_tensor, hn, bias, bias_start=0.0, scope=None,
activation='relu', enable_bn=False,
wd=0., keep_prob=1.0, is_train=None, dup_num=1, merge_var=False
):
act_fn = act_name2fn(activation)
with tf.variable_scope(scope or 'bn_dense_layer'):
input_tensor = dropout(input_tensor, keep_prob, is_train)
# the comment use a 3d tensor [bs,sl,hn] as a example
input_shape = get_shape_list(input_tensor) # [3]
assert len(input_shape) >= 2 # at least [bs,hn]
# merge
dims_merge = input_shape[:-1] # [all unrelated dims]
new_dim = reduce(mul, dims_merge) # get the merged dim
new_shape = [new_dim, input_shape[-1]] # new shape for matmul [2]
input_tensor_rsp = tf.reshape(input_tensor, new_shape) # [xx,dim]
# dense layer
input_dim = new_shape[-1]
if merge_var:
weight = tf.get_variable('W', shape=[input_dim, hn * dup_num], dtype=tf.float32)
else:
weight_list = []
for i in range(dup_num):
weight_list.append(tf.get_variable('W_%d' % i, shape=[input_dim, hn]))
weight = tf.concat(weight_list, -1)
output_rsp = tf.matmul(input_tensor_rsp, weight)
if bias:
if merge_var or dup_num == 1:
bias_val = tf.get_variable(
'bias', shape=[hn * dup_num], dtype=tf.float32,
initializer=tf.constant_initializer(bias_start)
)
else:
bias_list = []
for i in range(dup_num):
bias_list.append(
tf.get_variable(
'bias_%d' % i, shape=[hn], dtype=tf.float32,
initializer=tf.constant_initializer(bias_start))
)
bias_val = tf.concat(bias_list, -1)
output_rsp += bias_val
# output reshape
output_shape = dims_merge + [hn * dup_num] # [3] for [bs,sl,new_hn]
output = tf.reshape(output_rsp, output_shape) # [bs,sl,new_hn]
if enable_bn:
output = tf.contrib.layers.batch_norm(
output, center=True, scale=True, is_training=is_train,
updates_collections=None, decay=0.9,
scope='bn')
if wd:
tf.add_to_collection('reg_vars', weight)
return act_fn(output)
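# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Applying bn_dense_layer_v2 to a [batch, seq_len, hidden] tensor; the layer
# flattens all leading dims for the matmul and restores them afterwards:
def _demo_bn_dense_layer():
    x = tf.placeholder(tf.float32, shape=[None, 20, 64])  # [bs, sl, hn]
    y = bn_dense_layer_v2(x, hn=128, bias=True, scope='demo',
                          activation='gelu', keep_prob=0.9, is_train=True)
    return y  # shape [bs, sl, 128]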
|
[
"tensorflow.nn.elu",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.get_variable_scope",
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"tensorflow.contrib.layers.batch_norm",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.add_to_collection",
"functools.reduce",
"tensorflow.sqrt",
"tensorflow.name_scope",
"tensorflow.nn.dropout",
"tensorflow.get_variable"
] |
[((3106, 3122), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (3114, 3122), True, 'import tensorflow as tf\n'), ((1714, 1746), 'tensorflow.name_scope', 'tf.name_scope', (["(name or 'dropout')"], {}), "(name or 'dropout')\n", (1727, 1746), True, 'import tensorflow as tf\n'), ((3240, 3260), 'tensorflow.name_scope', 'tf.name_scope', (['"""elu"""'], {}), "('elu')\n", (3253, 3260), True, 'import tensorflow as tf\n'), ((3908, 3924), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), '(x)\n', (3921, 3924), True, 'import tensorflow as tf\n'), ((5399, 5443), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'bn_dense_layer')"], {}), "(scope or 'bn_dense_layer')\n", (5416, 5443), True, 'import tensorflow as tf\n'), ((5784, 5807), 'functools.reduce', 'reduce', (['mul', 'dims_merge'], {}), '(mul, dims_merge)\n', (5790, 5807), False, 'from functools import reduce\n'), ((5932, 5967), 'tensorflow.reshape', 'tf.reshape', (['input_tensor', 'new_shape'], {}), '(input_tensor, new_shape)\n', (5942, 5967), True, 'import tensorflow as tf\n'), ((6389, 6424), 'tensorflow.matmul', 'tf.matmul', (['input_tensor_rsp', 'weight'], {}), '(input_tensor_rsp, weight)\n', (6398, 6424), True, 'import tensorflow as tf\n'), ((7231, 7267), 'tensorflow.reshape', 'tf.reshape', (['output_rsp', 'output_shape'], {}), '(output_rsp, output_shape)\n', (7241, 7267), True, 'import tensorflow as tf\n'), ((867, 890), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (888, 890), True, 'import tensorflow as tf\n'), ((6081, 6152), 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""'], {'shape': '[input_dim, hn * dup_num]', 'dtype': 'tf.float32'}), "('W', shape=[input_dim, hn * dup_num], dtype=tf.float32)\n", (6096, 6152), True, 'import tensorflow as tf\n'), ((6341, 6367), 'tensorflow.concat', 'tf.concat', (['weight_list', '(-1)'], {}), '(weight_list, -1)\n', (6350, 6367), True, 'import tensorflow as tf\n'), ((7330, 7467), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['output'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_train', 'updates_collections': 'None', 'decay': '(0.9)', 'scope': '"""bn"""'}), "(output, center=True, scale=True, is_training=\n is_train, updates_collections=None, decay=0.9, scope='bn')\n", (7358, 7467), True, 'import tensorflow as tf\n'), ((7541, 7581), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""reg_vars"""', 'weight'], {}), "('reg_vars', weight)\n", (7561, 7581), True, 'import tensorflow as tf\n'), ((1832, 1895), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x', 'keep_prob'], {'noise_shape': 'noise_shape', 'seed': 'seed'}), '(x, keep_prob, noise_shape=noise_shape, seed=seed)\n', (1845, 1895), True, 'import tensorflow as tf\n'), ((1978, 2041), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x', 'keep_prob'], {'noise_shape': 'noise_shape', 'seed': 'seed'}), '(x, keep_prob, noise_shape=noise_shape, seed=seed)\n', (1991, 2041), True, 'import tensorflow as tf\n'), ((7051, 7075), 'tensorflow.concat', 'tf.concat', (['bias_list', '(-1)'], {}), '(bias_list, -1)\n', (7060, 7075), True, 'import tensorflow as tf\n'), ((3424, 3436), 'tensorflow.nn.elu', 'tf.nn.elu', (['x'], {}), '(x)\n', (3433, 3436), True, 'import tensorflow as tf\n'), ((3846, 3858), 'tensorflow.sqrt', 'tf.sqrt', (['(2.0)'], {}), '(2.0)\n', (3853, 3858), True, 'import tensorflow as tf\n'), ((6268, 6318), 'tensorflow.get_variable', 'tf.get_variable', (["('W_%d' % i)"], {'shape': '[input_dim, hn]'}), "('W_%d' % i, shape=[input_dim, hn])\n", (6283, 6318), True, 'import tensorflow as tf\n'), ((6629, 6664), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias_start'], {}), '(bias_start)\n', (6652, 6664), True, 'import tensorflow as tf\n'), ((6965, 7000), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias_start'], {}), '(bias_start)\n', (6988, 7000), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-18 00:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('beers', '0021_auto_20171117_1846'),
]
operations = [
migrations.AddField(
model_name='contest_checkin',
name='bonus_type',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AddField(
model_name='contest_checkin',
name='tx_type',
field=models.CharField(choices=[('BE', 'Beer'), ('BR', 'Brewery'), ('CB', 'Challenge Beer'), ('CL', 'Challenge Beer Loss'), ('BO', 'Bonus')], default='BE', max_length=2),
),
]
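    # Editor's note (illustrative, not generated code): once this migration is
    # applied, the new fields become queryable; e.g., assuming the model class
    # is named Contest_Checkin in beers.models:
    #   Contest_Checkin.objects.filter(tx_type='CB')              # challenge beers
    #   Contest_Checkin.objects.exclude(bonus_type__isnull=True)  # bonus checkins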
|
[
"django.db.models.CharField"
] |
[((408, 462), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (424, 462), False, 'from django.db import migrations, models\n'), ((592, 763), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('BE', 'Beer'), ('BR', 'Brewery'), ('CB', 'Challenge Beer'), ('CL',\n 'Challenge Beer Loss'), ('BO', 'Bonus')]", 'default': '"""BE"""', 'max_length': '(2)'}), "(choices=[('BE', 'Beer'), ('BR', 'Brewery'), ('CB',\n 'Challenge Beer'), ('CL', 'Challenge Beer Loss'), ('BO', 'Bonus')],\n default='BE', max_length=2)\n", (608, 763), False, 'from django.db import migrations, models\n')]
|
import struct
import sys
import time
import json
import os
from PyQt5.QtCore import QDir, Qt
from PyQt5.QtGui import QBrush, QPen
from PyQt5.QtWidgets import (QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog,
QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget,
QGraphicsScene, QGraphicsView)
class Button(QToolButton):
def __init__(self, text, parent=None):
super(Button, self).__init__(parent)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
self.setText(text)
def sizeHint(self):
size = super(Button, self).sizeHint()
size.setHeight(size.height() + 20)
size.setWidth(max(size.width(), size.height()))
return size
class MainWin(QMainWindow):
files = []
def __init__(self):
super(MainWin, self).__init__()
self.setupGUI()
self.setWindowTitle("TES Mapper")
self.resize(500, 400)
# config
self.config = json.loads(open('config.json').read())
def setupGUI(self):
tabmain = QTabWidget()
self.setCentralWidget(tabmain)
# GPS Converter
widget = QWidget()
mainLayout = QGridLayout()
widget.setLayout(mainLayout)
self.openButton = self.createButton("Open Files", self.openFileClicked)
mainLayout.addWidget(self.openButton, 0, 0)
self.listWidget = QListWidget()
mainLayout.addWidget(self.listWidget, 2, 0, 1, 2)
self.runConvButton = self.createButton("Run Conversion", self.runConversionClicked)
mainLayout.addWidget(self.runConvButton, 0, 1)
self.runConvButton.setEnabled(False)
self.multiCheckbox = self.createCheckbox("Multiple Markers per Map")
mainLayout.addWidget(self.multiCheckbox, 1,1)
tabmain.addTab(widget, "GPS Data Conversion")
# GPS View
gpswidget = QWidget()
gpsLayout = QGridLayout()
gpswidget.setLayout(gpsLayout)
gview = QGraphicsView()
scene = QGraphicsScene()
gview.setScene(scene)
gpsLayout.addWidget(gview)
blueBrush = QBrush(Qt.blue)
mypen = QPen(Qt.black)
scene.addRect(100, 0, 80, 100, mypen, blueBrush)
tabmain.addTab(gpswidget, "GPS Visualisation")
def createButton(self, text, member):
button = Button(text)
button.clicked.connect(member)
return button
def createCheckbox(self, text):
checkbox = QCheckBox(text)
return checkbox
def openFileClicked(self):
fnames = QFileDialog.getOpenFileNames(self, 'Open files',
'./')
self.listWidget.clear()
self.files.clear()
if len(fnames[0]) > 0:
self.runConvButton.setEnabled(True)
for f in fnames[0]:
self.listWidget.addItem(f)
self.files.append(f)
else:
self.runConvButton.setEnabled(False)
def runConversionClicked(self):
multiPositions = []
multiTimediffs = []
multiNames = []
for fi in self.files:
positions = []
timediffs = []
lasttime = -1
printedtime = False
with open(fi, 'rb') as f:
print("Processing ["+fi+"]")
if os.path.splitext(fi)[1].lower() == ".nmea":
print("\tNMEA parsing mode")
for line in f:
parts = line.decode().split(',')
if parts[0] == "$GPRMC":
parttime = parts[1]
status = parts[2] #A okay, V Warnings
lat = parts[3]
latori = parts[4]
lon = parts[5]
lonori = parts[6] #1 is fix, 0 is no fix
speed = parts[7] #knots
course = parts[8] # to true north
date = parts[9]
signalValid = parts[12] # signal integrity Axx valid, Nxx invalid or no signal
mytime = time.strptime(date[0:2]+"."+date[2:4]+"."+"20"+date[4:6]+" - "+parttime[0:2]+':'+parttime[2:4]+':'+parttime[4:6], '%d.%m.%Y - %H:%M:%S')
if len(lat) > 0 and len(lon) > 0:
# convert to decimal degrees
dlat = int(lat[0:2])
dlon = int(lon[0:3])
mlat = float(lat[2:])/60.0
mlon = float(lon[3:])/60.0
rlat = dlat + mlat
rlon = dlon + mlon
positions.append([rlat, rlon])
if printedtime == False:
print("\t"+date[0:2]+"."+date[2:4]+"."+"20"+date[4:6]+" - "+parttime[0:2]+':'+parttime[2:4]+':'+parttime[4:6])
print("\tInit at: "+str([rlat, rlon]))
printedtime = True
ticks = int(time.mktime(mytime))
myticks = 0
if lasttime == -1:
lasttime = ticks
myticks = 0
else:
myticks = ticks - lasttime
lasttime = ticks
timediffs.append(myticks*1000)
if self.multiCheckbox.checkState() == Qt.Unchecked:
with open('template.html', 'r') as template:
tempstr = template.read()
tempstr = tempstr.replace('_ACCESS_TOKEN_', self.config["mapbox_access_token"])
tempstr = tempstr.replace('_REPLACE_POS_', str(positions))
tempstr = tempstr.replace('_REPLACE_TIME_', str(timediffs))
tempstr = tempstr.replace('_REPLACE_MULTINAMES_', str([os.path.split(fi)[1]]))
tempstr = tempstr.replace('_REPLACE_MULTIMAP_', "false")
out = fi.replace('.NMEA', '.nmea')
out = out.replace('.nmea', '.html')
out = open(out, 'w')
out.write(tempstr)
out.close()
else:
multiPositions.append(positions)
multiTimediffs.append(timediffs)
multiNames.append(os.path.split(fi)[1])
else:
print("\tTES parsing mode")
while True:
bytes = f.read(2)
if len(bytes) < 2:
break # exit if eof
# type of point
types = struct.unpack('=h', bytes)
#if types[0] & 1 == 1:
# print('Split mark')
#elif types[0] & 2 == 1:
# print('Interest point')
#elif types[0] & 4 == 1:
# print('Track point')
# date of record
bytes = f.read(4)
date = struct.unpack('=L', bytes)
s = int(0)
smask = 63
s = (date[0] & smask)
m = int(0)
mmask = smask << 6
m = (date[0] & mmask) >> 6
h = int(0)
hmask = 31 << 12
h = (date[0] & hmask) >> 12
d = int(0)
dmask = 31 << 17
d = (date[0] & dmask) >> 17
mo = int(0)
momask = 15 << 22
mo = (date[0] & momask) >> 22
y = int(0)
ymask = 63 << 26
y = ((date[0] & ymask) >> 26) + 2000
if printedtime == False:
print('\tDate: '+str(d)+'.'+str(mo)+'.'+str(y)+" - "+str(h)+':'+str(m)+':'+str(s))
printedtime = True
mytime = time.strptime(str(d)+'.'+str(mo)+'.'+str(y)+" - "+str(h)+':'+str(m)+':'+str(s), '%d.%m.%Y - %H:%M:%S')
ticks = int(time.mktime(mytime))
myticks = 0
if lasttime == -1:
lasttime = ticks
myticks = 0
else:
myticks = ticks - lasttime
lasttime = ticks
# lat
bytes = f.read(4)
lat = struct.unpack('=l', bytes)
#print('\tLat: '+str(lat[0]*1e-7))
# lon
bytes = f.read(4)
lon = struct.unpack('=l', bytes)
#print('\tLon: '+str(lon[0]*1e-7))
# alt
bytes = f.read(2)
alt = struct.unpack('=h', bytes)
#print('\tAlt:'+str(alt[0]))
#print('')
                        positions.append([lat[0] * 1e-7, lon[0] * 1e-7])
timediffs.append(myticks*1000)
if self.multiCheckbox.checkState() == Qt.Unchecked:
with open('template.html', 'r') as template:
tempstr = template.read()
tempstr = tempstr.replace('_ACCESS_TOKEN_', self.config["mapbox_access_token"])
tempstr = tempstr.replace('_REPLACE_POS_', str(positions))
tempstr = tempstr.replace('_REPLACE_TIME_', str(timediffs))
tempstr = tempstr.replace('_REPLACE_MULTINAMES_', str([os.path.split(fi)[1]]))
tempstr = tempstr.replace('_REPLACE_MULTIMAP_', "false")
fi = fi.replace('.TES', '.html')
out = open(fi, 'w')
out.write(tempstr)
out.close()
else:
multiPositions.append(positions)
multiTimediffs.append(timediffs)
multiNames.append(os.path.split(fi)[1])
# processing of individual files finishes
if self.multiCheckbox.checkState() == Qt.Checked:
print("in Multimode")
with open('template.html', 'r') as template:
tempstr = template.read()
tempstr = tempstr.replace('_ACCESS_TOKEN_', self.config["mapbox_access_token"])
tempstr = tempstr.replace('_REPLACE_POS_', str(multiPositions))
tempstr = tempstr.replace('_REPLACE_TIME_', str(multiTimediffs))
tempstr = tempstr.replace('_REPLACE_MULTINAMES_', str(multiNames))
tempstr = tempstr.replace('_REPLACE_MULTIMAP_', "true")
out = open("multimap.html", 'w')
out.write(tempstr)
out.close()
QMessageBox.information(self, "Information",
"Processing has finished")
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWin = MainWin()
mainWin.show()
sys.exit(app.exec_())
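# --- Illustrative sketch (editor's addition, not part of the original script) ---
# The TES branch above unpacks a date bit-packed into a single 32-bit word:
# seconds in bits 0-5, minutes in 6-11, hours in 12-16, day in 17-21,
# month in 22-25 and year-since-2000 in bits 26-31. The same masks as a
# standalone helper:
def _decode_tes_date(word):
    s = word & 63
    m = (word >> 6) & 63
    h = (word >> 12) & 31
    d = (word >> 17) & 31
    mo = (word >> 22) & 15
    y = ((word >> 26) & 63) + 2000
    return y, mo, d, h, m, s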
|
[
"time.strptime",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QGridLayout",
"struct.unpack",
"PyQt5.QtWidgets.QCheckBox",
"PyQt5.QtWidgets.QListWidget",
"PyQt5.QtWidgets.QGraphicsView",
"PyQt5.QtGui.QPen",
"PyQt5.QtGui.QBrush",
"PyQt5.QtWidgets.QFileDialog.getOpenFileNames",
"time.mktime",
"os.path.splitext",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QMessageBox.information",
"PyQt5.QtWidgets.QTabWidget",
"PyQt5.QtWidgets.QGraphicsScene",
"os.path.split"
] |
[((11795, 11817), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (11807, 11817), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((1138, 1150), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (1148, 1150), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((1232, 1241), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (1239, 1241), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((1263, 1276), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (1274, 1276), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((1472, 1485), 'PyQt5.QtWidgets.QListWidget', 'QListWidget', ([], {}), '()\n', (1483, 1485), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((1961, 1970), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (1968, 1970), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((1991, 2004), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ([], {}), '()\n', (2002, 2004), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((2060, 2075), 'PyQt5.QtWidgets.QGraphicsView', 'QGraphicsView', ([], {}), '()\n', (2073, 2075), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((2092, 2108), 'PyQt5.QtWidgets.QGraphicsScene', 'QGraphicsScene', ([], {}), '()\n', (2106, 2108), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((2195, 2210), 'PyQt5.QtGui.QBrush', 'QBrush', (['Qt.blue'], {}), '(Qt.blue)\n', (2201, 2210), False, 'from PyQt5.QtGui import QBrush, QPen\n'), ((2227, 2241), 'PyQt5.QtGui.QPen', 'QPen', (['Qt.black'], {}), '(Qt.black)\n', (2231, 2241), False, 'from PyQt5.QtGui import QBrush, QPen\n'), ((2546, 2561), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['text'], {}), '(text)\n', (2555, 2561), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((2636, 2690), 'PyQt5.QtWidgets.QFileDialog.getOpenFileNames', 'QFileDialog.getOpenFileNames', (['self', '"""Open files"""', '"""./"""'], {}), "(self, 'Open files', './')\n", (2664, 2690), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((11649, 11720), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""Information"""', '"""Processing has finished"""'], {}), "(self, 'Information', 'Processing has finished')\n", (11672, 11720), False, 'from PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, QToolButton, QFileDialog, QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QGridLayout, QLayout, QListWidget, QWidget, QCheckBox, QTabWidget, QGraphicsScene, QGraphicsView\n'), ((7145, 7171), 'struct.unpack', 'struct.unpack', (['"""=h"""', 'bytes'], {}), "('=h', bytes)\n", (7158, 7171), False, 'import struct\n'), ((7584, 7610), 'struct.unpack', 'struct.unpack', (['"""=L"""', 'bytes'], {}), "('=L', bytes)\n", (7597, 7610), False, 'import struct\n'), ((9186, 9212), 'struct.unpack', 'struct.unpack', (['"""=l"""', 'bytes'], {}), "('=l', bytes)\n", (9199, 9212), False, 'import struct\n'), ((9375, 9401), 'struct.unpack', 'struct.unpack', (['"""=l"""', 'bytes'], {}), "('=l', bytes)\n", (9388, 9401), False, 'import struct\n'), ((9564, 9590), 'struct.unpack', 'struct.unpack', (['"""=h"""', 'bytes'], {}), "('=h', bytes)\n", (9577, 9590), False, 'import struct\n'), ((4233, 4399), 'time.strptime', 'time.strptime', (["(date[0:2] + '.' + date[2:4] + '.' + '20' + date[4:6] + ' - ' + parttime[0:\n 2] + ':' + parttime[2:4] + ':' + parttime[4:6])", '"""%d.%m.%Y - %H:%M:%S"""'], {}), "(date[0:2] + '.' + date[2:4] + '.' + '20' + date[4:6] + ' - ' +\n parttime[0:2] + ':' + parttime[2:4] + ':' + parttime[4:6],\n '%d.%m.%Y - %H:%M:%S')\n", (4246, 4399), False, 'import time\n'), ((8768, 8787), 'time.mktime', 'time.mktime', (['mytime'], {}), '(mytime)\n', (8779, 8787), False, 'import time\n'), ((3382, 3402), 'os.path.splitext', 'os.path.splitext', (['fi'], {}), '(fi)\n', (3398, 3402), False, 'import os\n'), ((6815, 6832), 'os.path.split', 'os.path.split', (['fi'], {}), '(fi)\n', (6828, 6832), False, 'import os\n'), ((10852, 10869), 'os.path.split', 'os.path.split', (['fi'], {}), '(fi)\n', (10865, 10869), False, 'import os\n'), ((5265, 5284), 'time.mktime', 'time.mktime', (['mytime'], {}), '(mytime)\n', (5276, 5284), False, 'import time\n'), ((6261, 6278), 'os.path.split', 'os.path.split', (['fi'], {}), '(fi)\n', (6274, 6278), False, 'import os\n'), ((10365, 10382), 'os.path.split', 'os.path.split', (['fi'], {}), '(fi)\n', (10378, 10382), False, 'import os\n')]
|
#
# styleopt.py
# Artistic Style Transfer
# Optimisation method
# as defined in Gatys et. al
#
import os
import api
import numpy as np
import tensorflow as tf
import keras.backend as K
import matplotlib.pyplot as plt
import stylefn
from PIL import Image
from keras.models import Model, Sequential
from util import apply_settings
from tensorflow.contrib.opt import ScipyOptimizerInterface
from datetime import datetime
# Style transfer settings
# NOTE: the following are default settings and may be overriden
SETTINGS = {
"image_shape": (512, 512, 3),
# Optimisation settings
"learning_rate": 10,
"n_epochs": 100,
}
# Represents the computational graph that will perform style transfer using the
# optimisation method
class TransfuseGraph:
# Create a style transfer graph that caters the style and content images shapes
# with the given style transfer settings overrides & pastiche init value
def __init__(self, pastiche_init, settings):
self.settings = settings
# Define tensor shapes
self.style_shape = self.settings["image_shape"]
self.content_shape = self.settings["image_shape"]
self.pastiche_shape = self.settings["image_shape"]
self.build(pastiche_init)
# Build style transfer graph for the given pastiche_init value
def build(self, pastiche_init):
K.clear_session()
# Setup content and style tensors
self.content_op = K.placeholder(self.content_shape, name="content")
self.style_op = K.placeholder(self.style_shape, name="style")
# Setup pastiche tensor derieved from random noise
self.pastiche_op = K.variable(pastiche_init, name="pastiche")
# Build style transfer graph
self.loss_op = stylefn.build_loss(self.pastiche_op, self.content_op,
self.style_op, self.settings)
# Setup optimisation
self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(
self.loss_op, method='L-BFGS-B', options={'maxiter': 20},
var_list=[self.pastiche_op])
# Setup tensorboard
self.summary_op = tf.summary.merge_all()
self.writer = tf.summary.FileWriter("./logs/{}-{}".format(
self.settings, datetime.now().strftime("%H:%M:%S")))
self.session = K.get_session()
# Perform one iteration of style transfer using the inputs in feed dic
def transfer(self, feed):
# Perform training setup
self.optimizer.minimize(self.session, feed_dict=feed)
# Callback for writing tensorboard infomation given transfuse graph and current
# epoch number i_epoch and feed dict to run the graph
def callback_tensorboard(graph, feed, i_epoch):
summary = graph.session.run(graph.summary_op, feed_dict=feed)
graph.writer.add_summary(summary, i_epoch)
# Callback for display progress infomation given transfuse graph and current
# epoch number i_epoch and feed dict to run the graph
def callback_progress(graph, feed, i_epoch):
loss = graph.session.run(graph.loss_op, feed_dict=feed)
print("[{}/{}] loss: {:e}".format(i_epoch, graph.settings["n_epochs"], loss))
# Callback to display current pastiche given transfuse graph and current
# epoch number i_epoch and feed dict to run the graph
def callback_pastiche(graph, feed, i_epoch):
pastiche = graph.session.run(graph.pastiche_op, feed_dict=feed)
pastiche_image = stylefn.deprocess_image(pastiche, graph.pastiche_shape)
# Display image as a plot
plt.imshow(np.asarray(pastiche_image))
plt.draw()
plt.pause(1e-6)
plt.clf()
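# Illustrative sketch (editor's addition): callbacks receive (graph, feed,
# i_epoch), so a custom callback that snapshots the current pastiche to disk
# can be written as follows (the filename pattern is an assumption):
def callback_save(graph, feed, i_epoch):
    pastiche = graph.session.run(graph.pastiche_op, feed_dict=feed)
    image = stylefn.deprocess_image(pastiche, graph.pastiche_shape)
    image.save("pastiche_{:04d}.jpg".format(i_epoch))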
# Perform style transfer using the optimisation method on the given content imag
# using the style from the given style image, parameterised by settings
# Applys the given style transfer settings before performing style transfer
# Every callback_step number of epochs, will call the given callbacks
# Returns the pastiche, the results of performing style transfer
def transfer_style(content_image, style_image, settings={}, callbacks=[], callback_step=1):
# Apply setting overrides
settings = apply_settings(settings, SETTINGS)
print(settings)
# Preprocess image data
image_shape = settings["image_shape"]
content = stylefn.preprocess_image(content_image, image_shape)
style = stylefn.preprocess_image(style_image, image_shape)
# Define limits for generated pastiche
min_limits = - stylefn.IMG_BGR_MEAN
max_limits = 255.0 - stylefn.IMG_BGR_MEAN
# Build style transfer graph
pastiche_init = np.random.uniform(size=image_shape) * 255.0 - 127.5
graph = TransfuseGraph(pastiche_init=pastiche_init, settings=settings)
session = graph.session
session.run(tf.global_variables_initializer())
# Optimise style transfer graph to perform style transfer
feed = {graph.content_op: content, graph.style_op: style}
n_epochs = settings["n_epochs"]
for i_epoch in range(1, n_epochs + 1):
        # Clip the pastiche so its values stay within limits; the assign op
        # only takes effect once it is actually run in the session
        clipped_pastiche_op = tf.clip_by_value(graph.pastiche_op,
                                              min_limits, max_limits)
        session.run(graph.pastiche_op.assign(clipped_pastiche_op))
# Perform style transfer
graph.transfer(feed)
# Call callbacks
if i_epoch % callback_step == 0:
for callback in callbacks: callback(graph, feed, i_epoch)
# Deprocess style transfered image
pastiche = session.run(graph.pastiche_op, feed_dict=feed)
pastiche_image = stylefn.deprocess_image(pastiche, image_shape)
return pastiche_image
if __name__ == "__main__":
content_image = Image.open("data/Tuebingen_Neckarfront.jpg")
style_image = Image.open("data/stary_night.jpg")
settings = {
"image_shape": (32, 32, 3),
"n_epochs": 100
}
pastiche_image = transfer_style(content_image, style_image, settings=settings,
callbacks=[callback_pastiche, callback_progress,
callback_tensorboard],
callback_step=20)
pastiche_image.save("pastiche.jpg")
|
[
"tensorflow.clip_by_value",
"matplotlib.pyplot.clf",
"keras.backend.placeholder",
"matplotlib.pyplot.draw",
"stylefn.build_loss",
"datetime.datetime.now",
"matplotlib.pyplot.pause",
"tensorflow.summary.merge_all",
"keras.backend.clear_session",
"tensorflow.contrib.opt.ScipyOptimizerInterface",
"util.apply_settings",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"stylefn.deprocess_image",
"stylefn.preprocess_image",
"numpy.random.uniform",
"keras.backend.get_session",
"PIL.Image.open",
"keras.backend.variable"
] |
[((3469, 3524), 'stylefn.deprocess_image', 'stylefn.deprocess_image', (['pastiche', 'graph.pastiche_shape'], {}), '(pastiche, graph.pastiche_shape)\n', (3492, 3524), False, 'import stylefn\n'), ((3607, 3617), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3615, 3617), True, 'import matplotlib.pyplot as plt\n'), ((3622, 3638), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-06)'], {}), '(1e-06)\n', (3631, 3638), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3651), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3649, 3651), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4188), 'util.apply_settings', 'apply_settings', (['settings', 'SETTINGS'], {}), '(settings, SETTINGS)\n', (4168, 4188), False, 'from util import apply_settings\n'), ((4294, 4346), 'stylefn.preprocess_image', 'stylefn.preprocess_image', (['content_image', 'image_shape'], {}), '(content_image, image_shape)\n', (4318, 4346), False, 'import stylefn\n'), ((4359, 4409), 'stylefn.preprocess_image', 'stylefn.preprocess_image', (['style_image', 'image_shape'], {}), '(style_image, image_shape)\n', (4383, 4409), False, 'import stylefn\n'), ((5600, 5646), 'stylefn.deprocess_image', 'stylefn.deprocess_image', (['pastiche', 'image_shape'], {}), '(pastiche, image_shape)\n', (5623, 5646), False, 'import stylefn\n'), ((5726, 5770), 'PIL.Image.open', 'Image.open', (['"""data/Tuebingen_Neckarfront.jpg"""'], {}), "('data/Tuebingen_Neckarfront.jpg')\n", (5736, 5770), False, 'from PIL import Image\n'), ((5789, 5823), 'PIL.Image.open', 'Image.open', (['"""data/stary_night.jpg"""'], {}), "('data/stary_night.jpg')\n", (5799, 5823), False, 'from PIL import Image\n'), ((1369, 1386), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (1384, 1386), True, 'import keras.backend as K\n'), ((1456, 1505), 'keras.backend.placeholder', 'K.placeholder', (['self.content_shape'], {'name': '"""content"""'}), "(self.content_shape, name='content')\n", (1469, 1505), True, 'import keras.backend as K\n'), ((1530, 1575), 'keras.backend.placeholder', 'K.placeholder', (['self.style_shape'], {'name': '"""style"""'}), "(self.style_shape, name='style')\n", (1543, 1575), True, 'import keras.backend as K\n'), ((1671, 1713), 'keras.backend.variable', 'K.variable', (['pastiche_init'], {'name': '"""pastiche"""'}), "(pastiche_init, name='pastiche')\n", (1681, 1713), True, 'import keras.backend as K\n'), ((1775, 1863), 'stylefn.build_loss', 'stylefn.build_loss', (['self.pastiche_op', 'self.content_op', 'self.style_op', 'self.settings'], {}), '(self.pastiche_op, self.content_op, self.style_op, self.\n settings)\n', (1793, 1863), False, 'import stylefn\n'), ((1961, 2090), 'tensorflow.contrib.opt.ScipyOptimizerInterface', 'tf.contrib.opt.ScipyOptimizerInterface', (['self.loss_op'], {'method': '"""L-BFGS-B"""', 'options': "{'maxiter': 20}", 'var_list': '[self.pastiche_op]'}), "(self.loss_op, method='L-BFGS-B',\n options={'maxiter': 20}, var_list=[self.pastiche_op])\n", (1999, 2090), True, 'import tensorflow as tf\n'), ((2176, 2198), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (2196, 2198), True, 'import tensorflow as tf\n'), ((2355, 2370), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (2368, 2370), True, 'import keras.backend as K\n'), ((3575, 3601), 'numpy.asarray', 'np.asarray', (['pastiche_image'], {}), '(pastiche_image)\n', (3585, 3601), True, 'import numpy as np\n'), ((4766, 4799), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4797, 4799), True, 'import tensorflow as tf\n'), ((5103, 5162), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['graph.pastiche_op', 'min_limits', 'max_limits'], {}), '(graph.pastiche_op, min_limits, max_limits)\n', (5119, 5162), True, 'import tensorflow as tf\n'), ((4595, 4630), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'image_shape'}), '(size=image_shape)\n', (4612, 4630), True, 'import numpy as np\n'), ((2293, 2307), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2305, 2307), False, 'from datetime import datetime\n')]
|
from random import randint
a = input('Nome do 1° aluno: ')
b = input('Nome do 2° aluno: ')
c = input('Nome do 3° aluno: ')
d = input('Nome do 4° aluno: ')
esc = randint(1, 4)
print('=' * 12)
print(f'Aluno 1: {a}')
print(f'Aluno 2: {b}')
print(f'Aluno 3: {c}')
print(f'Aluno 4: {d}')
print(f'Escolhido: aluno {esc}')
|
[
"random.randint"
] |
[((161, 174), 'random.randint', 'randint', (['(1)', '(4)'], {}), '(1, 4)\n', (168, 174), False, 'from random import randint\n')]
|
# A simple python script to check conservation of
# the cloud mass over time: it compares the change in
# the volume-integral mass against the time-integrated
# surface fluxes and source term
import numpy as np
import matplotlib.pyplot as plt
# output data for setup
M = 1.0
mu = 0.05
r = 300
symmetry = 4
# make the plot
fig = plt.figure()
# volume integral dataset out
data1 = np.loadtxt("VolumeIntegrals.dat")
timedata = data1[:,0]
dM = symmetry*data1[:,3] - symmetry*data1[0,3]
Source = symmetry*data1[:,4]
# flux dataset out
data1 = np.loadtxt("SurfaceIntegrals.dat")
labelstring = "integral(Flux * dt)"
timedata = data1[:,0]
dt = timedata[1] - timedata[0]
NetEiFlux = data1[:,3]
NetEoFlux = data1[:,6]
FEodt = np.zeros_like(timedata)
FEidt = np.zeros_like(timedata)
Source_dt = np.zeros_like(timedata)
for i, F in enumerate(timedata):
    if i > 0:
        FEodt[i] += FEodt[i-1] + NetEoFlux[i] * dt
        FEidt[i] += FEidt[i-1] + NetEiFlux[i] * dt
        Source_dt[i] += Source_dt[i-1] + Source[i] * dt
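# Editor's note (illustrative): the loop above is a running rectangle-rule
# integration; an equivalent vectorised form would be
#   FEodt = (np.cumsum(NetEoFlux) - NetEoFlux[0]) * dt
#   FEidt = (np.cumsum(NetEiFlux) - NetEiFlux[0]) * dt
#   Source_dt = (np.cumsum(Source) - Source[0]) * dt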
plt.plot(timedata, FEodt, '-', lw = 1.0, label="Mdot outer dt")
plt.plot(timedata, FEidt, '-', lw = 1.0, label="Mdot inner dt")
plt.plot(timedata, Source_dt, '-', lw = 1.0, label="Source dt")
plt.plot(timedata, dM, '-', lw = 1.0, label="M-M0")
plt.plot(timedata, FEidt - FEodt + Source_dt, '--', lw = 1.0, label="check M-M0")
# make the plot look nice
plt.xlabel("time")
plt.ylabel("Change in Cloud Mom")
#plt.xlim(0, 100)
#plt.ylim(-10, 10)
plt.legend(loc=0)
plt.grid()
# save as png image
filename = "MvsT.png"
plt.savefig(filename)
|
[
"numpy.zeros_like",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] |
[((219, 231), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (229, 231), True, 'import matplotlib.pyplot as plt\n'), ((271, 304), 'numpy.loadtxt', 'np.loadtxt', (['"""VolumeIntegrals.dat"""'], {}), "('VolumeIntegrals.dat')\n", (281, 304), True, 'import numpy as np\n'), ((431, 465), 'numpy.loadtxt', 'np.loadtxt', (['"""SurfaceIntegrals.dat"""'], {}), "('SurfaceIntegrals.dat')\n", (441, 465), True, 'import numpy as np\n'), ((609, 632), 'numpy.zeros_like', 'np.zeros_like', (['timedata'], {}), '(timedata)\n', (622, 632), True, 'import numpy as np\n'), ((641, 664), 'numpy.zeros_like', 'np.zeros_like', (['timedata'], {}), '(timedata)\n', (654, 664), True, 'import numpy as np\n'), ((677, 700), 'numpy.zeros_like', 'np.zeros_like', (['timedata'], {}), '(timedata)\n', (690, 700), True, 'import numpy as np\n'), ((907, 968), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', 'FEodt', '"""-"""'], {'lw': '(1.0)', 'label': '"""Mdot outer dt"""'}), "(timedata, FEodt, '-', lw=1.0, label='Mdot outer dt')\n", (915, 968), True, 'import matplotlib.pyplot as plt\n'), ((971, 1032), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', 'FEidt', '"""-"""'], {'lw': '(1.0)', 'label': '"""Mdot inner dt"""'}), "(timedata, FEidt, '-', lw=1.0, label='Mdot inner dt')\n", (979, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1035, 1096), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', 'Source_dt', '"""-"""'], {'lw': '(1.0)', 'label': '"""Source dt"""'}), "(timedata, Source_dt, '-', lw=1.0, label='Source dt')\n", (1043, 1096), True, 'import matplotlib.pyplot as plt\n'), ((1099, 1148), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', 'dM', '"""-"""'], {'lw': '(1.0)', 'label': '"""M-M0"""'}), "(timedata, dM, '-', lw=1.0, label='M-M0')\n", (1107, 1148), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1230), 'matplotlib.pyplot.plot', 'plt.plot', (['timedata', '(FEidt - FEodt + Source_dt)', '"""--"""'], {'lw': '(1.0)', 'label': '"""check M-M0"""'}), "(timedata, FEidt - FEodt + Source_dt, '--', lw=1.0, label='check M-M0')\n", (1159, 1230), True, 'import matplotlib.pyplot as plt\n'), ((1260, 1278), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (1270, 1278), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1312), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Change in Cloud Mom"""'], {}), "('Change in Cloud Mom')\n", (1289, 1312), True, 'import matplotlib.pyplot as plt\n'), ((1350, 1367), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (1360, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1368, 1378), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1376, 1378), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1443), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (1433, 1443), True, 'import matplotlib.pyplot as plt\n')]
|
import os
import csv
import datetime
import jinja2
import webapp2
from webapp2_extras import json
from google.appengine.api import urlfetch
from google.appengine.api import memcache
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def fetchData():
departures = memcache.get('departures')
if departures is not None:
return departures
headers = {}
etag = memcache.get('etag')
if etag:
headers['If-None-Match'] = etag
result = urlfetch.fetch(
url='http://developer.mbta.com/lib/gtrtfs/Departures.csv',
headers=headers)
#result = urlfetch.fetch('http://localhost:8080/static/example.csv')
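    # 200 means fresh data (cache the new ETag and body for later 304 replies);
    # 304 means unchanged (fall back to the body cached on the last 200).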
if result.status_code == 200:
memcache.set('etag', result.headers['etag'])
memcache.set('lastresult', result.content)
result = result.content
elif result.status_code == 304:
result = memcache.get('lastresult')
else:
return None
response = []
boardcsv = csv.DictReader(result.splitlines())
for row in boardcsv:
if row['Origin'] == 'North Station':
response.append(row)
memcache.set('departures', response, time=15)
return response
class BoardHandler(webapp2.RequestHandler):
def get(self):
departures = fetchData()
if departures is not None:
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.encode(departures))
class NoJsHandler(webapp2.RequestHandler):
def get(self):
departures = fetchData()
if departures is not None:
nowtime = datetime.datetime.now()
for row in departures:
timestamp = datetime.datetime.fromtimestamp(int(row['ScheduledTime']))
row['ScheduledTime'] = timestamp.strftime("%H:%M")
template_values = {
'refreshtime': 'last updated',
'dayofweek': nowtime.strftime("%A"),
'curdate': nowtime.strftime("%Y-%m-%d"),
'curtime': nowtime.strftime("%H:%M") + ' utc',
'tracks': departures,
}
template = JINJA_ENVIRONMENT.get_template('board.templ')
self.response.write(template.render(template_values))
application = webapp2.WSGIApplication([
('/board', BoardHandler),
('/nojs.html', NoJsHandler),
], debug=True)
|
[
"google.appengine.api.urlfetch.fetch",
"os.path.dirname",
"webapp2_extras.json.encode",
"webapp2.WSGIApplication",
"datetime.datetime.now",
"google.appengine.api.memcache.set",
"google.appengine.api.memcache.get"
] |
[((2374, 2470), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/board', BoardHandler), ('/nojs.html', NoJsHandler)]"], {'debug': '(True)'}), "([('/board', BoardHandler), ('/nojs.html',\n NoJsHandler)], debug=True)\n", (2397, 2470), False, 'import webapp2\n'), ((386, 412), 'google.appengine.api.memcache.get', 'memcache.get', (['"""departures"""'], {}), "('departures')\n", (398, 412), False, 'from google.appengine.api import memcache\n'), ((499, 519), 'google.appengine.api.memcache.get', 'memcache.get', (['"""etag"""'], {}), "('etag')\n", (511, 519), False, 'from google.appengine.api import memcache\n'), ((587, 681), 'google.appengine.api.urlfetch.fetch', 'urlfetch.fetch', ([], {'url': '"""http://developer.mbta.com/lib/gtrtfs/Departures.csv"""', 'headers': 'headers'}), "(url='http://developer.mbta.com/lib/gtrtfs/Departures.csv',\n headers=headers)\n", (601, 681), False, 'from google.appengine.api import urlfetch\n'), ((1226, 1271), 'google.appengine.api.memcache.set', 'memcache.set', (['"""departures"""', 'response'], {'time': '(15)'}), "('departures', response, time=15)\n", (1238, 1271), False, 'from google.appengine.api import memcache\n'), ((810, 854), 'google.appengine.api.memcache.set', 'memcache.set', (['"""etag"""', "result.headers['etag']"], {}), "('etag', result.headers['etag'])\n", (822, 854), False, 'from google.appengine.api import memcache\n'), ((863, 905), 'google.appengine.api.memcache.set', 'memcache.set', (['"""lastresult"""', 'result.content'], {}), "('lastresult', result.content)\n", (875, 905), False, 'from google.appengine.api import memcache\n'), ((260, 285), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (275, 285), False, 'import os\n'), ((991, 1017), 'google.appengine.api.memcache.get', 'memcache.get', (['"""lastresult"""'], {}), "('lastresult')\n", (1003, 1017), False, 'from google.appengine.api import memcache\n'), ((1705, 1728), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1726, 1728), False, 'import datetime\n'), ((1527, 1550), 'webapp2_extras.json.encode', 'json.encode', (['departures'], {}), '(departures)\n', (1538, 1550), False, 'from webapp2_extras import json\n')]
|
import os
import matplotlib.pyplot as plt
import numpy as np
import json
import seaborn as sns
from collections import deque
sns.set()
import glob2
import argparse
from cycler import cycler
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
from matplotlib.font_manager import FontProperties
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter, MaxNLocator
def load_results(file, dtype=None):
if not os.path.exists(file):
return None
with open(file, 'r') as f:
lines = [line for line in f]
if len(lines) < 2:
return None
keys = [name.strip() for name in lines[0].split(',')]
if dtype is None:
data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0.)
else:
data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0., dtype=dtype)
if data.ndim == 1:
data = data.reshape(1, -1)
assert data.ndim == 2
assert data.shape[-1] == len(keys)
result = {}
for idx, key in enumerate(keys):
result[key] = data[:, idx]
return result
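# Added usage sketch (illustrative only; 'progress.csv' is a hypothetical CSV
# log with a one-line header such as "epoch,success_rate"):
def _demo_load_results():
    res = load_results('progress.csv')  # returns None if the file is missing or too short
    if res is not None:
        print(sorted(res.keys()))  # one numpy column per header field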
# def pad(xs, value=np.nan, maxlen=None):
# if maxlen is None:
# maxlen = np.max([len(x) for x in xs])
#
# padded_xs = []
# for x in xs:
# if x.shape[0] >= maxlen:
# padded_xs.append(x)
#
# padding = np.ones((maxlen - x.shape[0],) + x.shape[1:]) * value
# x_padded = np.concatenate([x, padding], axis=0)
# assert x_padded.shape[1:] == x.shape[1:]
# assert x_padded.shape[0] == maxlen
# padded_xs.append(x_padded)
# return np.array(padded_xs)
#
# def smooth_curve(x, y):
# halfwidth = int(np.ceil(len(x) / 60)) # Halfwidth of our smoothing convolution
# k = halfwidth
# xsmoo = x
# ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),
# mode='same')
# return xsmoo, ysmoo
def prepare_data(paths):
inter_dict = {}
var_param_keys = set()
max_episodes = 0
for curr_path in paths:
if not os.path.isdir(curr_path):
continue
print('loading {}'.format(curr_path))
# results = load_results(os.path.join(curr_path, 'mask_records.csv'))
# if not results:
# print('skipping {}'.format(curr_path))
# continue
with open(os.path.join(curr_path, 'params.json'), 'r') as f:
params = json.load(f)
for k,v in params.items():
if k not in inter_dict.keys():
inter_dict[k] = [v]
if v not in inter_dict[k]:
inter_dict[k].append(v)
var_param_keys.add(k)
# max_episodes = max(max_episodes, len(results['episode']))
return var_param_keys
def plot_epochs_success(data, percent_to_achieve, fig_dir):
plt.clf()
# fig = plt.figure(figsize=(20, 8))
plt.figure(figsize=(9, 4.5))
new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
# plt.rc('axes', prop_cycle=(cycler('linestyle', ['-', '--', ':']) * cycler('color', new_colors)))
surf_plot_data = {}
uniform_sampling_epochs = []
none_sampling_epochs = []
kappa_s = set()
rg_s = set()
for config in sorted(data.keys()):
epochs = []
for d in data[config]:
try:
epoch = min(np.argwhere(d[1] > percent_to_achieve))[0]
except:
print("Not enough data for {}".format(config))
continue
epochs.append(epoch)
# epochs = [len(d[0]) for d in data[config]]
if 'curriculum_sampling: none' in config:
none_sampling_epochs += epochs
kappa_s.add(-1)
continue
median_epochs = np.median(epochs)
min_perc = np.nanpercentile(epochs, 25, axis=0)
max_perc = np.nanpercentile(epochs, 75, axis=0)
avg_epochs = np.mean(epochs)
n_runs = len(epochs)
std_epochs = np.std(epochs)
if 'stochastic3_' not in config:
continue
rg = float(config.split("stochastic3_")[1].split("_")[0])
rg_s.add(rg)
kappa = float(config.split("stochastic3_")[1].split("_")[2])
kappa_s.add(kappa)
if rg not in surf_plot_data.keys():
surf_plot_data[rg] = {}
if kappa == 0.0:
uniform_sampling_epochs += epochs
surf_plot_data[rg][kappa] = (avg_epochs, std_epochs, n_runs, median_epochs, min_perc, max_perc)
uniform_avg_epochs = np.mean(uniform_sampling_epochs)
none_avg_epochs = np.mean(none_sampling_epochs)
uniform_std_epochs = np.std(uniform_sampling_epochs)
none_std_epochs = np.std(none_sampling_epochs)
uniform_median_epochs = np.median(uniform_sampling_epochs)
none_median_epochs = np.median(none_sampling_epochs)
uniform_min_perc = np.nanpercentile(uniform_sampling_epochs, 25, axis=0)
none_min_perc = np.nanpercentile(none_sampling_epochs, 25, axis=0)
uniform_max_perc = np.nanpercentile(uniform_sampling_epochs, 75, axis=0)
none_max_perc = np.nanpercentile(none_sampling_epochs, 75, axis=0)
for rg in surf_plot_data.keys():
surf_plot_data[rg][0.0] = (
uniform_avg_epochs, uniform_std_epochs, len(uniform_sampling_epochs), uniform_median_epochs, uniform_min_perc,
uniform_max_perc)
surf_plot_data[rg][-1] = (
none_avg_epochs, none_std_epochs, len(none_sampling_epochs), none_median_epochs, none_min_perc,
none_max_perc)
kappa_s = sorted(list(kappa_s))
# kappa_s.insert(1,0)
rg_s = sorted(list(rg_s))
# surf_plot_data_arr = np.array(list(surf_plot_data.items()))
for idx, kappa in enumerate(kappa_s):
# label = "c={} -n: {}".format(c, len(surf_plot_data[0][kappa]))
# n_runs = ''
# n_runs = np.mean(0)
        c_label = r"$\kappa$={}".format(kappa)
if kappa== -1:
c_label = "no CGM"
continue
if kappa== 0:
c_label = "uniform GM"
continue
label = "{}".format(c_label)
xs = sorted(list(surf_plot_data.keys()))
xs = np.array([k for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
        # ys = np.array([surf_plot_data[k][kappa][0] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
# std_ys = np.array([surf_plot_data[k][kappa][1] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
# min_vals = ys + std_ys
# max_vals = ys - std_ys
ys = np.array([surf_plot_data[k][kappa][3] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
n_runs = np.array([surf_plot_data[k][kappa][2] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
min_vals = np.array([surf_plot_data[k][kappa][4] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
max_vals = np.array([surf_plot_data[k][kappa][5] for k in sorted(surf_plot_data.keys()) if kappa in surf_plot_data[k].keys()])
if np.array(xs).shape != ys.shape:
print("This data probably has not all kappas")
continue
color = new_colors[idx]
print("C {} has color {}".format(kappa,color))
# Add median points
plt.scatter(xs, ys, color=color)
# Add number of runs
# for d_idx, n in enumerate(n_runs):
# plt.gca().annotate(str(n), (xs[d_idx], ys[d_idx]))
# Add lines
plt.plot(xs, ys, label=label, color=color)
# Add quartiles
plt.plot(xs, min_vals, linestyle='dashed', color=color, alpha=0.25)
plt.plot(xs, max_vals, linestyle='dashed', color=color, alpha=0.25)
# break
# plt.fill_between(xs, min_vals, max_vals, alpha=0.25)
# plt.fill_between(xs, min_vals, max_vals, alpha=0.1)
# plt.legend(loc='upper left', bbox_to_anchor=(5.05,1.83))
ax = plt.gca()
# ax.set_xlim([0, 70])
ax.set_ylim([20, 80])
plt.xlabel('$c_g$')
plt.ylabel('epochs to achieve {}% success rate'.format(int(percent_to_achieve*100)))
plt.legend(loc='upper left')
# plt.title("Number of epochs to achieve {}% success rate".format(int(percent_to_achieve*100)), loc='center', pad=-20)
plt.savefig(os.path.join(fig_dir, 'penalty_hyperopt_.png'))
if __name__ == '__main__':
matplotlib.rcParams['font.family'] = "serif"
matplotlib.rcParams['font.weight'] = 'normal'
new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
parallel_rollouts=4
training_rollout_cycles_per_epoch=64
eval_rollout_cycles_per_epoch = 10
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
parser.add_argument('--smooth', type=int, default=1)
args = parser.parse_args()
    # `data` must first be assembled from the run directories (the original
    # script calls the plot with an undefined name); 0.5 is the success-rate
    # fraction to reach, reported as 50% in the axis label.
    plot_epochs_success(data, 0.5, args.dir)
|
[
"numpy.nanpercentile",
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.gca",
"os.path.join",
"numpy.std",
"os.path.exists",
"numpy.genfromtxt",
"seaborn.set",
"numpy.median",
"matplotlib.pyplot.legend",
"numpy.argwhere",
"json.load",
"matplotlib.pyplot.plot",
"os.path.isdir",
"matplotlib.pyplot.scatter",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((126, 135), 'seaborn.set', 'sns.set', ([], {}), '()\n', (133, 135), True, 'import seaborn as sns\n'), ((2994, 3003), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3001, 3003), True, 'import matplotlib.pyplot as plt\n'), ((3048, 3076), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 4.5)'}), '(figsize=(9, 4.5))\n', (3058, 3076), True, 'import matplotlib.pyplot as plt\n'), ((4769, 4801), 'numpy.mean', 'np.mean', (['uniform_sampling_epochs'], {}), '(uniform_sampling_epochs)\n', (4776, 4801), True, 'import numpy as np\n'), ((4824, 4853), 'numpy.mean', 'np.mean', (['none_sampling_epochs'], {}), '(none_sampling_epochs)\n', (4831, 4853), True, 'import numpy as np\n'), ((4879, 4910), 'numpy.std', 'np.std', (['uniform_sampling_epochs'], {}), '(uniform_sampling_epochs)\n', (4885, 4910), True, 'import numpy as np\n'), ((4933, 4961), 'numpy.std', 'np.std', (['none_sampling_epochs'], {}), '(none_sampling_epochs)\n', (4939, 4961), True, 'import numpy as np\n'), ((4990, 5024), 'numpy.median', 'np.median', (['uniform_sampling_epochs'], {}), '(uniform_sampling_epochs)\n', (4999, 5024), True, 'import numpy as np\n'), ((5050, 5081), 'numpy.median', 'np.median', (['none_sampling_epochs'], {}), '(none_sampling_epochs)\n', (5059, 5081), True, 'import numpy as np\n'), ((5105, 5158), 'numpy.nanpercentile', 'np.nanpercentile', (['uniform_sampling_epochs', '(25)'], {'axis': '(0)'}), '(uniform_sampling_epochs, 25, axis=0)\n', (5121, 5158), True, 'import numpy as np\n'), ((5179, 5229), 'numpy.nanpercentile', 'np.nanpercentile', (['none_sampling_epochs', '(25)'], {'axis': '(0)'}), '(none_sampling_epochs, 25, axis=0)\n', (5195, 5229), True, 'import numpy as np\n'), ((5253, 5306), 'numpy.nanpercentile', 'np.nanpercentile', (['uniform_sampling_epochs', '(75)'], {'axis': '(0)'}), '(uniform_sampling_epochs, 75, axis=0)\n', (5269, 5306), True, 'import numpy as np\n'), ((5327, 5377), 'numpy.nanpercentile', 'np.nanpercentile', (['none_sampling_epochs', '(75)'], {'axis': '(0)'}), '(none_sampling_epochs, 75, axis=0)\n', (5343, 5377), True, 'import numpy as np\n'), ((8220, 8229), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8227, 8229), True, 'import matplotlib.pyplot as plt\n'), ((8287, 8306), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$c_g$"""'], {}), "('$c_g$')\n", (8297, 8306), True, 'import matplotlib.pyplot as plt\n'), ((8401, 8429), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (8411, 8429), True, 'import matplotlib.pyplot as plt\n'), ((9027, 9052), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9050, 9052), False, 'import argparse\n'), ((532, 552), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (546, 552), False, 'import os\n'), ((780, 849), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'skip_header': '(1)', 'filling_values': '(0.0)'}), "(file, delimiter=',', skip_header=1, filling_values=0.0)\n", (793, 849), True, 'import numpy as np\n'), ((874, 961), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'skip_header': '(1)', 'filling_values': '(0.0)', 'dtype': 'dtype'}), "(file, delimiter=',', skip_header=1, filling_values=0.0, dtype\n =dtype)\n", (887, 961), True, 'import numpy as np\n'), ((4008, 4025), 'numpy.median', 'np.median', (['epochs'], {}), '(epochs)\n', (4017, 4025), True, 'import numpy as np\n'), ((4045, 4081), 'numpy.nanpercentile', 'np.nanpercentile', (['epochs', '(25)'], {'axis': '(0)'}), '(epochs, 25, axis=0)\n', (4061, 4081), True, 'import numpy as np\n'), ((4101, 4137), 'numpy.nanpercentile', 'np.nanpercentile', (['epochs', '(75)'], {'axis': '(0)'}), '(epochs, 75, axis=0)\n', (4117, 4137), True, 'import numpy as np\n'), ((4159, 4174), 'numpy.mean', 'np.mean', (['epochs'], {}), '(epochs)\n', (4166, 4174), True, 'import numpy as np\n'), ((4225, 4239), 'numpy.std', 'np.std', (['epochs'], {}), '(epochs)\n', (4231, 4239), True, 'import numpy as np\n'), ((7588, 7620), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'color': 'color'}), '(xs, ys, color=color)\n', (7599, 7620), True, 'import matplotlib.pyplot as plt\n'), ((7788, 7830), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {'label': 'label', 'color': 'color'}), '(xs, ys, label=label, color=color)\n', (7796, 7830), True, 'import matplotlib.pyplot as plt\n'), ((7863, 7930), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'min_vals'], {'linestyle': '"""dashed"""', 'color': 'color', 'alpha': '(0.25)'}), "(xs, min_vals, linestyle='dashed', color=color, alpha=0.25)\n", (7871, 7930), True, 'import matplotlib.pyplot as plt\n'), ((7939, 8006), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'max_vals'], {'linestyle': '"""dashed"""', 'color': 'color', 'alpha': '(0.25)'}), "(xs, max_vals, linestyle='dashed', color=color, alpha=0.25)\n", (7947, 8006), True, 'import matplotlib.pyplot as plt\n'), ((8569, 8615), 'os.path.join', 'os.path.join', (['fig_dir', '"""penalty_hyperopt_.png"""'], {}), "(fig_dir, 'penalty_hyperopt_.png')\n", (8581, 8615), False, 'import os\n'), ((2227, 2251), 'os.path.isdir', 'os.path.isdir', (['curr_path'], {}), '(curr_path)\n', (2240, 2251), False, 'import os\n'), ((2591, 2603), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2600, 2603), False, 'import json\n'), ((2519, 2557), 'os.path.join', 'os.path.join', (['curr_path', '"""params.json"""'], {}), "(curr_path, 'params.json')\n", (2531, 2557), False, 'import os\n'), ((7350, 7362), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (7358, 7362), True, 'import numpy as np\n'), ((3604, 3642), 'numpy.argwhere', 'np.argwhere', (['(d[1] > percent_to_achieve)'], {}), '(d[1] > percent_to_achieve)\n', (3615, 3642), True, 'import numpy as np\n')]
|
"""
Tests of neo.io.elphyo
"""
import unittest
from neo.io import ElphyIO
from neo.test.iotest.common_io_test import BaseTestIO
class TestElphyIO(BaseTestIO, unittest.TestCase):
ioclass = ElphyIO
entities_to_download = [
'elphy'
]
entities_to_test = ['elphy/DATA1.DAT',
'elphy/ElphyExample.DAT',
'elphy/ElphyExample_Mode1.dat',
'elphy/ElphyExample_Mode2.dat',
'elphy/ElphyExample_Mode3.dat']
def test_read_data(self):
for filename in self.entities_to_test:
io = ElphyIO(self.get_local_path(filename))
bl = io.read_block()
self.assertTrue(len(bl.segments) > 0)
# ensure that at least one data object is generated for each file
self.assertTrue(any(list(bl.segments[0].size.values())))
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main"
] |
[((914, 929), 'unittest.main', 'unittest.main', ([], {}), '()\n', (927, 929), False, 'import unittest\n')]
|
# -*- encoding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
from os import environ
from mongorest.settings import settings
from mongorest.testcase import TestCase
class TestSettings(TestCase):
def test_settings_default_values(self):
environ.pop('MONGOREST_SETTINGS_MODULE', None)
self.assertEqual(settings.AUTH_COLLECTION, '')
self.assertIsNotNone(settings.CORS)
self.assertEqual(
settings.CORS['Access-Control-Allow-Origin'],
'*'
)
self.assertEqual(
settings.CORS['Access-Control-Allow-Methods'],
'GET,POST,PUT,PATCH,DELETE,OPTIONS'
)
self.assertEqual(
settings.CORS['Access-Control-Allow-Headers'],
'Accept,Accept-Encoding,Authorization,Content-Length,Content-Type,'
'Origin,User-Agent,X-CSRFToken,X-Requested-With'
)
self.assertEqual(
settings.CORS['Access-Control-Allow-Credentials'], 'true'
)
self.assertEqual(settings.MIDDLEWARES, [])
self.assertIsNotNone(settings.MONGODB)
self.assertEqual(settings.MONGODB['URI'], '')
self.assertEqual(settings.MONGODB['USERNAME'], '')
self.assertEqual(settings.MONGODB['PASSWORD'], '')
self.assertEqual(settings.MONGODB['HOST'], 'localhost')
self.assertEqual(settings.MONGODB['HOSTS'], [])
self.assertEqual(settings.MONGODB['PORT'], 27017)
self.assertEqual(settings.MONGODB['PORTS'], [])
self.assertEqual(settings.MONGODB['DATABASE'], 'mongorest')
self.assertEqual(settings.MONGODB['OPTIONS'], [])
self.assertEqual(settings.RETRY_LIMIT, 5)
self.assertEqual(settings.BASE_RETRY_TIME, 2)
self.assertEqual(settings.LINEAR_RETRIES, False)
self.assertEqual(settings.SESSION_STORE, '')
def test_a_default_setting_can_be_overwritten(self):
environ.pop('MONGOREST_SETTINGS_MODULE', None)
self.assertEqual(settings.MONGODB['URI'], '')
environ['MONGOREST_SETTINGS_MODULE'] = 'tests.fixtures.settings_test_settings'
self.assertEqual(settings.MONGODB['URI'], 'test')
def test_a_new_setting_value_can_be_added(self):
environ.pop('MONGOREST_SETTINGS_MODULE', None)
environ['MONGOREST_SETTINGS_MODULE'] = 'tests.fixtures.settings_test_settings'
self.assertEqual(settings.TEST, 'test')
def test_an_invalid_setting_will_raise_error(self):
environ.pop('MONGOREST_SETTINGS_MODULE', None)
with self.assertRaises(AttributeError):
return settings.i_am_an_invalid_setting
|
[
"os.environ.pop"
] |
[((273, 319), 'os.environ.pop', 'environ.pop', (['"""MONGOREST_SETTINGS_MODULE"""', 'None'], {}), "('MONGOREST_SETTINGS_MODULE', None)\n", (284, 319), False, 'from os import environ\n'), ((1930, 1976), 'os.environ.pop', 'environ.pop', (['"""MONGOREST_SETTINGS_MODULE"""', 'None'], {}), "('MONGOREST_SETTINGS_MODULE', None)\n", (1941, 1976), False, 'from os import environ\n'), ((2241, 2287), 'os.environ.pop', 'environ.pop', (['"""MONGOREST_SETTINGS_MODULE"""', 'None'], {}), "('MONGOREST_SETTINGS_MODULE', None)\n", (2252, 2287), False, 'from os import environ\n'), ((2489, 2535), 'os.environ.pop', 'environ.pop', (['"""MONGOREST_SETTINGS_MODULE"""', 'None'], {}), "('MONGOREST_SETTINGS_MODULE', None)\n", (2500, 2535), False, 'from os import environ\n')]
|
# give the base class a short, readable nickname
from SimpleXMLRPCServer import SimpleXMLRPCServer as BaseServer
class Server(BaseServer):
def __init__(self, host, port):
# accept separate hostname and portnumber and group them
BaseServer.__init__(self, (host, port))
def server_bind(self):
# allow fast restart of the server after it's killed
import socket
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
BaseServer.server_bind(self)
allowedClientHosts = '127.0.0.1', '192.168.0.15',
def verify_request(self, request, client_address):
# forbid requests except from specific client hosts
return client_address[0] in self.allowedClientHosts
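# A minimal usage sketch (added illustration; host, port, and the exposed
# callable are assumptions, not part of the original module):
if __name__ == '__main__':
    server = Server('localhost', 8888)
    server.register_function(pow)  # expose a sample function to XML-RPC clients
    server.serve_forever()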
|
[
"SimpleXMLRPCServer.SimpleXMLRPCServer.__init__",
"SimpleXMLRPCServer.SimpleXMLRPCServer.server_bind"
] |
[((248, 287), 'SimpleXMLRPCServer.SimpleXMLRPCServer.__init__', 'BaseServer.__init__', (['self', '(host, port)'], {}), '(self, (host, port))\n', (267, 287), True, 'from SimpleXMLRPCServer import SimpleXMLRPCServer as BaseServer\n'), ((480, 508), 'SimpleXMLRPCServer.SimpleXMLRPCServer.server_bind', 'BaseServer.server_bind', (['self'], {}), '(self)\n', (502, 508), True, 'from SimpleXMLRPCServer import SimpleXMLRPCServer as BaseServer\n')]
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import warnings
from math import ceil
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing.label import _encode, _encode_check_unknown
from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples
from sagemaker_sklearn_extension.impute import RobustImputer
class ThresholdOneHotEncoder(OneHotEncoder):
"""Encode categorical integer features as a one-hot numeric array, with optional restrictions on feature encoding.
This adds functionality to encode only if a feature appears more than ``threshold`` number of times. It also adds
functionality to bound the number of categories per feature to ``max_categories``.
This transformer is an extension of ``OneHotEncoder`` from the ``sklearn.preprocessing`` module.
Parameters
----------
categories : 'auto' or a list of lists/arrays of values (default = 'auto')
Categories (unique values) per feature:
- 'auto' : Determine categories automatically from the training data.
- list : ``categories[i]`` holds the categories expected in the ith column. The passed categories should not
mix strings and numeric values within a single feature, and should be sorted in case of numeric values.
The used categories can be found in the ``categories_`` attribute.
drop : 'first' or a list/array of shape (n_features,) (default = None)
Specifies a methodology to use to drop one of the categories per feature. This is useful in situations where
perfectly collinear features cause problems, such as when feeding the resulting data into a neural network or
an unregularized regression.
- None : retain all features (the default).
- 'first' : drop the first category in each feature. If only one category is present, the feature will be
dropped entirely.
- array : ``drop[i]`` is the category in feature ``X[:, i]`` that should be dropped.
sparse : boolean (default = True)
Will return sparse matrix if set True else will return an array.
dtype : number type (default = np.float64)
Desired dtype of output.
threshold : float (default = max(10, n_features / 1000))
The threshold for including a value in the encoding of the result. Default value is the maximum of `10` or
`n_features / 1000` where `n_features` is the number of columns of input X. How this parameter is interpreted
depends on whether it is more than or equal to or less than 1.
- If `threshold` is more than or equal to one, it represents the number of times a value must appear to be
one hot encoded in the result.
- If `threshold` is less than one, it represents the fraction of rows which must contain the value for it to be
one hot encoded in the result. The values is rounded up, so if `threshold` is 0.255 and there are 100 rows, a
value must appear at least 26 times to be included.
max_categories : int (default = 100)
Maximum number of categories to encode per feature. If the number of observed categories is greater than
``max_categories``, the encoder will take the top ``max_categories`` observed categories, sorted by count.
Attributes
----------
categories_ : list of arrays
The categories of each feature determined during fitting (in order of the features in X and corresponding with
the output of ``transform``). This includes the category specified in ``drop`` (if any).
drop_idx_ : array of shape (n_features,)
``drop_idx_[i]`` is the index in ``categories_[i]`` of the category to be dropped for each feature. None if all
the transformed features will be retained.
"""
def __init__(self, categories=None, drop=None, sparse=True, dtype=np.float64, threshold=None, max_categories=100):
super().__init__(None, None, categories, drop, sparse, dtype, "ignore")
self.threshold = threshold
self.max_categories = max_categories
def fit(self, X, y=None):
"""Fit ThresholdOneHotEncoder to X.
Overrides self.categories_ under the following conditions:
- include values that appear at least ``threshold`` number of times
- include the top ``self.max_categories`` number of categories to encode
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to determine the categories of each feature.
Returns
-------
self : ThresholdOneHotEncoder
"""
super().fit(X, y)
assert self.max_categories >= 1
_, n_samples, n_features = self._check_X(X)
if not self.threshold:
threshold = max(10, n_samples / 1000)
elif self.threshold >= 1:
threshold = self.threshold
else:
threshold = ceil(self.threshold * n_samples)
n_features_completely_under_threshold = 0
for j in range(n_features):
# get unique values and their counts
items, counts = np.unique([row[j] for row in X], return_counts=True)
# add items that appear more than threshold times
self.categories_[j] = items[counts >= threshold].astype("O")
if self.categories_[j].size == 0:
n_features_completely_under_threshold += 1
# If no category is above the threshold, then create an unknown category to prevent
# self.transform() from raising an IndexError.
items.sort()
unknown_category = "{}___".format(items[-1])
# It's important to keep the dtype of `self.categories_[j]` as 'U' here because our `unknown_category`
# might end up being longer than any of the seen categories, and that changes the behavior of
# the `self._transform` method.
self.categories_[j] = np.asarray([unknown_category], dtype="U")
elif len(self.categories_[j]) > self.max_categories:
items_and_counts = dict(zip(items, counts))
self.categories_[j] = np.asarray(
sorted(items_and_counts, key=items_and_counts.get, reverse=True)[: self.max_categories], dtype="O"
)
if n_features_completely_under_threshold > 0:
times = "time" if self.threshold == 1 else "times"
warnings.warn(
"{} out of {} features do not have any categories appearing more than threshold={} {}.".format(
n_features_completely_under_threshold, n_features, self.threshold, times
)
)
return self
def _more_tags(self):
return {"X_types": ["categorical"]}
class RobustLabelEncoder(LabelEncoder):
"""Encode labels for seen and unseen labels.
Seen labels are encoded with value between 0 and n_classes-1. Unseen labels are encoded with
``self.fill_encoded_label_value`` with a default value of n_classes.
Similar to ``sklearn.preprocessing.LabelEncoder`` with additional features.
- ``RobustLabelEncoder`` encodes unseen values with ``fill_encoded_label_value`` or ``fill_label_value``
if ``fill_unseen_labels=True`` for ``transform`` or ``inverse_transform`` respectively
- ``RobustLabelEncoder`` can use predetermined labels with the parameter``labels``.
Examples
--------
>>> from sagemaker_sklearn_extension.preprocessing import RobustLabelEncoder
>>> rle = RobustLabelEncoder()
>>> rle.fit([1, 2, 2, 6])
RobustLabelEncoder(fill_encoded_label_value=None,
fill_label_value='<unseen_label>', fill_unseen_labels=True,
labels=None)
>>> rle.classes_
array([1, 2, 6])
>>> rle.transform([1, 1, 2, 6])
array([0, 0, 1, 2])
>>> rle.transform([1, 1, 2, 6, 1738])
array([ 0, 0, 1, 2, 3])
>>> rle.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
>>> rle.inverse_transform([-1738, 0, 0, 1, 2])
['<unseen_label>', 1, 1, 2, 6]
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> rle = RobustLabelEncoder()
>>> rle.fit(["hot dog", "hot dog", "banana"])
RobustLabelEncoder(fill_encoded_label_value=None,
fill_label_value='<unseen_label>', fill_unseen_labels=True,
labels=None)
>>> list(rle.classes_)
['banana', 'hot dog']
>>> rle.transform(["hot dog", "hot dog"])
array([1, 1])
>>> rle.transform(["banana", "llama"])
array([0, 2])
>>> list(rle.inverse_transform([2, 2, 1]))
['<unseen_label>', '<unseen_label>', 'hot dog']
Parameters
----------
labels : list of values (default = None)
List of unique values for label encoding. Overrides ``self.classes_``.
If ``labels`` is None, RobustLabelEncoder will automatically determine the labels.
fill_unseen_labels : boolean (default = True)
Whether or not to fill unseen values during transform or inverse_transform.
fill_encoded_label_value : int (default = n_classes)
Replacement value for unseen labels during ``transform``.
Default value is n_classes.
fill_label_value : str (default = '<unseen_label>')
Replacement value for unseen encoded labels during ``inverse_transform``.
Attributes
----------
classes_ : array of shape (n_classes,)
Holds the label for each class.
"""
def __init__(
self, labels=None, fill_unseen_labels=True, fill_encoded_label_value=None, fill_label_value="<unseen_label>"
):
super().__init__()
self.labels = labels
self.fill_unseen_labels = fill_unseen_labels
self.fill_encoded_label_value = fill_encoded_label_value
self.fill_label_value = fill_label_value
def fit(self, y):
"""Fit label encoder.
Parameters
----------
y : array-like of shape (n_samples,)
Label values.
Returns
-------
self : RobustLabelEncoder.
"""
y = column_or_1d(y, warn=True)
self.classes_ = self._check_labels_and_sort() or _encode(y)
return self
def _check_labels_and_sort(self):
if not self.labels:
return None
if self._is_sorted(self.labels):
return self.labels
warnings.warn("`labels` parameter is expected to be sorted. Sorting `labels`.")
return sorted(self.labels)
def _is_sorted(self, iterable):
return all(iterable[i] <= iterable[i + 1] for i in range(len(iterable) - 1))
def fit_transform(self, y):
"""Fit label encoder and return encoded labels.
``fill_unseen_labels=True`` does nothing in ``fit_transform`` because there will be no unseen labels.
Parameters
----------
y : array-like of shape [n_samples]
Label values.
Returns
-------
y_encoded : array-like of shape [n_samples]
Encoded label values.
"""
y = column_or_1d(y, warn=True)
sorted_labels = self._check_labels_and_sort()
self.classes_, y_encoded = (
_encode(y, uniques=sorted_labels, encode=True) if sorted_labels else _encode(y, encode=True)
)
return y_encoded
def transform(self, y):
"""Transform labels to normalized encoding.
If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_encoded_label_value`` for unseen values.
Seen labels are encoded with value between 0 and n_classes-1. Unseen labels are encoded with
``self.fill_encoded_label_value`` with a default value of n_classes.
Parameters
----------
y : array-like of shape [n_samples]
Label values.
Returns
-------
y_encoded : array-like of shape [n_samples]
Encoded label values.
"""
check_is_fitted(self, "classes_")
y = column_or_1d(y, warn=True)
# transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
if self.fill_unseen_labels:
_, mask = _encode_check_unknown(y, self.classes_, return_mask=True)
y_encoded = np.searchsorted(self.classes_, y)
fill_encoded_label_value = self.fill_encoded_label_value or len(self.classes_)
y_encoded[~mask] = fill_encoded_label_value
else:
_, y_encoded = _encode(y, uniques=self.classes_, encode=True)
return y_encoded
def inverse_transform(self, y):
"""Transform labels back to original encoding.
If ``self.fill_unseen_labels`` is ``True``, use ``self.fill_label_value`` for unseen values.
Parameters
----------
y : numpy array of shape [n_samples]
Encoded label values.
Returns
-------
y_decoded : numpy array of shape [n_samples]
Label values.
"""
check_is_fitted(self, "classes_")
y = column_or_1d(y, warn=True)
if y.dtype.kind not in ("i", "u"):
try:
                y = y.astype(float).astype(int)
except ValueError:
raise ValueError("`y` contains values not convertible to integer.")
# inverse transform of empty array is empty array
if _num_samples(y) == 0:
return np.array([])
labels = np.arange(len(self.classes_))
diff = np.setdiff1d(y, labels)
        # use .size: the truth value of a multi-element ndarray is ambiguous
        if diff.size and not self.fill_unseen_labels:
raise ValueError("y contains previously unseen labels: %s" % str(diff))
y_decoded = [self.classes_[idx] if idx in labels else self.fill_label_value for idx in y]
return y_decoded
class NALabelEncoder(BaseEstimator, TransformerMixin):
"""Encoder for transforming labels to NA values.
Uses `RobustImputer` on 1D inputs of labels
- Uses `is_finite_numeric` mask for encoding by default
- Only uses the `RobustImputer` strategy `constant` and fills using `np.nan`
- Default behavior encodes non-float and non-finite values as nan values in
the target column of a given regression dataset
Parameters
----------
mask_function : callable -> np.array, dtype('bool') (default=None)
A vectorized python function, accepts np.array, returns np.array
with dtype('bool')
For each value, if mask_function(val) == False, that value will
be imputed. mask_function is used to create a boolean mask that determines
which values in the input to impute.
Use np.vectorize to vectorize singular python functions.
"""
def __init__(self, mask_function=None):
self.mask_function = mask_function
def fit(self, y):
"""Fit the encoder on y.
Parameters
----------
y : {array-like}, shape (n_samples,)
Input column, where `n_samples` is the number of samples.
Returns
-------
self : NALabelEncoder
"""
self.model_ = RobustImputer(strategy="constant", fill_values=np.nan, mask_function=self.mask_function)
y = y.reshape(-1, 1)
self.model_.fit(X=y)
return self
def transform(self, y):
"""Encode all non-float and non-finite values in y as NA values.
Parameters
----------
y : {array-like}, shape (n_samples)
The input column to encode.
Returns
-------
yt : {ndarray}, shape (n_samples,)
The encoded input column.
"""
check_is_fitted(self, "model_")
y = y.reshape(-1, 1)
return self.model_.transform(y).flatten()
def inverse_transform(self, y):
"""Returns input column"""
return y
def _more_tags(self):
return {"X_types": ["1dlabels"]}
|
[
"sklearn.preprocessing.label._encode_check_unknown",
"math.ceil",
"numpy.asarray",
"numpy.setdiff1d",
"sklearn.preprocessing.label._encode",
"sklearn.utils.validation._num_samples",
"sklearn.utils.validation.check_is_fitted",
"numpy.searchsorted",
"sklearn.utils.validation.column_or_1d",
"sagemaker_sklearn_extension.impute.RobustImputer",
"numpy.array",
"warnings.warn",
"numpy.unique"
] |
[((10817, 10843), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (10829, 10843), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((11103, 11182), 'warnings.warn', 'warnings.warn', (['"""`labels` parameter is expected to be sorted. Sorting `labels`."""'], {}), "('`labels` parameter is expected to be sorted. Sorting `labels`.')\n", (11116, 11182), False, 'import warnings\n'), ((11800, 11826), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (11812, 11826), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((12684, 12717), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""classes_"""'], {}), "(self, 'classes_')\n", (12699, 12717), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((12730, 12756), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (12742, 12756), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((13761, 13794), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""classes_"""'], {}), "(self, 'classes_')\n", (13776, 13794), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((13807, 13833), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (13819, 13833), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((14251, 14274), 'numpy.setdiff1d', 'np.setdiff1d', (['y', 'labels'], {}), '(y, labels)\n', (14263, 14274), True, 'import numpy as np\n'), ((15884, 15977), 'sagemaker_sklearn_extension.impute.RobustImputer', 'RobustImputer', ([], {'strategy': '"""constant"""', 'fill_values': 'np.nan', 'mask_function': 'self.mask_function'}), "(strategy='constant', fill_values=np.nan, mask_function=self.\n mask_function)\n", (15897, 15977), False, 'from sagemaker_sklearn_extension.impute import RobustImputer\n'), ((16410, 16441), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""model_"""'], {}), "(self, 'model_')\n", (16425, 16441), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((5740, 5792), 'numpy.unique', 'np.unique', (['[row[j] for row in X]'], {'return_counts': '(True)'}), '([row[j] for row in X], return_counts=True)\n', (5749, 5792), True, 'import numpy as np\n'), ((10901, 10911), 'sklearn.preprocessing.label._encode', '_encode', (['y'], {}), '(y)\n', (10908, 10911), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((11930, 11976), 'sklearn.preprocessing.label._encode', '_encode', (['y'], {'uniques': 'sorted_labels', 'encode': '(True)'}), '(y, uniques=sorted_labels, encode=True)\n', (11937, 11976), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((11999, 12022), 'sklearn.preprocessing.label._encode', '_encode', (['y'], {'encode': '(True)'}), '(y, encode=True)\n', (12006, 12022), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((12819, 12834), 'sklearn.utils.validation._num_samples', '_num_samples', (['y'], {}), '(y)\n', (12831, 12834), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((12860, 12872), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12868, 12872), True, 'import numpy as np\n'), ((12932, 12989), 'sklearn.preprocessing.label._encode_check_unknown', '_encode_check_unknown', (['y', 'self.classes_'], {'return_mask': '(True)'}), '(y, self.classes_, return_mask=True)\n', (12953, 12989), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((13014, 13047), 'numpy.searchsorted', 'np.searchsorted', (['self.classes_', 'y'], {}), '(self.classes_, y)\n', (13029, 13047), True, 'import numpy as np\n'), ((13236, 13282), 'sklearn.preprocessing.label._encode', '_encode', (['y'], {'uniques': 'self.classes_', 'encode': '(True)'}), '(y, uniques=self.classes_, encode=True)\n', (13243, 13282), False, 'from sklearn.preprocessing.label import _encode, _encode_check_unknown\n'), ((14134, 14149), 'sklearn.utils.validation._num_samples', '_num_samples', (['y'], {}), '(y)\n', (14146, 14149), False, 'from sklearn.utils.validation import check_is_fitted, column_or_1d, _num_samples\n'), ((14175, 14187), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14183, 14187), True, 'import numpy as np\n'), ((5542, 5574), 'math.ceil', 'ceil', (['(self.threshold * n_samples)'], {}), '(self.threshold * n_samples)\n', (5546, 5574), False, 'from math import ceil\n'), ((6603, 6644), 'numpy.asarray', 'np.asarray', (['[unknown_category]'], {'dtype': '"""U"""'}), "([unknown_category], dtype='U')\n", (6613, 6644), True, 'import numpy as np\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import xija
import sys
from os.path import expanduser
home = expanduser("~")
addthispath = home + '/AXAFLIB/xijafit/'
sys.path.insert(0, addthispath)
import xijafit
stars = '*'*80
n = 0
newmodel = xijafit.XijaFit('aca_model_spec.json', start='2014:001', stop='2018:300',
set_data_exprs=(u'aca0=12.0',), quiet=False, name='aacccdpt')
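# The fit below proceeds in stages: freeze every parameter, thaw one small
# group, run the 'moncar' fit method, then repeat with the next group.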
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_param(u'heatsink__aca0__tau')
newmodel.thaw_param(u'heatsink__aca0__T')
newmodel.thaw_param(u'coupling__aacccdpt__aca0')
newmodel.fit(method='moncar')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_solarheat_p()
newmodel.thaw_param(u'coupling__aacccdpt__aca0__tau')
newmodel.fit(method='moncar')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_param(u'solarheat__aca0__ampl')
# newmodel.thaw_solarheat_roll()
newmodel.fit(method='moncar')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_param(u'heatsink__aca0__tau')
newmodel.thaw_param(u'heatsink__aca0__T')
newmodel.thaw_param(u'coupling__aacccdpt__aca0')
newmodel.fit(method='moncar')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_solarheat_p()
newmodel.thaw_param(u'coupling__aacccdpt__aca0__tau')
newmodel.fit(method='moncar')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_solarheat_dp()
newmodel.thaw_param(u'solarheat__aca0__ampl')
newmodel.fit(method='moncar')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_solarheat_p()
newmodel.thaw_param(u'coupling__aacccdpt__aca0__tau')
newmodel.fit(method='moncar')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_param(u'solarheat__aca0__ampl')
# newmodel.thaw_solarheat_roll()
newmodel.fit(method='moncar')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_param(u'heatsink__aca0__tau')
newmodel.thaw_param(u'heatsink__aca0__T')
newmodel.thaw_param(u'coupling__aacccdpt__aca0')
newmodel.fit(method='moncar')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_solarheat_dp()
newmodel.thaw_param(u'solarheat__aca0__ampl')
newmodel.fit(method='moncar')
newmodel.write_spec_file()
newmodel.write_snapshots_file()
newmodel = xijafit.XijaFit('aacccdpt_model_spec.json', start='2018:270', stop='2018:305',
set_data_exprs=(u'aca0=-11.0',), quiet=False, name='aacccdpt')
n = n + 1
print('{}\nStep {}\n{}'.format(stars, n, stars))
newmodel.freeze_all()
newmodel.thaw_param(u'step_power__aca0__P')
newmodel.fit(method='moncar')
newmodel.write_spec_file()
newmodel.write_snapshots_file()
|
[
"xijafit.XijaFit",
"os.path.expanduser",
"sys.path.insert"
] |
[((127, 142), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (137, 142), False, 'from os.path import expanduser\n'), ((184, 215), 'sys.path.insert', 'sys.path.insert', (['(0)', 'addthispath'], {}), '(0, addthispath)\n', (199, 215), False, 'import sys\n'), ((265, 404), 'xijafit.XijaFit', 'xijafit.XijaFit', (['"""aca_model_spec.json"""'], {'start': '"""2014:001"""', 'stop': '"""2018:300"""', 'set_data_exprs': "(u'aca0=12.0',)", 'quiet': '(False)', 'name': '"""aacccdpt"""'}), "('aca_model_spec.json', start='2014:001', stop='2018:300',\n set_data_exprs=(u'aca0=12.0',), quiet=False, name='aacccdpt')\n", (280, 404), False, 'import xijafit\n'), ((2556, 2702), 'xijafit.XijaFit', 'xijafit.XijaFit', (['"""aacccdpt_model_spec.json"""'], {'start': '"""2018:270"""', 'stop': '"""2018:305"""', 'set_data_exprs': "(u'aca0=-11.0',)", 'quiet': '(False)', 'name': '"""aacccdpt"""'}), "('aacccdpt_model_spec.json', start='2018:270', stop=\n '2018:305', set_data_exprs=(u'aca0=-11.0',), quiet=False, name='aacccdpt')\n", (2571, 2702), False, 'import xijafit\n')]
|
import torch
from typing import Callable, TypeVar
from functools import wraps
from . import debug
class ScopedDebugTensorList:
def __init__(self) -> None:
self._hidden_states = []
@property
def hidden_states(self):
return self._hidden_states
def _set_hidden_states(self, hidden_states):
self._hidden_states = hidden_states
class ScopedTensorInspectorContext:
def __init__(self):
pass
def __enter__(self):
self.prev_hidden = debug.get("_inspect_hidden_states", [])
debug.set("_inspect_hidden_states", [])
self._local_list = ScopedDebugTensorList()
return self._local_list
def __exit__(self, *args):
self._local_list._set_hidden_states(debug.get("_inspect_hidden_states", []))
debug.set("_inspect_hidden_states", self.prev_hidden)
self.prev_hidden = None
class CheckpointFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, placeholder, func, preserve_rng_state, *args):
ctx.func = func
ctx.preserve_rng_state = preserve_rng_state
ctx.cuda_rng_state = torch.cuda.get_rng_state() if preserve_rng_state else None
tensors = []
others = []
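        # Split the arguments: tensors go through save_for_backward, while
        # non-tensor arguments ride along on ctx.nontensor_inputs.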
for arg in args:
if torch.is_tensor(arg):
tensors.append(arg)
others.append(None)
else:
tensors.append(None)
others.append(arg)
ctx.nontensor_inputs = others
ctx.save_for_backward(*tensors)
with torch.no_grad(), ScopedTensorInspectorContext() as inspector:
outputs = func(*args)
# append scoped hidden states to global list as a placeholder
for it in inspector.hidden_states:
debug.append("_inspect_hidden_states", it)
ctx.inspect_list = inspector.hidden_states
return outputs
@staticmethod
def backward(ctx, *grad_outputs):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"Checkpointing is not compatible with .grad() or when an `inputs` parameter"
" is passed to .backward(). Please use .backward() and do not pass its `inputs`"
" argument.")
all_inputs = []
        input_requires_grad = []
for tensor, other in zip(ctx.saved_tensors, ctx.nontensor_inputs):
if tensor is None:
all_inputs.append(other)
                input_requires_grad.append(False)
else:
                input_requires_grad.append(tensor.requires_grad)
nw_tensor = tensor.detach()
nw_tensor.requires_grad = tensor.requires_grad
all_inputs.append(nw_tensor)
with torch.random.fork_rng(devices=[torch.cuda.current_device()], enabled=ctx.preserve_rng_state):
if ctx.preserve_rng_state:
torch.cuda.set_rng_state(ctx.cuda_rng_state)
with torch.enable_grad(), ScopedTensorInspectorContext() as inspector:
outputs = ctx.func(*all_inputs)
assert len(ctx.inspect_list) == len(inspector.hidden_states), "Backward step changed"
for i, it in enumerate(inspector.hidden_states):
assert it["name"] == ctx.inspect_list[i]["name"], "Backward step changed"
assert it["shape"] == ctx.inspect_list[i]["shape"], "Backward step changed"
assert it["group"] == ctx.inspect_list[i]["group"], "Backward step changed"
# change the tensor in placeholder
ctx.inspect_list[i]["tensor"] = it["tensor"]
if not isinstance(outputs, tuple):
outputs = (outputs,)
assert len(outputs) == len(grad_outputs)
outputs_with_grad = []
grad_of_output = []
for i, output in enumerate(outputs):
if torch.is_tensor(output) and output.requires_grad:
outputs_with_grad.append(output)
grad_of_output.append(grad_outputs[i])
torch.autograd.backward(
outputs_with_grad,
grad_of_output,
)
grads = []
        for inp, requires_grad in zip(all_inputs, input_requires_grad):
if requires_grad:
grads.append(inp.grad)
else:
grads.append(None)
return (None, None, None) + tuple(grads)
R = TypeVar("R")
def checkpoint(func : Callable[..., R]) -> Callable[..., R]:
@wraps(func)
def wrapper(*args):
placeholder = torch.tensor([], requires_grad=torch.is_grad_enabled())
return CheckpointFunction.apply(placeholder, func, True, *args)
return wrapper
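# A minimal usage sketch (added illustration; `block`, `x`, and `w` are
# hypothetical, and preserve_rng_state=True assumes a CUDA device). The
# activations inside `block` are recomputed during backward instead of stored:
#
#   @checkpoint
#   def block(x, w):
#       return torch.relu(x @ w)
#
#   out = block(x, w)        # x, w: tensors with requires_grad=True
#   out.sum().backward()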
|
[
"torch.cuda.set_rng_state",
"torch.is_grad_enabled",
"torch.cuda.get_rng_state",
"torch.autograd.backward",
"torch.autograd._is_checkpoint_valid",
"functools.wraps",
"torch.enable_grad",
"typing.TypeVar",
"torch.is_tensor",
"torch.no_grad",
"torch.cuda.current_device"
] |
[((4473, 4485), 'typing.TypeVar', 'TypeVar', (['"""R"""'], {}), "('R')\n", (4480, 4485), False, 'from typing import Callable, TypeVar\n'), ((4552, 4563), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (4557, 4563), False, 'from functools import wraps\n'), ((4112, 4170), 'torch.autograd.backward', 'torch.autograd.backward', (['outputs_with_grad', 'grad_of_output'], {}), '(outputs_with_grad, grad_of_output)\n', (4135, 4170), False, 'import torch\n'), ((1139, 1165), 'torch.cuda.get_rng_state', 'torch.cuda.get_rng_state', ([], {}), '()\n', (1163, 1165), False, 'import torch\n'), ((1288, 1308), 'torch.is_tensor', 'torch.is_tensor', (['arg'], {}), '(arg)\n', (1303, 1308), False, 'import torch\n'), ((1564, 1579), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1577, 1579), False, 'import torch\n'), ((1984, 2021), 'torch.autograd._is_checkpoint_valid', 'torch.autograd._is_checkpoint_valid', ([], {}), '()\n', (2019, 2021), False, 'import torch\n'), ((2936, 2980), 'torch.cuda.set_rng_state', 'torch.cuda.set_rng_state', (['ctx.cuda_rng_state'], {}), '(ctx.cuda_rng_state)\n', (2960, 2980), False, 'import torch\n'), ((2998, 3017), 'torch.enable_grad', 'torch.enable_grad', ([], {}), '()\n', (3015, 3017), False, 'import torch\n'), ((3941, 3964), 'torch.is_tensor', 'torch.is_tensor', (['output'], {}), '(output)\n', (3956, 3964), False, 'import torch\n'), ((4641, 4664), 'torch.is_grad_enabled', 'torch.is_grad_enabled', ([], {}), '()\n', (4662, 4664), False, 'import torch\n'), ((2818, 2845), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (2843, 2845), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
u"""Common module for SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: <NAME> <<EMAIL>> , Jan 30 2019
Version: 1.1
Module: SecureTea
"""
import time
def getdatetime():
"""Date and time.
Returns:
TYPE: String with the current date and time
"""
return str(time.strftime("%Y-%m-%d %H:%M:%S"))
def check_config(cred):
"""
Check whether the credentials are valid or not.
Args:
-----
:cred : dict
Credentials dictionary
Raises:
-------
None
Returns:
--------
TYPE: Bool
True if valid else False
"""
for key in cred:
if cred[key] == "XXXX":
return False
return True
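if __name__ == "__main__":
    # Minimal self-check (added illustration, not part of the original module).
    print(getdatetime())
    print(check_config({"username": "alice", "key": "XXXX"}))  # False: placeholder left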
|
[
"time.strftime"
] |
[((395, 429), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (408, 429), False, 'import time\n')]
|
import netCDF4
import numpy
import vtk
from reader_base import ReaderBase
class LatLonReader(ReaderBase):
def __init__(self, filename, padding=0):
"""
Constructor
@param filename UM netCDF file
@param padding number of extra cells to add on the high end of longitudes
        @note padding adds extra cells on the high end of longitudes
"""
super(LatLonReader, self).__init__()
# read file
nc = netCDF4.Dataset(filename, 'r')
        lons_units = ''
        lats_units = ''
# gather all the latitudes and longitudes
lats, lons = None, None
lats_0, lons_0 = None, None
for varname in nc.variables:
var = nc.variables[varname]
if hasattr(var, 'standard_name'):
if var.standard_name == 'longitude':
if varname.find('_0') >= 0:
lons_0 = var[:]
else:
lons = var[:]
lons_units = var.units
elif var.standard_name == 'latitude':
if varname.find('_0') >= 0:
lats_0 = var[:]
else:
lats = var[:]
lats_units = var.units
ncells_lat, ncells_lon = len(lats_0), len(lons_0)
ncells = ncells_lat * (ncells_lon + padding)
# construct the unstructured grid as a collection of
# 2D cells
pointArray = numpy.zeros((4 * ncells, 3))
self.vtk['pointArray'] = pointArray
pointData = self.vtk['pointData']
pointData.SetNumberOfComponents(3)
pointData.SetNumberOfTuples(4 * ncells)
pointData.SetVoidArray(pointArray, 4 * ncells * 3, 1)
points = self.vtk['points']
points.SetNumberOfPoints(4 * ncells)
points.SetData(pointData)
grid = self.vtk['grid']
grid.Allocate(ncells, 1)
ptIds = vtk.vtkIdList()
ptIds.SetNumberOfIds(4)
periodicity_length = 360. # in deg
icell = 0
for j0 in range(ncells_lat):
j1 = j0 + 1
for i in range(ncells_lon + padding):
i0 = (i + 0) % ncells_lon
i1 = (i + 1) % ncells_lon
offset0 = periodicity_length * ((i + 0) // ncells_lon)
offset1 = periodicity_length * ((i + 1) // ncells_lon)
lon00, lat00 = lons[i0] + offset0, lats[j0]
lon10, lat10 = lons[i1] + offset1, lats[j0]
lon11, lat11 = lons[i1] + offset1, lats[j1]
lon01, lat01 = lons[i0] + offset0, lats[j1]
k0 = 4*icell
k1, k2, k3 = k0 + 1, k0 + 2, k0 + 3
# storing coords as lon, lat, 0
pointArray[k0, :] = lon00, lat00, 0.
pointArray[k1, :] = lon10, lat10, 0.
pointArray[k2, :] = lon11, lat11, 0.
pointArray[k3, :] = lon01, lat01, 0.
ptIds.SetId(0, k0)
ptIds.SetId(1, k1)
ptIds.SetId(2, k2)
ptIds.SetId(3, k3)
grid.InsertNextCell(vtk.VTK_QUAD, ptIds)
icell += 1
grid.SetPoints(points)
###############################################################################
def main():
import argparse
from numpy import pi, cos, sin, exp
parser = argparse.ArgumentParser(description='Read ugrid file')
parser.add_argument('-i', dest='input', default='ll.nc', help='Specify UM input netCDF file')
parser.add_argument('-p', dest='padding', type=int, default=0,
help='Specify by how much the grid should be padded on the high lon side')
parser.add_argument('-V', dest='vtk_file', default='lonlat.vtk', help='Save grid in VTK file')
parser.add_argument('-b', dest='binary', action='store_true', help='Write binary file')
parser.add_argument('-stream', dest='streamFunc', default='x',
help='Stream function as a function of x (longitude in rad) and y (latitude in rad)')
args = parser.parse_args()
reader = LatLonReader(filename=args.input, padding=args.padding)
if args.streamFunc:
# compute the edge velocity if user provides the stream function
x, y = reader.getLonLat()
streamData = eval(args.streamFunc)
edgeVel = reader.getEdgeFieldFromStreamData(streamData)
reader.setEdgeField('edge_integrated_velocity', edgeVel)
loopIntegrals = reader.getLoopIntegralsFromStreamData(streamData)
reader.setLoopIntegrals('cell_loop_integrals', loopIntegrals)
if args.vtk_file:
reader.saveToVtkFile(args.vtk_file, binary=args.binary)
if __name__ == '__main__':
main()
|
[
"netCDF4.Dataset",
"numpy.zeros",
"argparse.ArgumentParser",
"vtk.vtkIdList"
] |
[((3437, 3491), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Read ugrid file"""'}), "(description='Read ugrid file')\n", (3460, 3491), False, 'import argparse\n'), ((475, 505), 'netCDF4.Dataset', 'netCDF4.Dataset', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (490, 505), False, 'import netCDF4\n'), ((1508, 1536), 'numpy.zeros', 'numpy.zeros', (['(4 * ncells, 3)'], {}), '((4 * ncells, 3))\n', (1519, 1536), False, 'import numpy\n'), ((1975, 1990), 'vtk.vtkIdList', 'vtk.vtkIdList', ([], {}), '()\n', (1988, 1990), False, 'import vtk\n')]
|
import os, glob, tempfile, warnings
import numpy as np
from traitlets import (HasTraits,
Integer,
Unicode,
Float,
Integer,
Instance,
Dict,
Bool,
default)
# Rpy
try:
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
rpy.r('library(knockoff); library(glmnet)')
from rpy2 import rinterface
except ImportError:
warnings.warn("rpy2 with knockoff and glmnet unavailable")
def null_print(x):
pass
# Knockoff selection
methods = {}
class generic_method(HasTraits):
need_CV = False
selectiveR_method = False
wide_ok = True # ok for p >= n?
# Traits
q = Float(0.2)
method_name = Unicode('Generic method')
model_target = Unicode()
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
(self.X,
self.Y,
self.l_theory,
self.l_min,
self.l_1se,
self.sigma_reid) = (X,
Y,
l_theory,
l_min,
l_1se,
sigma_reid)
def select(self):
raise NotImplementedError('abstract method')
@classmethod
def register(cls):
methods[cls.__name__] = cls
def selected_target(self, active, beta):
C = self.feature_cov[active]
Q = C[:,active]
return np.linalg.inv(Q).dot(C.dot(beta))
def full_target(self, active, beta):
return beta[active]
def get_target(self, active, beta):
if self.model_target not in ['selected', 'full', 'debiased']:
raise ValueError('Gaussian methods only have selected or full targets')
if self.model_target in ['full', 'debiased']:
return self.full_target(active, beta)
else:
return self.selected_target(active, beta)
class lasso_glmnet(generic_method):
def select(self, CV=True, seed=0):
numpy2ri.activate()
rpy.r.assign('X', self.X.copy())
rpy.r.assign('Y', self.Y.copy())
rpy.r('X = as.matrix(X)')
rpy.r('Y = as.numeric(Y)')
rpy.r('set.seed(%d)' % seed)
rpy.r('cvG = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')
rpy.r("L1 = cvG[['lambda.min']]")
rpy.r("L2 = cvG[['lambda.1se']]")
if CV:
rpy.r("L = L1")
else:
rpy.r("L = 0.99 * L2")
rpy.r("G = glmnet(X, Y, intercept=FALSE, standardize=FALSE)")
n, p = self.X.shape
L = rpy.r('L')
rpy.r('B = as.numeric(coef(G, s=L, exact=TRUE, x=X, y=Y))[-1]')
B = np.asarray(rpy.r('B'))
selected = (B != 0)
if selected.sum():
V = np.nonzero(selected)[0]
return V, V
else:
return [], []
lasso_glmnet.register()
def factor_knockoffs(feature_cov, method='asdp'):
numpy2ri.activate()
rpy.r.assign('Sigma', feature_cov)
rpy.r.assign('method', method)
rpy.r('''
# Compute the Cholesky -- from create.gaussian
Sigma = as.matrix(Sigma)
diag_s = diag(switch(method, equi = create.solve_equi(Sigma),
sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))
if (is.null(dim(diag_s))) {
diag_s = diag(diag_s, length(diag_s))
}
SigmaInv_s = solve(Sigma, diag_s)
Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s
chol_k = chol(Sigma_k)
''')
knockoff_chol = np.asarray(rpy.r('chol_k'))
SigmaInv_s = np.asarray(rpy.r('SigmaInv_s'))
diag_s = np.asarray(rpy.r('diag_s'))
np.savez('.knockoff_factorizations/%s.npz' % (os.path.split(tempfile.mkstemp()[1])[1],),
method=method,
feature_cov=feature_cov,
knockoff_chol=knockoff_chol)
return knockoff_chol
def cv_glmnet_lam(X, Y, seed=0):
"""
Some calculations that can be reused by methods:
lambda.min, lambda.1se, lambda.theory and Reid et al. estimate of noise
"""
numpy2ri.activate()
rpy.r('set.seed(%d)' % seed)
rpy.r.assign('X', X.copy())
rpy.r.assign('Y', Y.copy())
rpy.r('X=as.matrix(X)')
rpy.r('Y=as.numeric(Y)')
rpy.r('set.seed(1)')
rpy.r('G = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')
rpy.r("L = G[['lambda.min']]")
rpy.r("L1 = G[['lambda.1se']]")
L = rpy.r('L')
L1 = rpy.r('L1')
numpy2ri.deactivate()
return float(1.00001 * L[0]), float(1.00001 * L1[0]),
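# A minimal usage sketch, assuming rpy2 and the R packages knockoff and
# glmnet are available (data are synthetic):
#   import numpy as np
#   X = np.random.standard_normal((100, 20))
#   beta = np.zeros(20); beta[:3] = 2.
#   Y = X.dot(beta) + np.random.standard_normal(100)
#   l_min, l_1se = cv_glmnet_lam(X, Y, seed=0)   # cross-validated lambdas
#   method = lasso_glmnet(X, Y, None, l_min, l_1se, None)
#   selected, _ = method.select(CV=True)         # indices of nonzero coefficients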
|
[
"rpy2.robjects.numpy2ri.activate",
"traitlets.Float",
"tempfile.mkstemp",
"rpy2.robjects.r",
"traitlets.Unicode",
"numpy.nonzero",
"rpy2.robjects.r.assign",
"numpy.linalg.inv",
"rpy2.robjects.numpy2ri.deactivate",
"warnings.warn"
] |
[((433, 476), 'rpy2.robjects.r', 'rpy.r', (['"""library(knockoff); library(glmnet)"""'], {}), "('library(knockoff); library(glmnet)')\n", (438, 476), True, 'import rpy2.robjects as rpy\n'), ((799, 809), 'traitlets.Float', 'Float', (['(0.2)'], {}), '(0.2)\n', (804, 809), False, 'from traitlets import HasTraits, Integer, Unicode, Float, Integer, Instance, Dict, Bool, default\n'), ((828, 853), 'traitlets.Unicode', 'Unicode', (['"""Generic method"""'], {}), "('Generic method')\n", (835, 853), False, 'from traitlets import HasTraits, Integer, Unicode, Float, Integer, Instance, Dict, Bool, default\n'), ((873, 882), 'traitlets.Unicode', 'Unicode', ([], {}), '()\n', (880, 882), False, 'from traitlets import HasTraits, Integer, Unicode, Float, Integer, Instance, Dict, Bool, default\n'), ((3115, 3134), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (3132, 3134), False, 'from rpy2.robjects import numpy2ri\n'), ((3139, 3173), 'rpy2.robjects.r.assign', 'rpy.r.assign', (['"""Sigma"""', 'feature_cov'], {}), "('Sigma', feature_cov)\n", (3151, 3173), True, 'import rpy2.robjects as rpy\n'), ((3178, 3208), 'rpy2.robjects.r.assign', 'rpy.r.assign', (['"""method"""', 'method'], {}), "('method', method)\n", (3190, 3208), True, 'import rpy2.robjects as rpy\n'), ((3213, 3671), 'rpy2.robjects.r', 'rpy.r', (['"""\n\n # Compute the Cholesky -- from create.gaussian\n\n Sigma = as.matrix(Sigma)\n diag_s = diag(switch(method, equi = create.solve_equi(Sigma), \n sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))\n if (is.null(dim(diag_s))) {\n diag_s = diag(diag_s, length(diag_s))\n }\n SigmaInv_s = solve(Sigma, diag_s)\n Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s\n chol_k = chol(Sigma_k)\n """'], {}), '(\n """\n\n # Compute the Cholesky -- from create.gaussian\n\n Sigma = as.matrix(Sigma)\n diag_s = diag(switch(method, equi = create.solve_equi(Sigma), \n sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))\n if (is.null(dim(diag_s))) {\n diag_s = diag(diag_s, length(diag_s))\n }\n SigmaInv_s = solve(Sigma, diag_s)\n Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s\n chol_k = chol(Sigma_k)\n """\n )\n', (3218, 3671), True, 'import rpy2.robjects as rpy\n'), ((4217, 4236), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (4234, 4236), False, 'from rpy2.robjects import numpy2ri\n'), ((4241, 4269), 'rpy2.robjects.r', 'rpy.r', (["('set.seed(%d)' % seed)"], {}), "('set.seed(%d)' % seed)\n", (4246, 4269), True, 'import rpy2.robjects as rpy\n'), ((4338, 4361), 'rpy2.robjects.r', 'rpy.r', (['"""X=as.matrix(X)"""'], {}), "('X=as.matrix(X)')\n", (4343, 4361), True, 'import rpy2.robjects as rpy\n'), ((4366, 4390), 'rpy2.robjects.r', 'rpy.r', (['"""Y=as.numeric(Y)"""'], {}), "('Y=as.numeric(Y)')\n", (4371, 4390), True, 'import rpy2.robjects as rpy\n'), ((4395, 4415), 'rpy2.robjects.r', 'rpy.r', (['"""set.seed(1)"""'], {}), "('set.seed(1)')\n", (4400, 4415), True, 'import rpy2.robjects as rpy\n'), ((4420, 4484), 'rpy2.robjects.r', 'rpy.r', (['"""G = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)"""'], {}), "('G = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')\n", (4425, 4484), True, 'import rpy2.robjects as rpy\n'), ((4489, 4519), 'rpy2.robjects.r', 'rpy.r', (['"""L = G[[\'lambda.min\']]"""'], {}), '("L = G[[\'lambda.min\']]")\n', (4494, 4519), True, 'import rpy2.robjects as rpy\n'), ((4524, 4555), 'rpy2.robjects.r', 'rpy.r', (['"""L1 = G[[\'lambda.1se\']]"""'], {}), '("L1 = G[[\'lambda.1se\']]")\n', (4529, 4555), True, 'import rpy2.robjects as 
rpy\n'), ((4564, 4574), 'rpy2.robjects.r', 'rpy.r', (['"""L"""'], {}), "('L')\n", (4569, 4574), True, 'import rpy2.robjects as rpy\n'), ((4584, 4595), 'rpy2.robjects.r', 'rpy.r', (['"""L1"""'], {}), "('L1')\n", (4589, 4595), True, 'import rpy2.robjects as rpy\n'), ((4600, 4621), 'rpy2.robjects.numpy2ri.deactivate', 'numpy2ri.deactivate', ([], {}), '()\n', (4619, 4621), False, 'from rpy2.robjects import numpy2ri\n'), ((533, 591), 'warnings.warn', 'warnings.warn', (['"""rpy2 with knockoff and glmnet unavailable"""'], {}), "('rpy2 with knockoff and glmnet unavailable')\n", (546, 591), False, 'import os, glob, tempfile, warnings\n'), ((2187, 2206), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (2204, 2206), False, 'from rpy2.robjects import numpy2ri\n'), ((2298, 2323), 'rpy2.robjects.r', 'rpy.r', (['"""X = as.matrix(X)"""'], {}), "('X = as.matrix(X)')\n", (2303, 2323), True, 'import rpy2.robjects as rpy\n'), ((2332, 2358), 'rpy2.robjects.r', 'rpy.r', (['"""Y = as.numeric(Y)"""'], {}), "('Y = as.numeric(Y)')\n", (2337, 2358), True, 'import rpy2.robjects as rpy\n'), ((2367, 2395), 'rpy2.robjects.r', 'rpy.r', (["('set.seed(%d)' % seed)"], {}), "('set.seed(%d)' % seed)\n", (2372, 2395), True, 'import rpy2.robjects as rpy\n'), ((2404, 2470), 'rpy2.robjects.r', 'rpy.r', (['"""cvG = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)"""'], {}), "('cvG = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')\n", (2409, 2470), True, 'import rpy2.robjects as rpy\n'), ((2479, 2512), 'rpy2.robjects.r', 'rpy.r', (['"""L1 = cvG[[\'lambda.min\']]"""'], {}), '("L1 = cvG[[\'lambda.min\']]")\n', (2484, 2512), True, 'import rpy2.robjects as rpy\n'), ((2521, 2554), 'rpy2.robjects.r', 'rpy.r', (['"""L2 = cvG[[\'lambda.1se\']]"""'], {}), '("L2 = cvG[[\'lambda.1se\']]")\n', (2526, 2554), True, 'import rpy2.robjects as rpy\n'), ((2655, 2716), 'rpy2.robjects.r', 'rpy.r', (['"""G = glmnet(X, Y, intercept=FALSE, standardize=FALSE)"""'], {}), "('G = glmnet(X, Y, intercept=FALSE, standardize=FALSE)')\n", (2660, 2716), True, 'import rpy2.robjects as rpy\n'), ((2757, 2767), 'rpy2.robjects.r', 'rpy.r', (['"""L"""'], {}), "('L')\n", (2762, 2767), True, 'import rpy2.robjects as rpy\n'), ((2776, 2839), 'rpy2.robjects.r', 'rpy.r', (['"""B = as.numeric(coef(G, s=L, exact=TRUE, x=X, y=Y))[-1]"""'], {}), "('B = as.numeric(coef(G, s=L, exact=TRUE, x=X, y=Y))[-1]')\n", (2781, 2839), True, 'import rpy2.robjects as rpy\n'), ((3693, 3708), 'rpy2.robjects.r', 'rpy.r', (['"""chol_k"""'], {}), "('chol_k')\n", (3698, 3708), True, 'import rpy2.robjects as rpy\n'), ((3738, 3757), 'rpy2.robjects.r', 'rpy.r', (['"""SigmaInv_s"""'], {}), "('SigmaInv_s')\n", (3743, 3757), True, 'import rpy2.robjects as rpy\n'), ((3783, 3798), 'rpy2.robjects.r', 'rpy.r', (['"""diag_s"""'], {}), "('diag_s')\n", (3788, 3798), True, 'import rpy2.robjects as rpy\n'), ((2582, 2597), 'rpy2.robjects.r', 'rpy.r', (['"""L = L1"""'], {}), "('L = L1')\n", (2587, 2597), True, 'import rpy2.robjects as rpy\n'), ((2624, 2646), 'rpy2.robjects.r', 'rpy.r', (['"""L = 0.99 * L2"""'], {}), "('L = 0.99 * L2')\n", (2629, 2646), True, 'import rpy2.robjects as rpy\n'), ((2863, 2873), 'rpy2.robjects.r', 'rpy.r', (['"""B"""'], {}), "('B')\n", (2868, 2873), True, 'import rpy2.robjects as rpy\n'), ((1630, 1646), 'numpy.linalg.inv', 'np.linalg.inv', (['Q'], {}), '(Q)\n', (1643, 1646), True, 'import numpy as np\n'), ((2946, 2966), 'numpy.nonzero', 'np.nonzero', (['selected'], {}), '(selected)\n', (2956, 2966), True, 'import numpy as np\n'), ((3864, 3882), 
'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (3880, 3882), False, 'import os, glob, tempfile, warnings\n')]
|
import numpy as np
import warnings
from copy import deepcopy
from scipy.signal import fftconvolve, medfilt
import astropy.units as u
import astropy.constants as cst
from astropy.io import fits, registry
from astropy.wcs import WCS
from astropy.nddata import NDDataArray, StdDevUncertainty, InverseVariance
from astropy.nddata.ccddata import _known_uncertainties
from astropy.nddata.ccddata import _unc_name_to_cls, _unc_cls_to_name, _uncertainty_unit_equivalent_to_parent
def forman(M):
"""Return Forman window.
The Forman window is defined in (E-4) [1]_.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (with this
definition the value one appears only if `M` is even).
See Also
--------
numpy.bartlett, numpy.blackman, numpy.hamming, numpy.kaiser, numpy.hanning
References
----------
.. [1] <NAME>., (2005) Spectral Characterization of the Herschel SPIRE
Photometer, 2005MsT..........1S
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, float)
n = np.arange(0, M)
return (1 - ((n - M / 2) / M) ** 2) ** 2
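# Quick sanity check of the window values (computed from the formula above):
#   forman(4) -> [0.5625, 0.87890625, 1.0, 0.87890625]
# The window is typically passed as an apodization_function to the FTSData
# inversion methods below.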
class FTSData(NDDataArray):
"""Class to handle OPD or spectral FTS cubes.
Parameters
----------
data : `~numpy.ndarray` or `FTSData`
The actual data contained in this `FTSData` object. Note that this
will always be stored by *reference*, so you should make a copy of
the ``data`` before passing it in if that's the desired behavior.
uncertainty : `~astropy.nddata.NDUncertainty`, optional
Uncertainties on the data.
mask : `~numpy.ndarray`-like, optional
Mask for the data, given as a boolean Numpy array or any object that
can be converted to a boolean Numpy array with a shape
matching that of the data. The values must be ``False`` where
the data is *valid* and ``True`` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will cause the mask from the masked array to be
ignored.
hits : `~numpy.ndarray`-like, optional
Hit map for the data, given as a int Numpy array or any object that
can be converted to a int Numpy array with a shape
matching that of the data.
flags : `~numpy.ndarray`-like or `~astropy.nddata.FlagCollection`, optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type (or an object which can be converted
to a Numpy array) with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
wcs : `~astropy.wcs.WCS`, optional
WCS-object containing the world coordinate system for the data.
meta : `dict`-like object, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object. e.g., creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.UnitBase` instance or str, optional
The units of the data.
"""
__opd_idx = None
__freq_idx = None
hits = None
def __init__(self, *args, hits=None, **kwargs):
# Initialize with the parent...
super().__init__(*args, **kwargs)
# Additional data
if hits is not None:
self.hits = np.array(hits).astype(int)
# Set Internal indexes on the wcs object
if self.wcs is not None:
opd_idx = np.argwhere("opd" == np.char.lower(self.wcs.wcs.ctype)).squeeze()
self.__opd_idx = opd_idx.item() if opd_idx.size == 1 else None
freq_idx = np.argwhere("freq" == np.char.lower(self.wcs.wcs.ctype)).squeeze()
self.__freq_idx = freq_idx.item() if freq_idx.size == 1 else None
@property
def __is_opd(self):
return self.__opd_idx is not None
@property
def __is_freq(self):
return self.__freq_idx is not None
@property
def opd_axis(self):
if self.__is_opd:
return self.wcs.sub([self.__opd_idx + 1]).pixel_to_world(np.arange(self.shape[0]))
@property
def spectral_axis(self):
if self.__is_freq:
return self.wcs.sub([self.__freq_idx + 1]).pixel_to_world(np.arange(self.shape[0]))
@property
def _is_doublesided(self):
"""Return True is the cube is double sided, also enforce positive increments."""
return (np.sum(self.wcs.sub([self.__opd_idx + 1]).all_pix2world([0, self.shape[0] - 1], 0)) == 0) & (
self.wcs.wcs.cdelt[self.__opd_idx] > 0
)
@property
def _is_onesided(self):
"""Return True is the cube is one sided, also enforce positive increments."""
return (np.sum(self.wcs.sub([self.__opd_idx + 1]).all_pix2world(0, 0)) == 0) & (
self.wcs.wcs.cdelt[self.__opd_idx] > 0
)
# from CCDData
def _slice_wcs(self, item):
"""
Override the WCS slicing behaviour so that the wcs attribute continues
to be an `astropy.wcs.WCS`.
"""
if self.wcs is None:
return None
try:
return self.wcs[item]
except Exception as err:
self._handle_wcs_slicing_error(err, item)
def _extract_doublesided(self):
"""Return the largest doublesided OPD cube from the data.
Returns
-------
output : FTSData
A doublesided interferograms cube
"""
assert self.__is_opd, "Input should be an OPD cube"
opd_wcs = self.wcs.sub([self.__opd_idx + 1])
opds = opd_wcs.all_pix2world(np.arange(self.data.shape[0]), 0)[0]
_maxopd = np.min([-opds.min(), opds.max()])
signed = np.sign(opd_wcs.wcs.cdelt[0])
slice_idx = opd_wcs.all_world2pix([-signed * _maxopd, signed * _maxopd], 0)[0].astype(int)
slice_idx += [0, 1] # Inclusive end
_slice = slice(*slice_idx)
wcs = deepcopy(self.wcs)
wcs.wcs.crpix[self.__opd_idx] -= _slice.start
meta = deepcopy(self.meta)
meta["HISTORY"] = "extract_doublesided"
mask = self.mask[_slice] if self.mask is not None else None
hits = self.hits[_slice] if self.hits is not None else None
result = self.__class__(self.data[_slice], wcs=wcs, mask=mask, meta=meta, hits=hits)
return result
def _to_onesided(self):
"""Return a onesided OPD cube from the data.
Returns
-------
output : FTSData
A onesided interferograms cube
"""
zpd_idx = self.wcs.sub([self.__opd_idx + 1]).world_to_pixel(0 * self.wcs.wcs.cunit[self.__opd_idx]).astype(int)
extrema_opd = np.abs(self.wcs.sub([self.__opd_idx + 1]).pixel_to_world([0, self.shape[0] - 1]))
if extrema_opd[1] >= extrema_opd[0]:
# Positive single sided : longer right hand side...
# Or doublesided
extract_slice = slice(zpd_idx, None)
os_slice = slice(0, zpd_idx + 1)
db_slice = slice(zpd_idx, None, -1)
elif extrema_opd[1] < extrema_opd[0]:
# Negative single sided : longer left hand side...
# Or double sided
extract_slice = slice(zpd_idx, None, -1)
os_slice = slice(0, self.data.shape[0] - zpd_idx)
db_slice = slice(zpd_idx, None)
# TODO: self.mask ??
# Extract the longest part
onesided_itg = self.data[extract_slice].copy()
onesided_hits = self.hits[extract_slice].copy() if self.hits is not None else None
# Take the mean with the other half on the double sided part
onesided_itg[os_slice] += self.data[db_slice]
onesided_itg[os_slice] /= 2
if onesided_hits is not None:
onesided_hits[os_slice] += self.hits[db_slice]
onesided_hits[os_slice] /= 2
wcs = deepcopy(self.wcs)
wcs.wcs.crpix[self.__opd_idx] = 1
output = FTSData(onesided_itg, wcs=wcs, meta=self.meta, hits=onesided_hits)
return output
def __invert_doublesided(self, apodization_function=None):
"""Invert a doublesided interferograms cube.
Parameters
----------
apodization_function : func
Apodization function to be used on the interferograms (default: None)
Returns
-------
output : FTSData
The corresponding spectral cube
Notes
-----
Choice can be made among the window functions available in numpy at [1]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
assert self.__is_opd, "Input should be an OPD cube"
assert self._is_doublesided, "Not a doublesided interferogram cube"
cdelt_opd = self.wcs.wcs.cdelt[self.__opd_idx]
cunit_opd = u.Unit(self.wcs.wcs.cunit[self.__opd_idx])
naxis_opd = self.shape[0]
# freq = np.fft.fftfreq(naxis_opd, d=cdelt_opd * cunit_opd) * cst.c
if apodization_function is None:
apodization_function = np.ones
_cube = np.ma.array(self.data, mask=self.mask).filled(0) * np.expand_dims(
apodization_function(naxis_opd), tuple(np.arange(1, self.ndim))
)
# Spencer 2005 Eq 2.29, direct fft
spectra = np.fft.fft(np.fft.ifftshift(_cube, axes=0), axis=0)
# Factor of 2 because we used the fourier transform
spectra *= (4 * cdelt_opd * cunit_opd).decompose().value
spectra = np.fft.fftshift(spectra, axes=0)
# freq = np.fft.fftshift(freq)
# Build new wcs
wcs = deepcopy(self.wcs)
wcs.wcs.ctype[self.__opd_idx] = "FREQ"
wcs.wcs.cunit[self.__opd_idx] = "Hz"
# TODO: (cst.c / (cdelt_opd * cunit_opd) / (naxis_opd-1)).to(u.Hz).value gives the 1/2L resolution, but fails in the tests
wcs.wcs.cdelt[self.__opd_idx] = (cst.c / (cdelt_opd * cunit_opd) / naxis_opd).to(u.Hz).value
wcs.wcs.crpix[self.__opd_idx] = (naxis_opd - 1) / 2 + 1
wcs.wcs.crval[self.__opd_idx] = 0
# TODO: Estimate uncertainty/hits
output = FTSData(spectra, meta=self.meta, wcs=wcs)
return output
def __invert_onesided(self, apodization_function=None):
"""Invert a onesided interferograms cube.
Parameters
----------
apodization_function : func
Apodization function to be used on the interferograms (default: None)
Returns
-------
output : FTSData
The corresponding spectral cube
Notes
-----
Choice can be made among the window functions available in numpy at [1]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
.. [1] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
assert self.__is_opd, "Input should be an OPD cube"
assert self._is_onesided, "Not a one sided interferogram cube"
cdelt_opd = self.wcs.wcs.cdelt[self.__opd_idx]
cunit_opd = u.Unit(self.wcs.wcs.cunit[self.__opd_idx])
naxis_opd = self.shape[0]
if apodization_function is None:
apodization_function = np.ones
_cube = np.ma.array(self.data, mask=self.mask).filled(0) * np.expand_dims(
apodization_function(2 * naxis_opd)[naxis_opd:], tuple(np.arange(1, self.ndim))
)
# Spencer 2005 Eq 2.29, direct fft
# Trick is to use the unnormalized irfft
output_shape = 2 * naxis_opd - 1
spectra = np.fft.irfft(_cube, n=output_shape, axis=0) * output_shape
# Factor of 2 because we used the fourier transform
spectra *= (4 * cdelt_opd * cunit_opd).decompose().value
spectra = np.fft.fftshift(spectra, axes=0)
# Build new wcs
wcs = deepcopy(self.wcs)
wcs.wcs.ctype[self.__opd_idx] = "FREQ"
wcs.wcs.cunit[self.__opd_idx] = "Hz"
# (cst.c / (cdelt_opd * cunit_opd) / (output_shape-1)).to(u.Hz).value gives the 1/2L resolution, but fails in the tests
wcs.wcs.cdelt[self.__opd_idx] = (cst.c / (cdelt_opd * cunit_opd) / output_shape).to(u.Hz).value
wcs.wcs.crpix[self.__opd_idx] = naxis_opd
wcs.wcs.crval[self.__opd_idx] = 0
# TODO: Estimate uncertainty/hits
output = FTSData(spectra, meta=self.meta, wcs=wcs)
return output
def _get_phase_correction_function(
self,
niter=1,
doublesided_apodization=None,
medfilt_size=None,
deg=None,
fitting_func="polynomial",
pcf_apodization=None,
plot=False,
**kwargs
):
"""Compute the phase correction function for the current cube
This follows the description in [1]_ with some additional features.
Parameters
----------
niter : [int], optional
number of iterations, by default 1
doublesided_apodization : [function], optional
apodization function for the double sided inversion, by default None, but see Notes
medfilt_size : [int], optional
size of the median filtering window to be applied (before polynomial fitting), by default None
deg : [int], optional
the polynomial degree to fit to the phase, by default None
fitting_func : [str], ("polynomial"|"chebychev"), optional
fitting function class, either polynomial or chebyshev, by default, "polynomial"
pcf_apodization : [function], optional
apodization function for the phase correction function, by default None
plot : bool, optional
diagnostic plots, by default False
Returns
-------
array_like (cube shape)
the phase correction function to be used as convolution kernel for the interferograms
Notes
-----
Choice of apodization function can be made among the window functions available in numpy at [2]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
References
----------
.. [1] <NAME>., (2005) Spectral Characterization of the Herschel SPIRE
Photometer, 2005MsT..........1S
"""
if pcf_apodization is None:
pcf_apodization = np.ones
# Working copy
itg = deepcopy(self._extract_doublesided())
# Reference interferogram
itg_ma = np.ma.array(itg.data, mask=itg.mask, copy=True).filled(0)
# Null starting phase (take only the upper part)
phase = np.zeros(((itg.shape[0] - 1) // 2 + 1, *itg.shape[1:]))
# Loop Here
for i in range(niter):
cube = itg._FTSData__invert_doublesided(apodization_function=doublesided_apodization)
# Spencer Eq. 2.39; phases lie in [-pi, pi], hence arctan2 / np.angle
_phase = np.angle(cube.data[(itg.shape[0] - 1) // 2 :])
# Replace bad phase :
_phase[np.isnan(_phase)] = 0
if plot:
import matplotlib.pyplot as plt
fig, axes = plt.subplots(ncols=4)
(freq,) = cube.wcs.sub([self.__opd_idx + 1]).all_pix2world(np.arange(cube.shape[0]), 0)
axes[1].plot(freq, cube.data[:, :, 0])
axes[2].plot(freq, _phase[:, :, 0])
if medfilt_size is not None:
# Median filtering of the phases
_phase = medfilt(_phase, kernel_size=(medfilt_size, *(1,) * (len(itg.shape) - 1)))
if deg is not None:
if fitting_func == "polynomial":
polyfit, polyval = np.polynomial.polynomial.polyfit, np.polynomial.polynomial.polyval
elif fitting_func == "chebychev":
polyfit, polyval = np.polynomial.chebyshev.chebfit, np.polynomial.chebyshev.chebval
else:
raise ValueError('fitting_func should be in ("polynomial"|"chebychev")')
# polynomial fit on the phase, weighted by the intensity
p = []
idx = np.linspace(0, 1, _phase.shape[0])
# np.polynomial.polynomial.polyfit does not accept a (`M`, `K`) array for the weights, so we need to loop....
for spec, weight in zip(
_phase.reshape(_phase.shape[0], -1).T,
np.abs(cube.data[(itg.shape[0] - 1) // 2 :]).reshape(_phase.shape[0], -1).T,
):
p.append(polyfit(idx, spec, deg, w=weight))
p = np.asarray(p).T
# evaluate the polynomial all at once:
_phase = polyval(idx, p).T.reshape(_phase.shape)
# Wrap the phases back to [-pi, pi]; unnecessary, but just in case
_phase = (_phase + np.pi) % (2 * np.pi) - np.pi
"""
fit data also incorporates smoothing in the
out of band region to ensure zero phase and derivative discontinuities and zero amplitude at
zero and Nyquist frequency.
"""
if plot:
axes[2].plot(freq, _phase[:, :, 0], linestyle="--")
phase += _phase
# Spencer 3.30
# Using rfft would yield a purely real PCF but, strangely, can lead to wrong results
# phase_correction_function = np.fft.irfft(np.exp(-1j * phase), axis=0, n=2*(phase.shape[0]-1)+1)
phase_correction_function = np.fft.ifft(
np.exp(-1j * np.fft.fftshift(np.concatenate([-phase[:0:-1], phase]), axes=0)), axis=0
)
# Apodization of the PCF along the first axis
phase_correction_function = (
np.fft.fftshift(phase_correction_function, axes=0).T
* pcf_apodization(phase_correction_function.shape[0])
).T
if plot:
(x,) = itg.wcs.sub([3]).all_pix2world(np.arange(itg.shape[0]), 0)
axes[3].plot(x, phase_correction_function[:, :, 0])
axes[3].set_xlim(-1, 1)
axes[0].plot(x, itg.data[:, :, 0])
axes[0].set_xlim(-1, 1)
# Correct the initial dataset with the current phase for the next iteration
corrected_itg = fftconvolve(itg_ma, phase_correction_function, mode="same", axes=0).real
itg.data[:] = corrected_itg
return phase_correction_function
def to_spectra(self, onesided_apodization=None, **kwargs):
"""Invert an interferograms cube using the (enhanced) Forman method.
This follows the description in [1]_.
Parameters
----------
onesided_apodization : [function], optional
apodization function to be used on the one sided interferograms, by default None
niter : [int], optional
number of iterations, by default 1
doublesided_apodization : [function], optional
apodization function for the double sided inversion, by default None, but see Notes
medfilt_size : [int], optional
size of the median filtering window to be applied (before polynomial fitting), by default None
deg : [int], optional
the polynomial degree to fit to the phase, by default None
pcf_apodization : [function], optional
apodization function for the phase correction function, by default None
Returns
-------
output : FTSData
The corresponding spectral cube
Notes
-----
Choice of apodization function can be made among the window functions available in numpy at [2]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
References
----------
.. [1] <NAME>., (2005) Spectral Characterization of the Herschel SPIRE
Photometer, 2005MsT..........1S
.. [2] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
phase_correction_function = self._get_phase_correction_function(**kwargs)
# Convolved the interferograms and hits
itg = np.ma.array(self.data, mask=self.mask).filled(0)
corrected_itg = fftconvolve(itg, phase_correction_function, mode="same", axes=0).real
corrected_hits = None
if self.hits is not None:
hits = np.ma.array(self.hits, mask=self.mask).filled(0)
corrected_hits = fftconvolve(hits, phase_correction_function, mode="same", axes=0).real
corrected = FTSData(corrected_itg, wcs=self.wcs, hits=corrected_hits)
onesided = corrected._to_onesided()
return onesided.__invert_onesided(apodization_function=onesided_apodization)
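# A hedged usage sketch; the extra keyword arguments are forwarded to
# _get_phase_correction_function (see above):
#   spectra = cube.to_spectra(onesided_apodization=np.hanning,
#                             niter=3,
#                             doublesided_apodization=forman,
#                             medfilt_size=5, deg=3)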
def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_hits="HITS",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
):
"""Creates an HDUList object from a FTSData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_hits : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty, ``'HITS'`` for hits and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
Raises
-------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a astropy uncertainty type.
- If ``self.uncertainty`` is set but has another unit then
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.meta, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.meta.copy()
else:
header = fits.Header(self.meta)
if self.unit is not None and self.unit is not u.dimensionless_unscaled:
header["bunit"] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, "shape"):
raise ValueError("only a numpy.ndarray mask can be saved.")
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information which uncertainty was
# used so that loading the HDUList can infer the uncertainty type.
# No idea how this can be done so only allow StdDevUncertainty.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError("only uncertainties of type {} can be saved.".format(_known_uncertainties))
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
# Assuming uncertainty is an StdDevUncertainty save just the array
# this might be problematic if the Uncertainty has a unit differing
# from the data so abort for different units. This is important for
# astropy > 1.2
if hasattr(self.uncertainty, "unit") and self.uncertainty.unit is not None and self.unit is not None:
if not _uncertainty_unit_equivalent_to_parent(uncertainty_cls, self.uncertainty.unit, self.unit):
raise ValueError(
"saving uncertainties with a unit that is not "
"equivalent to the unit from the data unit is not "
"supported."
)
hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_hits and self.hits is not None:
# Always assuming that the hits map is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.hits, "shape"):
raise ValueError("only a numpy.ndarray hits can be saved.")
# Cast the hits to uint16 for FITS storage.
hduHits = fits.ImageHDU(self.hits.astype(np.uint16), name=hdu_hits)
hdus.append(hduHits)
if hdu_flags and self.flags:
raise NotImplementedError("adding the flags to a HDU is not " "supported at this time.")
hdulist = fits.HDUList(hdus)
return hdulist
@classmethod
def from_array(cls, opd, data, hits=None, mask=None, **kwargs):
"""Construct FTS data from arrays.
Parameters
----------
opd : array_like or Quantity (M,)
the optical path difference; plain (non-Quantity) arrays are assumed to be in 'mm'
data : array_like (M, *)
the corresponding data, first dimension must match opd
hits : array_like, optional
the corresponding hits
mask : array_like, optional
the corresponding mask
Returns
-------
data : FTSData
the corresponding FTSData objects
"""
naxis = len(data.shape)
wcs = WCS(naxis=naxis)
if not isinstance(opd, u.Quantity):
opd = u.Quantity(opd, "mm")
zpd_idx = np.argmin(np.abs(opd))
if opd[zpd_idx] != 0:
print("Shifting opd by {} so that the ZPD falls at 0".format(opd[zpd_idx]))
opd -= opd[zpd_idx]
dpd = np.diff(opd)
np.testing.assert_almost_equal(
np.median(dpd).to(dpd.unit).value, dpd.value, err_msg="Problem on opd differences"
)
wcs.wcs.ctype[naxis - 1] = "OPD"
wcs.wcs.cunit[naxis - 1] = opd.unit
wcs.wcs.crpix[naxis - 1] = zpd_idx + 1
wcs.wcs.crval[naxis - 1] = opd[zpd_idx].value
wcs.wcs.cdelt[naxis - 1] = np.median(dpd).value
if mask is None:
mask = False
return cls(data, wcs=wcs, hits=hits, mask=mask | np.isnan(data), **kwargs)
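# A minimal construction sketch from plain arrays (synthetic values):
#   opd = np.linspace(-2., 2., 161) * u.mm       # must contain (or be shifted to) 0
#   data = np.random.standard_normal((161, 8, 8))
#   cube = FTSData.from_array(opd, data)
#   cube.opd_axis                                 # OPD Quantity rebuilt from the WCS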
def fits_ftsdata_writer(
fts_data,
filename,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_hits="HITS",
hdu_flags=None,
key_uncertainty_type="UTYPE",
**kwd
):
"""
Write FTSData object to FITS file.
Parameters
----------
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_hits, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty, ``'HITS'`` for hits and
``None`` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
-------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has another unit then
``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = fts_data.to_hdu(
hdu_mask=hdu_mask,
hdu_uncertainty=hdu_uncertainty,
hdu_hits=hdu_hits,
key_uncertainty_type=key_uncertainty_type,
hdu_flags=hdu_flags,
)
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(FTSData):
# registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer("fits", FTSData, fits_ftsdata_writer)
registry.register_identifier("fits", FTSData, fits.connect.is_fits)
|
[
"numpy.abs",
"numpy.angle",
"astropy.io.fits.PrimaryHDU",
"numpy.ones",
"numpy.isnan",
"astropy.io.fits.Header",
"numpy.arange",
"scipy.signal.fftconvolve",
"astropy.io.fits.HDUList",
"astropy.io.fits.ImageHDU",
"numpy.fft.ifftshift",
"astropy.io.registry.register_writer",
"numpy.fft.irfft",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"copy.deepcopy",
"astropy.units.Quantity",
"astropy.io.registry.register_identifier",
"numpy.median",
"astropy.io.registry.delay_doc_updates",
"numpy.asarray",
"numpy.fft.fftshift",
"numpy.char.lower",
"astropy.nddata.ccddata._uncertainty_unit_equivalent_to_parent",
"astropy.units.Unit",
"numpy.concatenate",
"numpy.zeros",
"astropy.wcs.WCS",
"numpy.ma.array",
"numpy.diff",
"numpy.array",
"numpy.sign"
] |
[((1238, 1253), 'numpy.arange', 'np.arange', (['(0)', 'M'], {}), '(0, M)\n', (1247, 1253), True, 'import numpy as np\n'), ((30442, 30477), 'astropy.io.registry.delay_doc_updates', 'registry.delay_doc_updates', (['FTSData'], {}), '(FTSData)\n', (30468, 30477), False, 'from astropy.io import fits, registry\n'), ((30555, 30617), 'astropy.io.registry.register_writer', 'registry.register_writer', (['"""fits"""', 'FTSData', 'fits_ftsdata_writer'], {}), "('fits', FTSData, fits_ftsdata_writer)\n", (30579, 30617), False, 'from astropy.io import fits, registry\n'), ((30622, 30689), 'astropy.io.registry.register_identifier', 'registry.register_identifier', (['"""fits"""', 'FTSData', 'fits.connect.is_fits'], {}), "('fits', FTSData, fits.connect.is_fits)\n", (30650, 30689), False, 'from astropy.io import fits, registry\n'), ((1169, 1181), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1177, 1181), True, 'import numpy as np\n'), ((1212, 1229), 'numpy.ones', 'np.ones', (['(1)', 'float'], {}), '(1, float)\n', (1219, 1229), True, 'import numpy as np\n'), ((6077, 6106), 'numpy.sign', 'np.sign', (['opd_wcs.wcs.cdelt[0]'], {}), '(opd_wcs.wcs.cdelt[0])\n', (6084, 6106), True, 'import numpy as np\n'), ((6301, 6319), 'copy.deepcopy', 'deepcopy', (['self.wcs'], {}), '(self.wcs)\n', (6309, 6319), False, 'from copy import deepcopy\n'), ((6390, 6409), 'copy.deepcopy', 'deepcopy', (['self.meta'], {}), '(self.meta)\n', (6398, 6409), False, 'from copy import deepcopy\n'), ((8235, 8253), 'copy.deepcopy', 'deepcopy', (['self.wcs'], {}), '(self.wcs)\n', (8243, 8253), False, 'from copy import deepcopy\n'), ((9383, 9425), 'astropy.units.Unit', 'u.Unit', (['self.wcs.wcs.cunit[self.__opd_idx]'], {}), '(self.wcs.wcs.cunit[self.__opd_idx])\n', (9389, 9425), True, 'import astropy.units as u\n'), ((10048, 10080), 'numpy.fft.fftshift', 'np.fft.fftshift', (['spectra'], {'axes': '(0)'}), '(spectra, axes=0)\n', (10063, 10080), True, 'import numpy as np\n'), ((10159, 10177), 'copy.deepcopy', 'deepcopy', (['self.wcs'], {}), '(self.wcs)\n', (10167, 10177), False, 'from copy import deepcopy\n'), ((11662, 11704), 'astropy.units.Unit', 'u.Unit', (['self.wcs.wcs.cunit[self.__opd_idx]'], {}), '(self.wcs.wcs.cunit[self.__opd_idx])\n', (11668, 11704), True, 'import astropy.units as u\n'), ((12364, 12396), 'numpy.fft.fftshift', 'np.fft.fftshift', (['spectra'], {'axes': '(0)'}), '(spectra, axes=0)\n', (12379, 12396), True, 'import numpy as np\n'), ((12436, 12454), 'copy.deepcopy', 'deepcopy', (['self.wcs'], {}), '(self.wcs)\n', (12444, 12454), False, 'from copy import deepcopy\n'), ((15251, 15306), 'numpy.zeros', 'np.zeros', (['((itg.shape[0] - 1) // 2 + 1, *itg.shape[1:])'], {}), '(((itg.shape[0] - 1) // 2 + 1, *itg.shape[1:]))\n', (15259, 15306), True, 'import numpy as np\n'), ((27210, 27228), 'astropy.io.fits.HDUList', 'fits.HDUList', (['hdus'], {}), '(hdus)\n', (27222, 27228), False, 'from astropy.io import fits, registry\n'), ((27925, 27941), 'astropy.wcs.WCS', 'WCS', ([], {'naxis': 'naxis'}), '(naxis=naxis)\n', (27928, 27941), False, 'from astropy.wcs import WCS\n'), ((28214, 28226), 'numpy.diff', 'np.diff', (['opd'], {}), '(opd)\n', (28221, 28226), True, 'import numpy as np\n'), ((9864, 9895), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['_cube'], {'axes': '(0)'}), '(_cube, axes=0)\n', (9880, 9895), True, 'import numpy as np\n'), ((12162, 12205), 'numpy.fft.irfft', 'np.fft.irfft', (['_cube'], {'n': 'output_shape', 'axis': '(0)'}), '(_cube, n=output_shape, axis=0)\n', (12174, 12205), True, 'import numpy as np\n'), ((15561, 15606), 
'numpy.angle', 'np.angle', (['cube.data[(itg.shape[0] - 1) // 2:]'], {}), '(cube.data[(itg.shape[0] - 1) // 2:])\n', (15569, 15606), True, 'import numpy as np\n'), ((20913, 20977), 'scipy.signal.fftconvolve', 'fftconvolve', (['itg', 'phase_correction_function'], {'mode': '"""same"""', 'axes': '(0)'}), "(itg, phase_correction_function, mode='same', axes=0)\n", (20924, 20977), False, 'from scipy.signal import fftconvolve, medfilt\n'), ((23537, 23559), 'astropy.io.fits.Header', 'fits.Header', (['self.meta'], {}), '(self.meta)\n', (23548, 23559), False, 'from astropy.io import fits, registry\n'), ((24504, 24538), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['self.data', 'header'], {}), '(self.data, header)\n', (24519, 24538), False, 'from astropy.io import fits, registry\n'), ((25623, 25636), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (25634, 25636), False, 'from astropy.io import fits, registry\n'), ((26472, 26548), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['self.uncertainty.array', 'hdr_uncertainty'], {'name': 'hdu_uncertainty'}), '(self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty)\n', (26485, 26548), False, 'from astropy.io import fits, registry\n'), ((28005, 28026), 'astropy.units.Quantity', 'u.Quantity', (['opd', '"""mm"""'], {}), "(opd, 'mm')\n", (28015, 28026), True, 'import astropy.units as u\n'), ((28056, 28067), 'numpy.abs', 'np.abs', (['opd'], {}), '(opd)\n', (28062, 28067), True, 'import numpy as np\n'), ((28595, 28609), 'numpy.median', 'np.median', (['dpd'], {}), '(dpd)\n', (28604, 28609), True, 'import numpy as np\n'), ((4443, 4467), 'numpy.arange', 'np.arange', (['self.shape[0]'], {}), '(self.shape[0])\n', (4452, 4467), True, 'import numpy as np\n'), ((4610, 4634), 'numpy.arange', 'np.arange', (['self.shape[0]'], {}), '(self.shape[0])\n', (4619, 4634), True, 'import numpy as np\n'), ((5969, 5998), 'numpy.arange', 'np.arange', (['self.data.shape[0]'], {}), '(self.data.shape[0])\n', (5978, 5998), True, 'import numpy as np\n'), ((15119, 15166), 'numpy.ma.array', 'np.ma.array', (['itg.data'], {'mask': 'itg.mask', 'copy': '(True)'}), '(itg.data, mask=itg.mask, copy=True)\n', (15130, 15166), True, 'import numpy as np\n'), ((15662, 15678), 'numpy.isnan', 'np.isnan', (['_phase'], {}), '(_phase)\n', (15670, 15678), True, 'import numpy as np\n'), ((15783, 15804), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(4)'}), '(ncols=4)\n', (15795, 15804), True, 'import matplotlib.pyplot as plt\n'), ((16783, 16817), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '_phase.shape[0]'], {}), '(0, 1, _phase.shape[0])\n', (16794, 16817), True, 'import numpy as np\n'), ((18914, 18981), 'scipy.signal.fftconvolve', 'fftconvolve', (['itg_ma', 'phase_correction_function'], {'mode': '"""same"""', 'axes': '(0)'}), "(itg_ma, phase_correction_function, mode='same', axes=0)\n", (18925, 18981), False, 'from scipy.signal import fftconvolve, medfilt\n'), ((20840, 20878), 'numpy.ma.array', 'np.ma.array', (['self.data'], {'mask': 'self.mask'}), '(self.data, mask=self.mask)\n', (20851, 20878), True, 'import numpy as np\n'), ((21145, 21210), 'scipy.signal.fftconvolve', 'fftconvolve', (['hits', 'phase_correction_function'], {'mode': '"""same"""', 'axes': '(0)'}), "(hits, phase_correction_function, mode='same', axes=0)\n", (21156, 21210), False, 'from scipy.signal import fftconvolve, medfilt\n'), ((3703, 3717), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (3711, 3717), True, 'import numpy as np\n'), ((9638, 9676), 'numpy.ma.array', 'np.ma.array', 
(['self.data'], {'mask': 'self.mask'}), '(self.data, mask=self.mask)\n', (9649, 9676), True, 'import numpy as np\n'), ((9756, 9779), 'numpy.arange', 'np.arange', (['(1)', 'self.ndim'], {}), '(1, self.ndim)\n', (9765, 9779), True, 'import numpy as np\n'), ((11841, 11879), 'numpy.ma.array', 'np.ma.array', (['self.data'], {'mask': 'self.mask'}), '(self.data, mask=self.mask)\n', (11852, 11879), True, 'import numpy as np\n'), ((11975, 11998), 'numpy.arange', 'np.arange', (['(1)', 'self.ndim'], {}), '(1, self.ndim)\n', (11984, 11998), True, 'import numpy as np\n'), ((15880, 15904), 'numpy.arange', 'np.arange', (['cube.shape[0]'], {}), '(cube.shape[0])\n', (15889, 15904), True, 'import numpy as np\n'), ((17240, 17253), 'numpy.asarray', 'np.asarray', (['p'], {}), '(p)\n', (17250, 17253), True, 'import numpy as np\n'), ((18570, 18593), 'numpy.arange', 'np.arange', (['itg.shape[0]'], {}), '(itg.shape[0])\n', (18579, 18593), True, 'import numpy as np\n'), ((21067, 21105), 'numpy.ma.array', 'np.ma.array', (['self.hits'], {'mask': 'self.mask'}), '(self.hits, mask=self.mask)\n', (21078, 21105), True, 'import numpy as np\n'), ((26111, 26205), 'astropy.nddata.ccddata._uncertainty_unit_equivalent_to_parent', '_uncertainty_unit_equivalent_to_parent', (['uncertainty_cls', 'self.uncertainty.unit', 'self.unit'], {}), '(uncertainty_cls, self.uncertainty.\n unit, self.unit)\n', (26149, 26205), False, 'from astropy.nddata.ccddata import _unc_name_to_cls, _unc_cls_to_name, _uncertainty_unit_equivalent_to_parent\n'), ((28725, 28739), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (28733, 28739), True, 'import numpy as np\n'), ((18355, 18405), 'numpy.fft.fftshift', 'np.fft.fftshift', (['phase_correction_function'], {'axes': '(0)'}), '(phase_correction_function, axes=0)\n', (18370, 18405), True, 'import numpy as np\n'), ((28279, 28293), 'numpy.median', 'np.median', (['dpd'], {}), '(dpd)\n', (28288, 28293), True, 'import numpy as np\n'), ((3856, 3889), 'numpy.char.lower', 'np.char.lower', (['self.wcs.wcs.ctype'], {}), '(self.wcs.wcs.ctype)\n', (3869, 3889), True, 'import numpy as np\n'), ((4022, 4055), 'numpy.char.lower', 'np.char.lower', (['self.wcs.wcs.ctype'], {}), '(self.wcs.wcs.ctype)\n', (4035, 4055), True, 'import numpy as np\n'), ((18167, 18205), 'numpy.concatenate', 'np.concatenate', (['[-phase[:0:-1], phase]'], {}), '([-phase[:0:-1], phase])\n', (18181, 18205), True, 'import numpy as np\n'), ((17059, 17102), 'numpy.abs', 'np.abs', (['cube.data[(itg.shape[0] - 1) // 2:]'], {}), '(cube.data[(itg.shape[0] - 1) // 2:])\n', (17065, 17102), True, 'import numpy as np\n')]
|
# coding=utf-8
name = "mattermost_handler"
import logging
import requests
logger = logging.getLogger(__name__)
class MattermostIncomeWebhookHandler(logging.Handler):
def __init__(self, url):
super(MattermostIncomeWebhookHandler, self).__init__()
self.url = url
if self.url is None:
logger.warning("Mattermost webhook url cannot be None")
def emit(self, record):
if self.url is not None:
requests.post(self.url, json={"text": self.format(record)})
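# A minimal wiring example (the webhook URL is a placeholder):
#   handler = MattermostIncomeWebhookHandler("https://mattermost.example.com/hooks/xyz")
#   handler.setFormatter(logging.Formatter("%(levelname)s %(name)s: %(message)s"))
#   logging.getLogger().addHandler(handler)
#   logging.getLogger(__name__).error("boom")   # posted to the channel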
|
[
"logging.getLogger"
] |
[((86, 113), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (103, 113), False, 'import logging\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_utils.ipynb (unless otherwise specified).
__all__ = ['load_camvid_dataset', 'display_segmentation', 'display_segmentation_from_file', 'CamvidDataset']
# Cell
import matplotlib.pyplot as plt
import os
import torch
import torchvision.transforms.functional as tf
from PIL import Image
# Cell
def load_camvid_dataset(data_directory):
with open(os.path.join(data_directory, "valid.txt"), "r") as f:
val_names = [line.strip() for line in f]
with open(os.path.join(data_directory, "codes.txt"), "r") as f:
label_mapping = {l.strip(): i for i, l in enumerate(f)}
data = []
image_index_mapping = {}
for im_f in os.listdir(os.path.join(data_directory, "images")):
if im_f.split('.')[-1] != 'png':
continue
image_index_mapping[im_f] = len(data)
fp = os.path.join(data_directory, "images", im_f)
data.append(fp)
for label_f in os.listdir(os.path.join(data_directory, "labels")):
im_f = label_f.split('.')
im_f[0] = '_'.join(im_f[0].split('_')[:-1])
im_f = '.'.join(im_f)
index = image_index_mapping[im_f]
fp = os.path.join(data_directory, "labels", label_f)
data[index] = (data[index], fp)
val_indices = [image_index_mapping[name] for name in val_names]
return data, val_indices, label_mapping
# Cell
def display_segmentation(image, target, ax=None):
if ax:
ax.imshow(image, cmap='gray')
else:
plt.imshow(image, cmap='gray')
if ax:
ax.imshow(target, cmap='jet', alpha=0.5)
else:
plt.imshow(target, cmap='jet', alpha=0.5)
plt.show()
def display_segmentation_from_file(im_f, label_f):
im, label = Image.open(im_f), Image.open(label_f)
display_segmentation(im, label)
# Cell
class CamvidDataset(torch.utils.data.Dataset):
def __init__(self, data, resize_shape=(360, 480), is_train=True):
self.images, self.labels = [tpl[0] for tpl in data], \
[tpl[1] for tpl in data]
self.resize_shape = resize_shape
self.is_train = is_train
def transform(self, index):
input, target = map(
Image.open, (self.images[index], self.labels[index]))
input, target = (
tf.resize(input, self.resize_shape),
tf.resize(target, self.resize_shape, interpolation=Image.NEAREST)
)
if self.is_train:
horizontal_draw = torch.rand(1).item()
vertical_draw = torch.rand(1).item()
if horizontal_draw > 0.5:
input, target = tf.hflip(input), tf.hflip(target)
if vertical_draw > 0.5:
input, target = tf.vflip(input), tf.vflip(target)
input, target = map(tf.to_tensor, (input, target))
torch.clamp((255 * target), 0, 32, out=target)
return tf.normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), target.long()
def __getitem__(self, index):
return self.transform(index)
def __len__(self):
return len(self.images)
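# A hedged end-to-end sketch, assuming the usual CamVid layout
# (images/, labels/, codes.txt, valid.txt) under a 'camvid' directory:
#   data, val_indices, label_mapping = load_camvid_dataset("camvid")
#   val_set = set(val_indices)
#   train = [d for i, d in enumerate(data) if i not in val_set]
#   ds = CamvidDataset(train, resize_shape=(360, 480), is_train=True)
#   loader = torch.utils.data.DataLoader(ds, batch_size=4, shuffle=True)
#   x, y = next(iter(loader))   # x: float (4, 3, 360, 480), y: long (4, 1, 360, 480)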
|
[
"matplotlib.pyplot.show",
"torchvision.transforms.functional.hflip",
"matplotlib.pyplot.imshow",
"torchvision.transforms.functional.resize",
"PIL.Image.open",
"torch.clamp",
"torchvision.transforms.functional.vflip",
"torch.rand",
"torchvision.transforms.functional.normalize",
"os.path.join"
] |
[((1655, 1665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1663, 1665), True, 'import matplotlib.pyplot as plt\n'), ((701, 739), 'os.path.join', 'os.path.join', (['data_directory', '"""images"""'], {}), "(data_directory, 'images')\n", (713, 739), False, 'import os\n'), ((863, 907), 'os.path.join', 'os.path.join', (['data_directory', '"""images"""', 'im_f'], {}), "(data_directory, 'images', im_f)\n", (875, 907), False, 'import os\n'), ((962, 1000), 'os.path.join', 'os.path.join', (['data_directory', '"""labels"""'], {}), "(data_directory, 'labels')\n", (974, 1000), False, 'import os\n'), ((1174, 1221), 'os.path.join', 'os.path.join', (['data_directory', '"""labels"""', 'label_f'], {}), "(data_directory, 'labels', label_f)\n", (1186, 1221), False, 'import os\n'), ((1500, 1530), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (1510, 1530), True, 'import matplotlib.pyplot as plt\n'), ((1609, 1650), 'matplotlib.pyplot.imshow', 'plt.imshow', (['target'], {'cmap': '"""jet"""', 'alpha': '(0.5)'}), "(target, cmap='jet', alpha=0.5)\n", (1619, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1750), 'PIL.Image.open', 'Image.open', (['im_f'], {}), '(im_f)\n', (1744, 1750), False, 'from PIL import Image\n'), ((1752, 1771), 'PIL.Image.open', 'Image.open', (['label_f'], {}), '(label_f)\n', (1762, 1771), False, 'from PIL import Image\n'), ((2822, 2866), 'torch.clamp', 'torch.clamp', (['(255 * target)', '(0)', '(32)'], {'out': 'target'}), '(255 * target, 0, 32, out=target)\n', (2833, 2866), False, 'import torch\n'), ((396, 437), 'os.path.join', 'os.path.join', (['data_directory', '"""valid.txt"""'], {}), "(data_directory, 'valid.txt')\n", (408, 437), False, 'import os\n'), ((513, 554), 'os.path.join', 'os.path.join', (['data_directory', '"""codes.txt"""'], {}), "(data_directory, 'codes.txt')\n", (525, 554), False, 'import os\n'), ((2297, 2332), 'torchvision.transforms.functional.resize', 'tf.resize', (['input', 'self.resize_shape'], {}), '(input, self.resize_shape)\n', (2306, 2332), True, 'import torchvision.transforms.functional as tf\n'), ((2346, 2411), 'torchvision.transforms.functional.resize', 'tf.resize', (['target', 'self.resize_shape'], {'interpolation': 'Image.NEAREST'}), '(target, self.resize_shape, interpolation=Image.NEAREST)\n', (2355, 2411), True, 'import torchvision.transforms.functional as tf\n'), ((2884, 2949), 'torchvision.transforms.functional.normalize', 'tf.normalize', (['input', '[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2896, 2949), True, 'import torchvision.transforms.functional as tf\n'), ((2478, 2491), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2488, 2491), False, 'import torch\n'), ((2527, 2540), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2537, 2540), False, 'import torch\n'), ((2618, 2633), 'torchvision.transforms.functional.hflip', 'tf.hflip', (['input'], {}), '(input)\n', (2626, 2633), True, 'import torchvision.transforms.functional as tf\n'), ((2635, 2651), 'torchvision.transforms.functional.hflip', 'tf.hflip', (['target'], {}), '(target)\n', (2643, 2651), True, 'import torchvision.transforms.functional as tf\n'), ((2720, 2735), 'torchvision.transforms.functional.vflip', 'tf.vflip', (['input'], {}), '(input)\n', (2728, 2735), True, 'import torchvision.transforms.functional as tf\n'), ((2737, 2753), 'torchvision.transforms.functional.vflip', 'tf.vflip', (['target'], {}), '(target)\n', (2745, 2753), True, 
'import torchvision.transforms.functional as tf\n')]
|
import matrices_new_extended as mne
import numpy as np
import sympy as sp
from equality_check import Point
x, y, z = sp.symbols("x y z")
Point.base_point = np.array([x, y, z, 1])
class Test_Axis_3_xxx:
def test_matrix_3_xxx(self):
expected = Point([ z, x, y, 1])
calculated = Point.calculate(mne._matrix_3_xxx)
assert calculated == expected
def test_matrix_3_1_mtmHxx_ttt(self):
expected = Point([ z, 1+x, 1+y, 1])
calculated = Point.calculate(mne._matrix_3_1_mtmHxx_ttt)
assert calculated == expected
def test_matrix_3_1_HxmHxx_ttt(self):
expected = Point([ 1+z, x, 1+y, 1])
calculated = Point.calculate(mne._matrix_3_1_HxmHxx_ttt)
assert calculated == expected
def test_matrix_3_1_Hxtxx_ttt(self):
expected = Point([ 1+z, 1+x, y, 1])
calculated = Point.calculate(mne._matrix_3_1_Hxtxx_ttt)
assert calculated == expected
def test_matrix_3_xxx_hhh(self):
expected = Point([ 1+z, 1+x, 1+y, 1])
calculated = Point.calculate(mne._matrix_3_xxx_hhh)
assert calculated == expected
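# Pattern of these tests (a hedged reading of the helpers): each
# mne._matrix_* is a 4x4 affine symmetry operation, Point.calculate applies
# it to the symbolic base point (x, y, z, 1), and the expected Point spells
# out the image, e.g. the 3-fold axis along (1, 1, 1) maps (x, y, z) to (z, x, y).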
|
[
"sympy.symbols",
"numpy.array",
"equality_check.Point.calculate",
"equality_check.Point"
] |
[((118, 137), 'sympy.symbols', 'sp.symbols', (['"""x y z"""'], {}), "('x y z')\n", (128, 137), True, 'import sympy as sp\n'), ((157, 179), 'numpy.array', 'np.array', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (165, 179), True, 'import numpy as np\n'), ((258, 277), 'equality_check.Point', 'Point', (['[z, x, y, 1]'], {}), '([z, x, y, 1])\n', (263, 277), False, 'from equality_check import Point\n'), ((300, 334), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_xxx'], {}), '(mne._matrix_3_xxx)\n', (315, 334), False, 'from equality_check import Point\n'), ((435, 462), 'equality_check.Point', 'Point', (['[z, 1 + x, 1 + y, 1]'], {}), '([z, 1 + x, 1 + y, 1])\n', (440, 462), False, 'from equality_check import Point\n'), ((481, 524), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_1_mtmHxx_ttt'], {}), '(mne._matrix_3_1_mtmHxx_ttt)\n', (496, 524), False, 'from equality_check import Point\n'), ((625, 652), 'equality_check.Point', 'Point', (['[1 + z, x, 1 + y, 1]'], {}), '([1 + z, x, 1 + y, 1])\n', (630, 652), False, 'from equality_check import Point\n'), ((671, 714), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_1_HxmHxx_ttt'], {}), '(mne._matrix_3_1_HxmHxx_ttt)\n', (686, 714), False, 'from equality_check import Point\n'), ((814, 841), 'equality_check.Point', 'Point', (['[1 + z, 1 + x, y, 1]'], {}), '([1 + z, 1 + x, y, 1])\n', (819, 841), False, 'from equality_check import Point\n'), ((860, 902), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_1_Hxtxx_ttt'], {}), '(mne._matrix_3_1_Hxtxx_ttt)\n', (875, 902), False, 'from equality_check import Point\n'), ((998, 1029), 'equality_check.Point', 'Point', (['[1 + z, 1 + x, 1 + y, 1]'], {}), '([1 + z, 1 + x, 1 + y, 1])\n', (1003, 1029), False, 'from equality_check import Point\n'), ((1046, 1084), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_3_xxx_hhh'], {}), '(mne._matrix_3_xxx_hhh)\n', (1061, 1084), False, 'from equality_check import Point\n')]
|
'''
Application-level properties (name, base path, loggers) shared by the
modules of the launching script.
'''
from collections import namedtuple
import logging
import os.path
import sys
_THIS = sys.modules[__name__]
AppProperties = namedtuple('AppProperties', ('name', 'path', 'root_logger', 'init', 'get_path', 'get_logger'))
def __dummy(*args, **kwargs):
pass
def __get_logger(fullmodulename):
    ''' Return a logger whose name is namespaced under the application name. '''
if fullmodulename == '__main__' or fullmodulename == _THIS.Properties.name:
logname = _THIS.Properties.name
else:
modulename = fullmodulename.split('.')[-1]
if not modulename: logname = _THIS.Properties.name
else: logname = '.'.join((_THIS.Properties.name, modulename))
return logging.getLogger(logname)
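# Illustrative sketch of the naming scheme above (module names are
# hypothetical; assumes Properties.name == 'myapp'):
#   __get_logger('__main__')        -> logging.getLogger('myapp')
#   __get_logger('myapp.db.models') -> logging.getLogger('myapp.models')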
def __get_path(extension, path_given=None):
'''
Generates the full absolute path of a file.
    This function builds an absolute path to a file based on 3 'default' values
(the basename of the file, the extension of the file, and an absolute path) and
an extra argument that represents a valid path.
Depending on what represents this path (a directory, a file, an absolute or a
relative reference) the function will generate a full absolute path, relying on the
'default' parameters if and when necessary.
The generation of the full path follows those rules:
- the default name is made of the default basename and the default extension;
- if the path given is empty, then the full path is the default absolute path
with the default filename;
- if the path given contains a filename at the end, this is the filename to be used;
- if the path given contains an absolute path at the beginning, that is the
absolute path that will be used;
- if the path given contains only a relative path at the beginning, then
the default absolute path will be prepended to the path given.
    Args:
        extension (string): the extension of the file, in the form '.xxx', i.e. with the dot
        path_given (string): the path given as alternative to the default; the
            default basename and absolute directory come from the module Properties
Returns:
string: a full absolute path
'''
dfltname = ''.join((_THIS.Properties.name, extension))
if path_given == '':
filepath = os.path.join(_THIS.Properties.path, dfltname)
else:
dirname, filename = os.path.split(path_given.strip())
if dirname != '': dirname = os.path.normpath(dirname)
if filename == '': filename = dfltname
if dirname == '': dirname = _THIS.Properties.path
elif not os.path.isabs(dirname): dirname = os.path.join(_THIS.Properties.path, dirname)
filepath = os.path.join(dirname, filename)
return os.path.normpath(filepath)
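# A worked sketch of the rules above (values are hypothetical; assumes
# Properties.name == 'myapp' and Properties.path == '/opt/myapp'):
#   __get_path('.log', '')                -> '/opt/myapp/myapp.log'
#   __get_path('.log', 'logs/')           -> '/opt/myapp/logs/myapp.log'
#   __get_path('.log', '/var/log/x.log')  -> '/var/log/x.log'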
def __init_properties(full_path):
name = os.path.splitext(os.path.basename(full_path))[0] # first part of the filename, without extension
path = os.path.realpath(os.path.dirname(full_path)) # full path of the launching script
root_logger = logging.getLogger(name)
_THIS.Properties = AppProperties(name, path, root_logger, __dummy, __get_path, __get_logger)
Properties = AppProperties('', '', None, __init_properties, __dummy, __dummy)
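# Hedged usage sketch (the importing module's name `appprops` is hypothetical):
#   import appprops
#   appprops.Properties.init(__file__)    # replaces the placeholder tuple
#   log = appprops.Properties.get_logger(__name__)
#   logfile = appprops.Properties.get_path('.log', '')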
|
[
"collections.namedtuple",
"logging.getLogger"
] |
[((143, 241), 'collections.namedtuple', 'namedtuple', (['"""AppProperties"""', "('name', 'path', 'root_logger', 'init', 'get_path', 'get_logger')"], {}), "('AppProperties', ('name', 'path', 'root_logger', 'init',\n 'get_path', 'get_logger'))\n", (153, 241), False, 'from collections import namedtuple\n'), ((656, 682), 'logging.getLogger', 'logging.getLogger', (['logname'], {}), '(logname)\n', (673, 682), False, 'import logging\n'), ((3076, 3099), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (3093, 3099), False, 'import logging\n')]
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division # Eliminate need for decimals on whole values
import sys
# As of 28 July 2019, python3.6 is the default "python3" in apt-get install python3
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
print("This script requires Python version 3.6")
sys.exit(1)
import configparser # config file parsing
import argparse # command line parsing
import os
from datetime import date, timedelta, datetime
from time import time # For performance timing
from math import acos, asin, atan, cos, sin, tan, degrees # Fast/precise math functions
import numpy as np
import logging
import string
from spacetrack import SpaceTrackClient
# These are necessary until <NAME> approves pull requests
# https://github.com/brandon-rhodes/python-sgp4/pull/35
sys.path.insert(1, '../python-sgp4')
# https://github.com/skyfielders/python-skyfield/pull/276
sys.path.insert(2, '/Users/chris/Dropbox/code/preMVP/python-skyfield')
# FIXME: Note python-skyfield is not currently compatible with cythonized python-SGP4
from skyfield.iokit import Loader, download, parse_tle
from skyfield import sgp4lib
# The following 5 lines are necessary until our modules are public
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
tle_path = os.path.join(parentdir, "sathunt-tle")
sys.path.insert(1,tle_path)
from tle_util import make_tle, append_tle_file
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / mag(vector)
def proj(v2, v1):
""" Returns the unit vector projection of v1 onto v2 """
b = np.dot(v2, v1)/np.dot(v2, v2)
temp = np.multiply(b, v2)
# Make unit vector
vp = unit_vector(temp)
return vp
def flat_proj(v1, v2):
""" Returns the flat projection of direction unit vector, v1 onto v2 """
temp1 = np.cross(v1, v2)
temp2 = np.cross(temp1, v1)
return proj(temp2, v2)
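# Quick illustrative check (vectors are hypothetical): flat_proj((1, 0, 0), (1, 1, 0))
# returns the unit vector [0, 1, 0] -- the component of v2 orthogonal to v1.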
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
Partially Ref: angle(vec1,vec2) in python-sgp4/ext.py
"""
small = 0.00000001
undefined = 999999.1
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
magv1 = mag(v1)
    magv2 = mag(v2)
if (magv1 * magv2 > small * small):
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
else:
return undefined
def mag(v):
""" Computes the magnitude of a vector ||v||
Renamed from norm(v) used in original Scott Campbell code
to better correspond to function names in SGP4 code.
"""
mag = np.sqrt(np.dot(v, v))
return mag
def main():
""" Interactive tool for finding an unknown TLE object within a library of TLEs
TODO:
    - Implement argv[1] = unid.txt, argv[2] = refer.tle
- Make non-interactive callable version
    - Make stand-alone version that uses python-SGP4 exclusively, not tle_util
- Incorporate logic to read the (first?) TLE from the UNID file
- Incorporate logic to warn/error the user if no TLEs found
- Incorporate Perr/Alpha inputs as command line/config flags
- Put in more compares for altitude, velocity, etc.
"""
t0 = time()
# Read commandline options
conf_parser = argparse.ArgumentParser(description='Utility to assist in ID of an unidentified (unid) satellite')
conf_parser.add_argument("-c", "--conf_file",
help="Specify configuration file. [Default configuration.ini]",
dest='conf_file',
nargs='?',
const=1,
default='configuration.ini',
type=str,
metavar="FILE")
conf_parser.add_argument("-d", "--datadir",
help="data directory [default ./data]",
dest='datadir',
default='./data',
nargs='?',
const=1,
type=str,
metavar="DIR")
conf_parser.add_argument("--tleref",
help="Specify TLE reference file. [Default refer.tle]",
dest='tle_ref',
nargs='?',
type=str,
metavar="REFTLE")
conf_parser.add_argument("--tleunid",
help="Specify TLE unid file. [Default unid.tle]",
dest='tle_unid',
nargs='?',
type=str,
metavar="UNID")
conf_parser.add_argument("--update", help="update TLEs from online sources",
action="store_true")
conf_parser.add_argument("-dbname", "--database",
help="database to USE",
dest='dbname',
default='opensatcat_dev',
nargs='?',
const=1,
type=str,
metavar="NAME")
conf_parser.add_argument("-H", "--hostname",
help="database hostname",
dest='dbhostname',
default='opensatcat.cvpypmmxjtv1.us-east-2.rds.amazonaws.com',
nargs='?',
const=1,
type=str,
metavar="HOSTNAME")
conf_parser.add_argument("-u", "--user",
help="database user name",
dest='dbusername',
nargs='?',
type=str,
metavar="USER")
conf_parser.add_argument("-p", "--password",
help="database user password",
dest='dbpassword',
nargs='?',
type=str,
metavar="PASSWD")
conf_parser.add_argument("-t", "--dbtype",
help="database type [INFILE, sqlserver, sqlite] \
default: INFILE",
dest='dbtype',
nargs='?',
choices=['INFILE', 'sqlserver', 'sqlite'],
default='INFILE',
type=str,
metavar="TYPE")
conf_parser.add_argument("-i", "--import", help="Import TLEs to database",
dest='importTLE',
action="store_true")
conf_parser.add_argument("-q", "--quiet", help="Suppress console output",
dest='quiet',
action="store_true")
conf_parser.add_argument("-V", "--verbose",
help="increase verbosity: 0 = only warnings, 1 = info, 2 = debug. No number means info. Default is no verbosity.",
const=1,
default=1,
type=int,
nargs="?")
# Process commandline options and parse configuration
cfg = configparser.ConfigParser(inline_comment_prefixes=('#', ';'))
args = conf_parser.parse_args()
log = logging.getLogger(__name__)
# make it print to the console.
console = logging.StreamHandler()
log.addHandler(console)
conf_file = args.conf_file
tle_ref = args.tle_ref
tle_unid = args.tle_unid
update = args.update
datadir = args.datadir
dbname = args.dbname
dbhostname = args.dbhostname
dbusername = args.dbusername
dbpassword = args.dbpassword
dbtype = args.dbtype
importTLE = args.importTLE
verbose = args.verbose
quiet = args.quiet
# Set our python-skyfield data directory
load = Loader(datadir)
ts = load.timescale()
if (quiet == False):
if verbose == 0:
log.setLevel(logging.WARN)
elif verbose == 1:
log.setLevel(logging.INFO)
elif verbose == 2:
log.setLevel(logging.DEBUG)
log.debug("Log level set to {}".format(log.level))
if verbose:
for arg in vars(args):
log.debug("%s : %s",arg, getattr(args, arg))
cfg.read([args.conf_file])
log.info("Reading config from: {}".format(args.conf_file))
# 1st arg in original version
if not (tle_ref):
try:
tle_ref = cfg.get('Common', 'tle_ref')
        except (configparser.NoSectionError, configparser.NoOptionError):
tle_ref = "refer.tle"
# 2nd arg in original version
if not (tle_unid):
try:
tle_unid = cfg.get('Common', 'tle_unid')
        except (configparser.NoSectionError, configparser.NoOptionError):
tle_unid = "unid.txt"
# # Read single (first?) TLE from UNIDentified TLE file
# TLE_UNID = tle_util.TLEFile(tle_unid,strict=False)
# for sat_num in TLE_UNID.Satellites: # Need learn a better method to get just the first/only record
# #// id_sat comparison variables
# #// Date t1(tle);
# #// Satellite id_sat(t1.jd, ii, om, ec, ww, ma, nn, bstar);
# UNIDsat = TLE_UNID.Satellites[sat_num]
# # echo tle to screen
# log.info("{LINE1}\n{LINE2}".format(LINE1=UNIDsat.line1, LINE2=UNIDsat.line2))
# # Most popular const used by TLEs
# whichconst = sgp4.earth_gravity.wgs72
# afspc_mode = False
# (satn, epoch, xbstar, xecco, xargpo, xinclo, xmo, xno, xnodeo) = UNIDsat.satrec
# # id_satrec = sgp4init(whichconst, afspc_mode, satn, epoch, xbstar, xecco, xargpo, xinclo, xmo, xno, xnodeo)
# # (rr,vv) = sgp4(id_satrec, tsince=0, whichconst=whichconst)
# id_sat = sgp4.io.twoline2rv(UNIDsat.line1, UNIDsat.line2, whichconst, afspc_mode=False)
    # (year, month, day, hour, minute, second) = UNIDsat.epoch.timetuple()[:6]
UNIDtle = load.tle(url=tle_unid,reload=False)
# Make sure UNID satellite appears only once
# UNIDtle = set(UNIDtle.values())
if(not UNIDtle):
log.error("No TLEs found in file: {}".format(tle_unid))
log.error("Run elfind first?")
sys.exit()
    # Get the single (first) satellite number out of the dict
    satnum = next(iter(UNIDtle))
UNID = UNIDtle[satnum]
# t_unid = ts.ut1(jd=UNID.model.jdsatepoch)
t_unid = UNID.epoch
# Get r,v data at its own EPOCH
    # (rr, vv) = id_sat.propagate(year, month, day, hour, minute, second)
(rr, vv, id_sat_err) = UNID._position_and_velocity_TEME_km(t_unid)
id_sat_rr = np.array(rr)
id_sat_vv = np.array(vv)
# print(id_sat_rr)
# print(id_sat_vv)
# flat projection of UNID satellite direction unit vector, vp1
vp1 = flat_proj(rr, vv)
# Set Perr error bound
err1 = input(" position error, degrees [2]: ")
err1 = err1 or 2
err1 = float(err1)
# Set alpha error bound
err2 = input(" track angle error, degrees [20]: ")
err2 = err2 or 20
err2 = float(err2)
# Read in REFERENCE element list, and loop through for potential solutions within error bounds
REFtle = load.tle(url=tle_ref,reload=False)
# Make sure REFtle satellites appears only once
REFtle = set(REFtle.values())
for ref_sat in REFtle:
# log.debug("Comparing against {}".format(sat_num))
# if(ref_sat.model.satnum == 26905):
# print("here")
# Get r,v data at UNID epoch
(rr, vv, ref_sat_err) = ref_sat._position_and_velocity_TEME_km(t_unid)
ref_sat_rr = np.array(rr)
ref_sat_vv = np.array(vv)
# delr - satellite delta r vector
delr = np.subtract(id_sat_rr, ref_sat_rr)
# delr - flat projection of delta r unit vector
delr = flat_proj(id_sat_rr, delr)
# alpha - angle between delr and id_sat.vv, radians
alpha = angle_between(delr, id_sat_vv)
# Per - angle between position unit vectors, radians
Perr = angle_between(ref_sat_rr, id_sat_rr)
# delta - magnitude of Perr in direction of id_sat.vv (UNID velocity), radians
delt = atan(tan(Perr) * cos(alpha))
# delta_t - time of flight to Closest Point of Approach (cpa) seconds
# rr, vv already in units of km, km/s. No need to convert.
delta_t = delt * mag(id_sat_rr) / mag(id_sat_vv)
# cpa - Closest Point of Approach (cpa), radians
cpa = asin(sin(alpha) * sin(Perr))
# vp2 - flat projection of REF satellite direction unit vector
vp2 = flat_proj(ref_sat_rr, ref_sat_vv)
# alpha - angle between direction unit vectors, radians
alpha = acos(np.dot(vp1, vp2))
# Calculate REF deltas from UNID
try:
alpha = acos(cos(alpha)/cos(delt))
except ValueError:
alpha = float('nan')
# Prepare for presentation to user
alpha = degrees(alpha) # angle between direction unit vectors
Perr = degrees(Perr) # angle between position unit vectors
# Compare UNID to REF using osculating elements (close enough)
if((Perr < err1) and (alpha < err2)):
# tle = None # epoch of elements in tle format
# ii = None # inclination, degrees
# om = None # right ascension of ascending node, degrees
# ec = None # eccentricity
# ww = None # argument of the perigee, degrees
# ma = None # mean anomaly, degrees
# nn = None # mean motion, revolutions/day
# uu = None # true longitude
# c2 = None # bstar coefficient
# bstar = None # BSTAR drag term
# name[81] = None
# visually check match parameters using advanced mean elements
# Write tle to screen
(tle_line0, tle_line1, tle_line2) = make_tle(
name=ref_sat.name,
ssn=ref_sat.model.satnum,
epoch_datetime=ref_sat.epoch.utc_datetime(),
xincl=ref_sat.model.inclo,
xnodeo=ref_sat.model.nodeo,
eo=ref_sat.model.ecco,
omegao=ref_sat.model.argpo,
xmo=ref_sat.model.mo,
xno=degrees(ref_sat.model.no_kozai*1440.0)/360.0,
deg=False)
log.info(" position error {:4.1f}".format(Perr))
log.info("track angle error {:4.1f}\n".format(alpha))
log.info(" time error {:4.0f}".format(delta_t))
log.info(" to closest point {:4.1f}\n".format(degrees(cpa)))
tle_file_path = os.path.join(datadir, tle_unid)
append_tle_file(tle_file_path, tle_line0, tle_line1, tle_line2)
get_continue = input("\n[Next]")
# // s_in("\n[Next]", buf);
# // } // if match
# // } // while
# // s_in("\n[Done]", buf);
get_continue = input("\n[Done]")
# // system(id_file);
# // } // end main
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"os.path.join",
"numpy.multiply",
"skyfield.iokit.Loader",
"os.path.dirname",
"tle_util.append_tle_file",
"math.cos",
"configparser.ConfigParser",
"logging.StreamHandler",
"numpy.cross",
"math.sin",
"inspect.currentframe",
"numpy.dot",
"math.degrees",
"sys.exit",
"numpy.subtract",
"math.tan",
"sys.path.insert",
"time.time",
"numpy.array",
"logging.getLogger"
] |
[((950, 986), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../python-sgp4"""'], {}), "(1, '../python-sgp4')\n", (965, 986), False, 'import sys\n'), ((1047, 1117), 'sys.path.insert', 'sys.path.insert', (['(2)', '"""/Users/chris/Dropbox/code/preMVP/python-skyfield"""'], {}), "(2, '/Users/chris/Dropbox/code/preMVP/python-skyfield')\n", (1062, 1117), False, 'import sys\n'), ((1480, 1507), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (1495, 1507), False, 'import os\n'), ((1520, 1558), 'os.path.join', 'os.path.join', (['parentdir', '"""sathunt-tle"""'], {}), "(parentdir, 'sathunt-tle')\n", (1532, 1558), False, 'import os\n'), ((1560, 1588), 'sys.path.insert', 'sys.path.insert', (['(1)', 'tle_path'], {}), '(1, tle_path)\n', (1575, 1588), False, 'import sys\n'), ((359, 370), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (367, 370), False, 'import sys\n'), ((1887, 1905), 'numpy.multiply', 'np.multiply', (['b', 'v2'], {}), '(b, v2)\n', (1898, 1905), True, 'import numpy as np\n'), ((2094, 2110), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (2102, 2110), True, 'import numpy as np\n'), ((2124, 2143), 'numpy.cross', 'np.cross', (['temp1', 'v1'], {}), '(temp1, v1)\n', (2132, 2143), True, 'import numpy as np\n'), ((3722, 3728), 'time.time', 'time', ([], {}), '()\n', (3726, 3728), False, 'from time import time\n'), ((3780, 3883), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Utility to assist in ID of an unidentified (unid) satellite"""'}), "(description=\n 'Utility to assist in ID of an unidentified (unid) satellite')\n", (3803, 3883), False, 'import argparse\n'), ((8321, 8382), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'inline_comment_prefixes': "('#', ';')"}), "(inline_comment_prefixes=('#', ';'))\n", (8346, 8382), False, 'import configparser\n'), ((8431, 8458), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (8448, 8458), False, 'import logging\n'), ((8513, 8536), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (8534, 8536), False, 'import logging\n'), ((9016, 9031), 'skyfield.iokit.Loader', 'Loader', (['datadir'], {}), '(datadir)\n', (9022, 9031), False, 'from skyfield.iokit import Loader, download, parse_tle\n'), ((11763, 11775), 'numpy.array', 'np.array', (['rr'], {}), '(rr)\n', (11771, 11775), True, 'import numpy as np\n'), ((11793, 11805), 'numpy.array', 'np.array', (['vv'], {}), '(vv)\n', (11801, 11805), True, 'import numpy as np\n'), ((1845, 1859), 'numpy.dot', 'np.dot', (['v2', 'v1'], {}), '(v2, v1)\n', (1851, 1859), True, 'import numpy as np\n'), ((1860, 1874), 'numpy.dot', 'np.dot', (['v2', 'v2'], {}), '(v2, v2)\n', (1866, 1874), True, 'import numpy as np\n'), ((3116, 3128), 'numpy.dot', 'np.dot', (['v', 'v'], {}), '(v, v)\n', (3122, 3128), True, 'import numpy as np\n'), ((11343, 11353), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11351, 11353), False, 'import sys\n'), ((12771, 12783), 'numpy.array', 'np.array', (['rr'], {}), '(rr)\n', (12779, 12783), True, 'import numpy as np\n'), ((12806, 12818), 'numpy.array', 'np.array', (['vv'], {}), '(vv)\n', (12814, 12818), True, 'import numpy as np\n'), ((12882, 12916), 'numpy.subtract', 'np.subtract', (['id_sat_rr', 'ref_sat_rr'], {}), '(id_sat_rr, ref_sat_rr)\n', (12893, 12916), True, 'import numpy as np\n'), ((14155, 14169), 'math.degrees', 'degrees', (['alpha'], {}), '(alpha)\n', (14162, 14169), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((14225, 14238), 
'math.degrees', 'degrees', (['Perr'], {}), '(Perr)\n', (14232, 14238), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((1441, 1463), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1461, 1463), False, 'import inspect\n'), ((13905, 13921), 'numpy.dot', 'np.dot', (['vp1', 'vp2'], {}), '(vp1, vp2)\n', (13911, 13921), True, 'import numpy as np\n'), ((15939, 15970), 'os.path.join', 'os.path.join', (['datadir', 'tle_unid'], {}), '(datadir, tle_unid)\n', (15951, 15970), False, 'import os\n'), ((15984, 16047), 'tle_util.append_tle_file', 'append_tle_file', (['tle_file_path', 'tle_line0', 'tle_line1', 'tle_line2'], {}), '(tle_file_path, tle_line0, tle_line1, tle_line2)\n', (15999, 16047), False, 'from tle_util import make_tle, append_tle_file\n'), ((2828, 2846), 'numpy.dot', 'np.dot', (['v1_u', 'v2_u'], {}), '(v1_u, v2_u)\n', (2834, 2846), True, 'import numpy as np\n'), ((13358, 13367), 'math.tan', 'tan', (['Perr'], {}), '(Perr)\n', (13361, 13367), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((13370, 13380), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (13373, 13380), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((13669, 13679), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (13672, 13679), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((13682, 13691), 'math.sin', 'sin', (['Perr'], {}), '(Perr)\n', (13685, 13691), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((14008, 14018), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (14011, 14018), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((14019, 14028), 'math.cos', 'cos', (['delt'], {}), '(delt)\n', (14022, 14028), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((15893, 15905), 'math.degrees', 'degrees', (['cpa'], {}), '(cpa)\n', (15900, 15905), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n'), ((15556, 15596), 'math.degrees', 'degrees', (['(ref_sat.model.no_kozai * 1440.0)'], {}), '(ref_sat.model.no_kozai * 1440.0)\n', (15563, 15596), False, 'from math import acos, asin, atan, cos, sin, tan, degrees\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/juano2310/CarND-Behavioral-Cloning-P3-Juan/blob/master/model.py
#https://github.com/udacity/self-driving-car/blob/master/steering-models/community-models/rambo/train.py
import os
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Lambda, Cropping2D
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
import sklearn
from sklearn.model_selection import train_test_split
samples = []
with open('../../../output/conde_gazebo/interpolated.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
sklearn.utils.shuffle(samples)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
print("Number of traing samples: ",len(train_samples))
print("Number of validation samples: ",len(validation_samples))
#index,timestamp,width,height,frame_id,filename,angle,speed
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
#print(batch_samples)
images = []
angles = []
for batch_sample in batch_samples:
if batch_sample[5] != "filename":
path = os.path.normpath(batch_sample[5]).split(os.path.sep)
name = '../../../output/conde_gazebo/center/'+path[1].split('\\')[-1]
center_image = cv2.imread(name)
center_image = cv2.resize(center_image, (320,180)) #resize from 720x1280 to 180x320
#plt.imshow(left_image)
#plt.show()
angle = float(batch_sample[6])
images.append(center_image)
angles.append(angle)
flip_image = np.fliplr(center_image)
flip_angle = -1 * angle
images.append(flip_image)
angles.append(flip_angle)
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
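# Note: each source sample contributes two images (the original plus a
# horizontal flip with negated steering angle), so a yielded batch holds up
# to 2*batch_size image/angle pairs.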
# compile and train the model using the generator function
batch_size_value = 32
n_epoch = 150
train_generator = generator(train_samples, batch_size=batch_size_value)
validation_generator = generator(validation_samples, batch_size=batch_size_value)
model = Sequential()
# trim image to only see section with road
model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(180,320,3)))
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
#Nvidia model
model.add(Convolution2D(24, (5, 5), activation="relu", name="conv_1", strides=(2, 2)))
model.add(Convolution2D(36, (5, 5), activation="relu", name="conv_2", strides=(2, 2)))
model.add(Convolution2D(48, (5, 5), activation="relu", name="conv_3", strides=(2, 2)))
model.add(SpatialDropout2D(.5, dim_ordering='default'))
model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_4", strides=(1, 1)))
model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_5", strides=(1, 1)))
model.add(Flatten())
model.add(Dense(1164))
model.add(Dropout(.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(50, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(10, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.summary()
# checkpoint
filepath="../../weights/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=1)
callbacks_list = [checkpoint]
# Fit the model
history_object = model.fit_generator(train_generator, steps_per_epoch=(len(train_samples) / batch_size_value), validation_data=validation_generator, validation_steps=(len(validation_samples)/batch_size_value), callbacks=callbacks_list, epochs=n_epoch)
# Save model
model.save('model.h5')
with open('model.json', 'w') as output_json:
output_json.write(model.to_json())
# Save TensorFlow model
tf.train.write_graph(K.get_session().graph.as_graph_def(), logdir='.', name='model.pb', as_text=False)
# Plot the training and validation loss for each epoch
print('Generating loss chart...')
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('model.png')
# Done
print('Done.')
|
[
"matplotlib.pyplot.title",
"csv.reader",
"keras.layers.Cropping2D",
"sklearn.model_selection.train_test_split",
"keras.layers.core.Flatten",
"keras.layers.core.SpatialDropout2D",
"os.path.normpath",
"keras.layers.core.Dropout",
"cv2.resize",
"keras.layers.core.Dense",
"keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.legend",
"numpy.fliplr",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"keras.backend.get_session",
"cv2.imread",
"keras.layers.Lambda",
"keras.layers.convolutional.Convolution2D",
"numpy.array",
"keras.models.Sequential",
"sklearn.utils.shuffle",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((952, 982), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['samples'], {}), '(samples)\n', (973, 982), False, 'import sklearn\n'), ((1019, 1059), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (1035, 1059), False, 'from sklearn.model_selection import train_test_split\n'), ((2883, 2895), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2893, 2895), False, 'from keras.models import Model, Sequential\n'), ((4092, 4197), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""auto"""', 'period': '(1)'}), "(filepath, monitor='val_loss', verbose=1, save_best_only=\n True, mode='auto', period=1)\n", (4107, 4197), False, 'from keras.callbacks import ModelCheckpoint\n'), ((4831, 4871), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (4839, 4871), True, 'import matplotlib.pyplot as plt\n'), ((4872, 4916), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (4880, 4916), True, 'import matplotlib.pyplot as plt\n'), ((4917, 4959), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (4926, 4959), True, 'import matplotlib.pyplot as plt\n'), ((4960, 4997), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (4970, 4997), True, 'import matplotlib.pyplot as plt\n'), ((4998, 5017), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (5008, 5017), True, 'import matplotlib.pyplot as plt\n'), ((5018, 5083), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (5028, 5083), True, 'import matplotlib.pyplot as plt\n'), ((5084, 5108), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""model.png"""'], {}), "('model.png')\n", (5095, 5108), True, 'import matplotlib.pyplot as plt\n'), ((876, 895), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (886, 895), False, 'import csv\n'), ((2950, 3016), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((50, 20), (0, 0))', 'input_shape': '(180, 320, 3)'}), '(cropping=((50, 20), (0, 0)), input_shape=(180, 320, 3))\n', (2960, 3016), False, 'from keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Lambda, Cropping2D\n'), ((3104, 3137), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {}), '(lambda x: x / 255.0 - 0.5)\n', (3110, 3137), False, 'from keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Lambda, Cropping2D\n'), ((3166, 3241), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(24)', '(5, 5)'], {'activation': '"""relu"""', 'name': '"""conv_1"""', 'strides': '(2, 2)'}), "(24, (5, 5), activation='relu', name='conv_1', strides=(2, 2))\n", (3179, 3241), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3253, 3328), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(36)', '(5, 5)'], {'activation': '"""relu"""', 'name': '"""conv_2"""', 'strides': '(2, 2)'}), "(36, (5, 5), activation='relu', name='conv_2', strides=(2, 2))\n", (3266, 3328), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3340, 3415), 
'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(48)', '(5, 5)'], {'activation': '"""relu"""', 'name': '"""conv_3"""', 'strides': '(2, 2)'}), "(48, (5, 5), activation='relu', name='conv_3', strides=(2, 2))\n", (3353, 3415), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3427, 3472), 'keras.layers.core.SpatialDropout2D', 'SpatialDropout2D', (['(0.5)'], {'dim_ordering': '"""default"""'}), "(0.5, dim_ordering='default')\n", (3443, 3472), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3484, 3559), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'name': '"""conv_4"""', 'strides': '(1, 1)'}), "(64, (3, 3), activation='relu', name='conv_4', strides=(1, 1))\n", (3497, 3559), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3571, 3646), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'name': '"""conv_5"""', 'strides': '(1, 1)'}), "(64, (3, 3), activation='relu', name='conv_5', strides=(1, 1))\n", (3584, 3646), False, 'from keras.layers.convolutional import Convolution2D\n'), ((3659, 3668), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (3666, 3668), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3681, 3692), 'keras.layers.core.Dense', 'Dense', (['(1164)'], {}), '(1164)\n', (3686, 3692), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3704, 3716), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3711, 3716), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3727, 3756), 'keras.layers.core.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (3732, 3756), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3768, 3780), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3775, 3780), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3791, 3819), 'keras.layers.core.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (3796, 3819), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3831, 3843), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3838, 3843), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3854, 3882), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (3859, 3882), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3894, 3906), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3901, 3906), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((3917, 3925), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3922, 3925), False, 'from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D\n'), ((2493, 2509), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2501, 2509), True, 'import numpy as np\n'), ((2533, 2549), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (2541, 2549), True, 'import numpy as np\n'), ((2583, 2622), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (2604, 2622), False, 'import sklearn\n'), ((4659, 4674), 
'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (4672, 4674), True, 'from keras import backend as K\n'), ((1895, 1911), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (1905, 1911), False, 'import cv2\n'), ((1948, 1984), 'cv2.resize', 'cv2.resize', (['center_image', '(320, 180)'], {}), '(center_image, (320, 180))\n', (1958, 1984), False, 'import cv2\n'), ((2289, 2312), 'numpy.fliplr', 'np.fliplr', (['center_image'], {}), '(center_image)\n', (2298, 2312), True, 'import numpy as np\n'), ((1710, 1743), 'os.path.normpath', 'os.path.normpath', (['batch_sample[5]'], {}), '(batch_sample[5])\n', (1726, 1743), False, 'import os\n')]
|
from kivy.uix.widget import Widget
from kivy.graphics.instructions import Canvas
from kivy.graphics.vertex_instructions import Line,Ellipse ,Ellipse,Rectangle
from kivy.graphics.context_instructions import Color
from kivy.metrics import dp
# from kivy.clock import Clock
from kivy.properties import Clock
from kivymd.uix.button import MDRaisedButton
class Quicar(Widget):
    #@override
    def on_size(self,*args):
        """Triggered when the window is resized."""
        print(f'{self.width}')
        # center the ball
self.ball.pos=(self.center_x-self.ball_size/2, self.center_y-self.ball_size/2)
# def on_touch_down(self,kv):
# print('start')
# self.start()
def start_game(self,*args):
print(f'check play: {self.opacity}')
if(self.opacity==1):
print('play started')
self.start()
return
print('Unable to play')
def __init__(self,**k):
super().__init__(**k)
        self.ball_size=dp(50)
        # velocities
        self.vx=dp(3)
        self.vy=dp(3)
self.bouncing=False
# self.add_widget(MDRaisedButton(text='Begin',id='btn',on_press=self.start))
with self.canvas:
            self.rect = Rectangle(pos=(200,220),size=(60,60))  # filled rectangle
Color(1,1,1,1)
self.ball=Ellipse(pos=self.center, size=(self.ball_size,self.ball_size))
def move(self,arg):
x,y =self.rect.pos
w,h=self.rect.size
incrementer=dp(10)
diff =self.width-(x+w)*2
if(diff < incrementer):
incrementer=diff
x+=incrementer
self.rect.pos =(x,y)
def start(self):
if(self.opacity==1):
# Clock.schedule_interval(self.move,1/60)
self.bouncing =not self.bouncing
print(self.bouncing)
if self.bouncing==True:
                Clock.unschedule(self.move_ball)
self.ball.pos=self.center
return
Clock.schedule_interval(self.move_ball,1/60)
def update_ui_txts(self,x,y):
if(self.opacity==1):
self.parent.ids['res'].text=f"""
x:{x} y:{y} pos: {self.ball.pos}
"""
else:
self.parent.ids['res'].text=''
def move_ball(self,dt):
"""TODO
fazer abolar quicar se movendo nos
eixos x e y e se passar fora do window do pai
eles devem voltar pra atras
VIDEO 2:21:10
"""
x,y=self.ball.pos
        'WHEN IT HITS THE TOP'
"""se abola.y+size dele for maior que atela
precisa contrariar decrementando o incrementador de posy do mesmo
que eh vy
"""
if y + self.ball_size > self.height:
self.vy=-self.vy
if x +self.ball_size >self.width:
self.vx = -self.vx
        'WHEN IT HITS THE GROUND'
"""se abola tocar no y do chao self.vy estava negativo e pra contrariar
anegatividade dele para positivo preciso fazer
self.vy=-(self.vy)
ex:vy=-10 then vy=-vy==+10
"""
if x<0:
x=0
self.vx =-self.vx
if y<0:
y=0
self.vy =-self.vy
x+=self.vx
y+=self.vy
self.ball.pos=(x,y)
self.update_ui_txts(x,y)
|
[
"kivy.properties.Clock.unschedule",
"kivy.properties.Clock.schedule_interval",
"kivy.graphics.vertex_instructions.Rectangle",
"kivy.metrics.dp",
"kivy.graphics.context_instructions.Color",
"kivy.graphics.vertex_instructions.Ellipse"
] |
[((1018, 1024), 'kivy.metrics.dp', 'dp', (['(50)'], {}), '(50)\n', (1020, 1024), False, 'from kivy.metrics import dp\n'), ((1062, 1067), 'kivy.metrics.dp', 'dp', (['(3)'], {}), '(3)\n', (1064, 1067), False, 'from kivy.metrics import dp\n'), ((1084, 1089), 'kivy.metrics.dp', 'dp', (['(3)'], {}), '(3)\n', (1086, 1089), False, 'from kivy.metrics import dp\n'), ((1522, 1528), 'kivy.metrics.dp', 'dp', (['(10)'], {}), '(10)\n', (1524, 1528), False, 'from kivy.metrics import dp\n'), ((1253, 1293), 'kivy.graphics.vertex_instructions.Rectangle', 'Rectangle', ([], {'pos': '(200, 220)', 'size': '(60, 60)'}), '(pos=(200, 220), size=(60, 60))\n', (1262, 1293), False, 'from kivy.graphics.vertex_instructions import Line, Ellipse, Ellipse, Rectangle\n'), ((1319, 1336), 'kivy.graphics.context_instructions.Color', 'Color', (['(1)', '(1)', '(1)', '(1)'], {}), '(1, 1, 1, 1)\n', (1324, 1336), False, 'from kivy.graphics.context_instructions import Color\n'), ((1356, 1419), 'kivy.graphics.vertex_instructions.Ellipse', 'Ellipse', ([], {'pos': 'self.center', 'size': '(self.ball_size, self.ball_size)'}), '(pos=self.center, size=(self.ball_size, self.ball_size))\n', (1363, 1419), False, 'from kivy.graphics.vertex_instructions import Line, Ellipse, Ellipse, Rectangle\n'), ((2029, 2076), 'kivy.properties.Clock.schedule_interval', 'Clock.schedule_interval', (['self.move_ball', '(1 / 60)'], {}), '(self.move_ball, 1 / 60)\n', (2052, 2076), False, 'from kivy.properties import Clock\n'), ((1906, 1946), 'kivy.properties.Clock.unschedule', 'Clock.unschedule', (['self.move_ball', '(1 / 60)'], {}), '(self.move_ball, 1 / 60)\n', (1922, 1946), False, 'from kivy.properties import Clock\n')]
|
from timesheet_utils.service_comunication import request
from werkzeug.exceptions import Unauthorized, Forbidden
import os
def get_logged_user():
users_service_url_port = os.path.expandvars(
os.environ.get('USERS_SERVICE_URL_PORT')
)
data = request(
'{}{}/me/'.format(
users_service_url_port,
os.environ.get('USERS_SERVICE_PREFIX')
),
check_ok=False
)
if data.status_code != 200:
print("'{}/me/' returned '{}' status".format(
os.environ.get('USERS_SERVICE_PREFIX'),
data.status_code
))
raise Unauthorized(data.json()['msg'])
return data.json()
def require_login(
only_with_roles: list = None,
only_with_all_permissions: list = None,
only_with_any_permissions: list = None,
):
def decorator(func):
def wrapper(self, *args, **kwargs):
user = get_logged_user()
kwargs['user'] = user
user_roles = user['roles']
user_permissions = user['permissions']
for role in user_roles:
if only_with_roles and role.name not in only_with_roles:
raise Forbidden(
"'{}' role is not allowed to get this content!".format(role.name)
)
            if ((only_with_all_permissions
                 and not all(item in user_permissions
                             for item in only_with_all_permissions))
                    or (only_with_any_permissions
                        and not any(item in user_permissions
                                    for item in only_with_any_permissions))):
raise Forbidden(
"You are not allowed to get this content!"
)
return func(self, *args, **kwargs)
return wrapper
return decorator
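# Hedged usage sketch (the resource class, permission string, and user field
# below are hypothetical, not part of this module):
#
# class ReportResource:
#     @require_login(only_with_any_permissions=['reports:read'])
#     def get(self, *args, user=None, **kwargs):
#         return {'requested_by': user.get('username')}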
|
[
"os.environ.get",
"werkzeug.exceptions.Forbidden"
] |
[((204, 244), 'os.environ.get', 'os.environ.get', (['"""USERS_SERVICE_URL_PORT"""'], {}), "('USERS_SERVICE_URL_PORT')\n", (218, 244), False, 'import os\n'), ((346, 384), 'os.environ.get', 'os.environ.get', (['"""USERS_SERVICE_PREFIX"""'], {}), "('USERS_SERVICE_PREFIX')\n", (360, 384), False, 'import os\n'), ((523, 561), 'os.environ.get', 'os.environ.get', (['"""USERS_SERVICE_PREFIX"""'], {}), "('USERS_SERVICE_PREFIX')\n", (537, 561), False, 'import os\n'), ((1560, 1613), 'werkzeug.exceptions.Forbidden', 'Forbidden', (['"""You are not allowed to get this content!"""'], {}), "('You are not allowed to get this content!')\n", (1569, 1613), False, 'from werkzeug.exceptions import Unauthorized, Forbidden\n')]
|