code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: UTF-8 -*-
import numpy as np
'''
此部分用于存储公共资源
'''
'''
船舶状态记录
A record in SHIPSTATUS is like:
{'mmsi': mmsi, 'lon': lon, 'lat': lat, 'shipspeed': shipspeed, 'heading': heading, 'sog': sog}
'''
SHIPSTATUS = []
SHIPJSON = []
# river作为公共资源共享, 初始即创建河床,不再在sim_env中初始河床.
RIVER = np.zeros((10000, 1000))
'''
船舶资源注册表 Register
每生成一只船,就要在此注册一次
注册形式: mmsi
'''
SHIP_REGISTER = {'ship_num': 0, 'registered_ship': []}
'''
水流资源注册表
'''
'''
风资源注册表
'''
'''
RISKVALUE风险值临时策略
'''
RISKVALUE = []
SHIP1POS = []
SHIP2POS = [] | [
"numpy.zeros"
] | [((289, 312), 'numpy.zeros', 'np.zeros', (['(10000, 1000)'], {}), '((10000, 1000))\n', (297, 312), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import cv
# this just handles actually showing the window
# and the dots where you've clicked
class SelectView:
def __init__(self, winname, imsize):
self.im = np.zeros((imsize, imsize, 3), dtype=np.uint8)
self.clicks = []
self.winname = winname
cv2.namedWindow(self.winname)
cv.SetMouseCallback(self.winname, self.mouseHandler, 0)
def addClick(self, x, y):
self.clicks.append((x,y))
def mouseHandler(self, event, x, y, flags, params):
if event == cv.CV_EVENT_LBUTTONDOWN:
self.addClick(x, y)
def renderWindow(self):
self.dispim = self.im.copy()
for (x, y) in self.clicks:
cv2.circle(self.dispim, (int(x), int(y)), 8, (255,255,255), 2)
cv2.imshow(self.winname, self.dispim)
def finishSelection(self):
cv2.destroyWindow(self.winname)
# this handles the actual math for computing the homography
def compute_homography(srcpoints, destpoints):
src_pts = np.array([ list(p) for p in srcpoints ], dtype=np.float32).reshape(1,-1,2)
dst_pts = np.array([ list(p) for p in destpoints ], dtype=np.float32).reshape(1,-1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
print(M)
return M
def compute_perspective(srcpoints, destpoints):
src_pts = np.array([ list(p) for p in srcpoints ], dtype=np.float32).reshape(1,-1,2)
dst_pts = np.array([ list(p) for p in destpoints ], dtype=np.float32).reshape(1,-1,2)
return cv2.getPerspectiveTransform(src_pts, dst_pts)
def warp_image(srcim, H, invert=False):
if invert:
Hp = np.linalg.inv(H)
else:
Hp = H
return cv2.warpPerspective(srcim, Hp, (srcim.shape[0], srcim.shape[1]))
if __name__ == '__main__':
imsize = 1024
# get correspondences through 'gui'
clickview = SelectView("selectview", imsize)
while True:
clickview.renderWindow()
if len(clickview.clicks) == 4:
break
keycode = cv.WaitKey(30)
clickview.finishSelection()
print(clickview.clicks)
# compute perspective transform (you can save M to reuse later)
destpoints = [(0,0), (imsize,0), (imsize,imsize), (0, imsize)]
M = compute_perspective(clickview.clicks, destpoints)
print(M)
# warp image
inimage = cv2.imread("test.png")
warpimage = warp_image(inimage, M, True)
cv2.imshow("warped", warpimage)
cv.WaitKey(0) | [
"cv.SetMouseCallback",
"cv2.getPerspectiveTransform",
"cv2.findHomography",
"cv2.destroyWindow",
"cv2.imshow",
"cv.WaitKey",
"cv2.warpPerspective",
"numpy.zeros",
"numpy.linalg.inv",
"cv2.imread",
"cv2.namedWindow"
] | [((1202, 1255), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC', '(5.0)'], {}), '(src_pts, dst_pts, cv2.RANSAC, 5.0)\n', (1220, 1255), False, 'import cv2\n'), ((1522, 1567), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src_pts', 'dst_pts'], {}), '(src_pts, dst_pts)\n', (1549, 1567), False, 'import cv2\n'), ((1691, 1755), 'cv2.warpPerspective', 'cv2.warpPerspective', (['srcim', 'Hp', '(srcim.shape[0], srcim.shape[1])'], {}), '(srcim, Hp, (srcim.shape[0], srcim.shape[1]))\n', (1710, 1755), False, 'import cv2\n'), ((2330, 2352), 'cv2.imread', 'cv2.imread', (['"""test.png"""'], {}), "('test.png')\n", (2340, 2352), False, 'import cv2\n'), ((2402, 2433), 'cv2.imshow', 'cv2.imshow', (['"""warped"""', 'warpimage'], {}), "('warped', warpimage)\n", (2412, 2433), False, 'import cv2\n'), ((2438, 2451), 'cv.WaitKey', 'cv.WaitKey', (['(0)'], {}), '(0)\n', (2448, 2451), False, 'import cv\n'), ((203, 248), 'numpy.zeros', 'np.zeros', (['(imsize, imsize, 3)'], {'dtype': 'np.uint8'}), '((imsize, imsize, 3), dtype=np.uint8)\n', (211, 248), True, 'import numpy as np\n'), ((313, 342), 'cv2.namedWindow', 'cv2.namedWindow', (['self.winname'], {}), '(self.winname)\n', (328, 342), False, 'import cv2\n'), ((351, 406), 'cv.SetMouseCallback', 'cv.SetMouseCallback', (['self.winname', 'self.mouseHandler', '(0)'], {}), '(self.winname, self.mouseHandler, 0)\n', (370, 406), False, 'import cv\n'), ((790, 827), 'cv2.imshow', 'cv2.imshow', (['self.winname', 'self.dispim'], {}), '(self.winname, self.dispim)\n', (800, 827), False, 'import cv2\n'), ((868, 899), 'cv2.destroyWindow', 'cv2.destroyWindow', (['self.winname'], {}), '(self.winname)\n', (885, 899), False, 'import cv2\n'), ((1637, 1653), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (1650, 1653), True, 'import numpy as np\n'), ((2016, 2030), 'cv.WaitKey', 'cv.WaitKey', (['(30)'], {}), '(30)\n', (2026, 2030), False, 'import cv\n')] |
""" Analyzing simulations done with FitSim. """
from __future__ import print_function, division, absolute_import
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.widgets import Slider
from wmpl.Config import config
from wmpl.Utils.Pickling import loadPickle
from wmpl.Utils.TrajConversions import jd2Date
from wmpl.MetSim.MetSim import loadInputs
from wmpl.MetSim.FitSim import calcVelocity
# Minimum difference between slider
SLIDER_EPSILON = 0.01
class FitSimAnalyzer(object):
def __init__(self, dir_path_mir, traj_pickle_file):
# Name of input file for meteor parameters
meteor_inputs_file = config.met_sim_input_file
# Load input meteor data
met, consts = loadInputs(meteor_inputs_file)
# Load the pickled trajectory
self.traj = loadPickle(dir_path_mir, traj_pickle_file)
self.results_list = []
self.full_cost_list = []
# Go through all observations
for station_ind, obs in enumerate(self.traj.observations):
# Name of the results file
results_file = jd2Date(self.traj.jdt_ref, dt_obj=True).strftime('%Y%m%d_%H%M%S') + "_" \
+ str(self.traj.observations[station_ind].station_id) + "_simulations.npy"
results_file = os.path.join(dir_path_mir, results_file)
# Add the results file to the results list
self.results_list.append(results_file)
# Take the parameters of the observation with the highest beginning height
obs_time = self.traj.observations[station_ind].time_data
obs_length = self.traj.observations[station_ind].length
# Fit only the first 25% of the observed trajectory
len_part = int(0.25*len(obs_time))
# If the first 25% has less than 4 points, than take the first 4 points
if len_part < 4:
len_part = 4
# Cut the observations to the first part of the trajectory
obs_time = obs_time[:len_part]
obs_length = obs_length[:len_part]
# Calculate observed velocities
velocities, time_diffs = calcVelocity(obs_time, obs_length)
print(velocities)
# Calculate the RMS of velocities
vel_rms = np.sqrt(np.mean((velocities[1:] - self.traj.v_init)**2))
print('Vel RMS:', vel_rms)
# Calculate the along track differences
along_track_diffs = (velocities[1:] - self.traj.v_init)*time_diffs[1:]
# Calculate the full 3D residuals
full_residuals = np.sqrt(along_track_diffs**2 \
+ self.traj.observations[station_ind].v_residuals[:len_part][1:]**2 \
+ self.traj.observations[station_ind].h_residuals[:len_part][1:]**2)
# Calculate the average 3D deviation from the estimated trajectory
full_cost = np.sum(np.abs(np.array(full_residuals)))/len(full_residuals)
self.full_cost_list.append(full_cost)
# Load solutions from a file
self.loadSimulations()
# Initialize the plot framework
self.initGrid()
# Initialize main plots
self.dens_min_init, self.dens_max_init = self.updatePlots(init=True)
self.dens_min = self.dens_min_init
self.dens_max = self.dens_max_init
### SLIDERS
# Sliders for density
self.sl_ind_dev_1 = Slider(self.ax_sl_11, 'Min', self.dens_min, self.dens_max, valinit=self.dens_min)
self.sl_ind_dev_2 = Slider(self.ax_sl_12, 'Max', self.dens_min, self.dens_max, valinit=self.dens_max, slidermin=self.sl_ind_dev_1)
self.ax_sl_12.set_xlabel('Density')
# Turn on slider updating
self.sl_ind_dev_1.on_changed(self.updateSliders)
self.sl_ind_dev_2.on_changed(self.updateSliders)
######
plt.show()
def loadSimulations(self):
""" Load simulation results from a file. """
self.solutions_list = []
for results_file in self.results_list:
# Load the numpy array with the results from a file
solutions = np.load(results_file)
self.solutions_list.append(solutions)
def initGrid(self):
""" Initializes the plot framework. """
### Create grid
# Main gridspec
gs = gridspec.GridSpec(6, 2)
gs.update(hspace=0.5, bottom=0.05, top=0.95, left=0.05, right=0.98)
# Index vs. deviations axes
gs_ind = gridspec.GridSpecFromSubplotSpec(6, 2, subplot_spec=gs[:3, :], wspace=0.0, hspace=2.0)
ax_ind_1 = plt.subplot(gs_ind[:5, 0])
ax_ind_2 = plt.subplot(gs_ind[:5, 1], sharex=ax_ind_1, sharey=ax_ind_1)
# Mass colorbar axis
self.ax_cbar = plt.subplot(gs_ind[5, :])
# Velocity vs. deviations axies
gs_vel = gridspec.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs[3:5, :], wspace=0.0, hspace=0.2)
ax_vel_1 = plt.subplot(gs_vel[0, 0])
ax_vel_2 = plt.subplot(gs_vel[0, 1], sharex=ax_vel_1, sharey=ax_vel_1)
# Disable left tick labels on plots to the right
ax_ind_2.tick_params(labelleft='off')
ax_vel_2.tick_params(labelleft='off')
# Sliders
gs_sl = gridspec.GridSpecFromSubplotSpec(2, 2, subplot_spec=gs[5, :], wspace=0.15, hspace=0.2)
# Slider upper left axis
self.ax_sl_11 = plt.subplot(gs_sl[0, 0])
# Slider lower left axis
self.ax_sl_12 = plt.subplot(gs_sl[1, 0])
# Slider upper right axis
self.ax_sl_21 = plt.subplot(gs_sl[0, 1])
# Slider lower right axis
self.ax_sl_22 = plt.subplot(gs_sl[1, 1])
######
self.axes_cost = [ax_ind_1, ax_ind_2]
self.axes_velocity = [ax_vel_1, ax_vel_2]
def updatePlots(self, init=False):
""" Updates the plots with the given range of densities.
Keyword arguments:
init: [bool] If True, plots will be shown with no constrain on density. False by defualt.
"""
# List of cost plot handles
plot_cost_handles = []
# List of velocity plot handles
plot_velocity_handles = []
dens_min = 10000
dens_max = 0
# Go through all results file
for n, (full_cost, solutions) in enumerate(zip(self.full_cost_list, self.solutions_list)):
# Cost function plots
ax_cost = self.axes_cost[n]
# Velocity plots
ax_vel = self.axes_velocity[n]
# Extract initial velocities
v_init_all = solutions[:, 1]
# Idenfity unique initial velocities
v_init_unique = np.unique(v_init_all)
# List for velocities which are possible under the measurement RMS
v_possible = []
# List of velocities vs. best cost pairs
vel_cost_pairs = []
# Go through initial velocities
for i, v_init in enumerate(v_init_unique):
# Select only those with the given initial velocity
select_ind = np.where(v_init_all == v_init)
# Select the solution by the v init
solutions_v_init = solutions[select_ind]
# Extract densities from the solutions
densities = solutions_v_init[:, 3]
# If the plots are not being initializes, i.e. a constrain on densities was given,
# select only those simulations in between selected densities
if not init:
# Select the solutions only in the range of selected densities
solutions_v_init = solutions_v_init[(densities >= self.dens_min) & (densities <= self.dens_max), :]
else:
# Store the largest and the smallest density
if np.max(densities) > dens_max:
dens_max = np.max(densities)
if np.min(densities) < dens_min:
dens_min = np.min(densities)
# Sort by cost
solutions_v_init[solutions_v_init[:, 0].argsort()]
# Extract costs from the solution
costs = solutions_v_init[:, 0]
vel_cost_pairs.append([v_init, costs[0]])
# Add the velocity to the list if the first cost is below the measurement RMS cost
if costs[0] < full_cost:
v_possible.append(v_init)
# Set text to mark the velocity
ax_cost.text(0, costs[0], str(int(v_init)), ha='right')
# Index vs. cost scatter plot where the color represents the mass
scat_ind_dev = ax_cost.scatter(range(len(costs)), costs, c=solutions_v_init[:, 2], s=((i+1)**2)/2,
norm=matplotlib.colors.LogNorm(), zorder=3)
plot_cost_handles.append(scat_ind_dev)
# Print the range of possible velocities from the simulations and threshold deviations
if v_possible:
v_possible = sorted(v_possible)
print('Site', n+1, 'possible range of velocities:', min(v_possible), max(v_possible))
else:
print('No possible velocities!')
# Plot the cost function values of the RMS of lengths along the track
rms_cost_x = np.linspace(0, len(costs), 1000)
rms_cost_y = np.array([full_cost]*1000)
ax_cost.plot(rms_cost_x, rms_cost_y, label='RMS along track cost', linestyle='--', linewidth=2,
zorder=3)
# Set the Y limit from 0 to 2x the threshold cost
ax_cost.set_ylim(0, 2*full_cost)
ax_cost.set_xlabel('Index')
ax_cost.legend()
ax_cost.set_title(str(n + 1))
ax_cost.grid()
### Plot velocities vs. best cost
vel_cost_pairs = np.array(vel_cost_pairs)
vels, best_costs = vel_cost_pairs.T
plot_vel_dev = ax_vel.plot(vels, best_costs, marker='x', label='Model V')
plot_velocity_handles.append(plot_vel_dev)
# Plot the threshold cost
vel_rms_cost_x = np.linspace(np.min(vels), np.max(vels), 1000)
vel_rms_cost_y = np.zeros_like(vel_rms_cost_x) + full_cost
ax_vel.plot(vel_rms_cost_x, vel_rms_cost_y, linestyle='--', linewidth=2, zorder=3, label='RMS cost')
# Plot the initial velocity from the trajectory solver
v_init_orig_x = np.zeros(10) + self.traj.v_init
v_init_orig_y = np.linspace(0, full_cost, 10)
ax_vel.plot(v_init_orig_x, v_init_orig_y, color='r', zorder=3, label='$V_{init}$')
# Plot the no-atmosphere velocity from the trajectory solver
v_init_orig_x = np.zeros(10) + self.traj.orbit.v_inf
v_init_orig_y = np.linspace(0, full_cost, 10)
ax_vel.plot(v_init_orig_x, v_init_orig_y, color='g', zorder=3, label='$V_{\infty}$')
ax_vel.set_xlabel('Velocity (m/s)')
# Set the Y limit from 0 to 2x the threshold cost
ax_vel.set_ylim(0, 2*full_cost)
ax_vel.grid()
ax_vel.legend()
######
# Extract plot handles
self.plot_ind_dev_1 = plot_cost_handles[0]
self.plot_ind_dev_1 = plot_cost_handles[1]
self.plot_vel_dev_1 = plot_velocity_handles[0]
self.plot_vel_dev_2 = plot_velocity_handles[1]
# Set mass scatter plot labels and colorbar
self.axes_cost[0].set_ylabel('Average absolute deviations (m)')
# Plot the masses colorbar
plt.gcf().colorbar(self.plot_ind_dev_1, label='Mass (kg)', cax=self.ax_cbar, orientation='horizontal')
return dens_min, dens_max
def clearPlots(self):
""" Clears all axes. """
for ax_cost in self.axes_cost:
ax_cost.cla()
for ax_vel in self.axes_velocity:
ax_vel.cla()
self.ax_cbar.cla()
def updateSliders(self, val):
""" Update slider values. """
# Get slider values
self.dens_min = self.sl_ind_dev_1.val
self.dens_max = self.sl_ind_dev_2.val
# Make sure the sliders do not go beyond one another
if self.dens_min > self.dens_max - SLIDER_EPSILON:
self.sl_ind_dev_1.set_val(self.dens_max - SLIDER_EPSILON)
if self.dens_max < self.dens_min + SLIDER_EPSILON:
self.sl_ind_dev_2.set_val(self.dens_min + SLIDER_EPSILON)
# Get slider values
self.dens_min = self.sl_ind_dev_1.val
self.dens_max = self.sl_ind_dev_2.val
# Clear plots
self.clearPlots()
# Update plots with the given density range
self.updatePlots()
if __name__ == "__main__":
# dir_path_mir = "../MirfitPrepare/20160929_062945_mir/"
#dir_path_mir = "../MirfitPrepare/20161007_052346_mir/"
dir_path_mir = "../MirfitPrepare/20161007_052749_mir/"
# Trajectory pickle file
#traj_pickle_file = "20160929_062945_trajectory.pickle"
#traj_pickle_file = "20161007_052346_trajectory.pickle"
traj_pickle_file = "20161007_052749_trajectory.pickle"
FitSimAnalyzer(dir_path_mir, traj_pickle_file) | [
"numpy.sqrt",
"wmpl.Utils.Pickling.loadPickle",
"numpy.array",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"matplotlib.widgets.Slider",
"matplotlib.colors.LogNorm",
"numpy.mean",
"numpy.where",
"wmpl.MetSim.FitSim.calcVelocity",
"numpy.max",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",... | [((796, 826), 'wmpl.MetSim.MetSim.loadInputs', 'loadInputs', (['meteor_inputs_file'], {}), '(meteor_inputs_file)\n', (806, 826), False, 'from wmpl.MetSim.MetSim import loadInputs\n'), ((887, 929), 'wmpl.Utils.Pickling.loadPickle', 'loadPickle', (['dir_path_mir', 'traj_pickle_file'], {}), '(dir_path_mir, traj_pickle_file)\n', (897, 929), False, 'from wmpl.Utils.Pickling import loadPickle\n'), ((3537, 3623), 'matplotlib.widgets.Slider', 'Slider', (['self.ax_sl_11', '"""Min"""', 'self.dens_min', 'self.dens_max'], {'valinit': 'self.dens_min'}), "(self.ax_sl_11, 'Min', self.dens_min, self.dens_max, valinit=self.\n dens_min)\n", (3543, 3623), False, 'from matplotlib.widgets import Slider\n'), ((3647, 3762), 'matplotlib.widgets.Slider', 'Slider', (['self.ax_sl_12', '"""Max"""', 'self.dens_min', 'self.dens_max'], {'valinit': 'self.dens_max', 'slidermin': 'self.sl_ind_dev_1'}), "(self.ax_sl_12, 'Max', self.dens_min, self.dens_max, valinit=self.\n dens_max, slidermin=self.sl_ind_dev_1)\n", (3653, 3762), False, 'from matplotlib.widgets import Slider\n'), ((3980, 3990), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3988, 3990), True, 'import matplotlib.pyplot as plt\n'), ((4460, 4483), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(6)', '(2)'], {}), '(6, 2)\n', (4477, 4483), True, 'import matplotlib.gridspec as gridspec\n'), ((4614, 4704), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(6)', '(2)'], {'subplot_spec': 'gs[:3, :]', 'wspace': '(0.0)', 'hspace': '(2.0)'}), '(6, 2, subplot_spec=gs[:3, :], wspace=0.0,\n hspace=2.0)\n', (4646, 4704), True, 'import matplotlib.gridspec as gridspec\n'), ((4720, 4746), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_ind[:5, 0]'], {}), '(gs_ind[:5, 0])\n', (4731, 4746), True, 'import matplotlib.pyplot as plt\n'), ((4766, 4826), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_ind[:5, 1]'], {'sharex': 'ax_ind_1', 'sharey': 'ax_ind_1'}), '(gs_ind[:5, 1], 
sharex=ax_ind_1, sharey=ax_ind_1)\n', (4777, 4826), True, 'import matplotlib.pyplot as plt\n'), ((4880, 4905), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_ind[5, :]'], {}), '(gs_ind[5, :])\n', (4891, 4905), True, 'import matplotlib.pyplot as plt\n'), ((4964, 5055), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(2)'], {'subplot_spec': 'gs[3:5, :]', 'wspace': '(0.0)', 'hspace': '(0.2)'}), '(1, 2, subplot_spec=gs[3:5, :], wspace=0.0,\n hspace=0.2)\n', (4996, 5055), True, 'import matplotlib.gridspec as gridspec\n'), ((5071, 5096), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_vel[0, 0]'], {}), '(gs_vel[0, 0])\n', (5082, 5096), True, 'import matplotlib.pyplot as plt\n'), ((5116, 5175), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_vel[0, 1]'], {'sharex': 'ax_vel_1', 'sharey': 'ax_vel_1'}), '(gs_vel[0, 1], sharex=ax_vel_1, sharey=ax_vel_1)\n', (5127, 5175), True, 'import matplotlib.pyplot as plt\n'), ((5363, 5453), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(2)', '(2)'], {'subplot_spec': 'gs[5, :]', 'wspace': '(0.15)', 'hspace': '(0.2)'}), '(2, 2, subplot_spec=gs[5, :], wspace=0.15,\n hspace=0.2)\n', (5395, 5453), True, 'import matplotlib.gridspec as gridspec\n'), ((5508, 5532), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_sl[0, 0]'], {}), '(gs_sl[0, 0])\n', (5519, 5532), True, 'import matplotlib.pyplot as plt\n'), ((5591, 5615), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_sl[1, 0]'], {}), '(gs_sl[1, 0])\n', (5602, 5615), True, 'import matplotlib.pyplot as plt\n'), ((5675, 5699), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_sl[0, 1]'], {}), '(gs_sl[0, 1])\n', (5686, 5699), True, 'import matplotlib.pyplot as plt\n'), ((5759, 5783), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_sl[1, 1]'], {}), '(gs_sl[1, 1])\n', (5770, 5783), True, 'import matplotlib.pyplot as plt\n'), ((1362, 1402), 'os.path.join', 'os.path.join', (['dir_path_mir', 'results_file'], 
{}), '(dir_path_mir, results_file)\n', (1374, 1402), False, 'import os\n'), ((2261, 2295), 'wmpl.MetSim.FitSim.calcVelocity', 'calcVelocity', (['obs_time', 'obs_length'], {}), '(obs_time, obs_length)\n', (2273, 2295), False, 'from wmpl.MetSim.FitSim import calcVelocity\n'), ((2704, 2885), 'numpy.sqrt', 'np.sqrt', (['(along_track_diffs ** 2 + self.traj.observations[station_ind].v_residuals[:\n len_part][1:] ** 2 + self.traj.observations[station_ind].h_residuals[:\n len_part][1:] ** 2)'], {}), '(along_track_diffs ** 2 + self.traj.observations[station_ind].\n v_residuals[:len_part][1:] ** 2 + self.traj.observations[station_ind].\n h_residuals[:len_part][1:] ** 2)\n', (2711, 2885), True, 'import numpy as np\n'), ((4249, 4270), 'numpy.load', 'np.load', (['results_file'], {}), '(results_file)\n', (4256, 4270), True, 'import numpy as np\n'), ((6795, 6816), 'numpy.unique', 'np.unique', (['v_init_all'], {}), '(v_init_all)\n', (6804, 6816), True, 'import numpy as np\n'), ((9571, 9599), 'numpy.array', 'np.array', (['([full_cost] * 1000)'], {}), '([full_cost] * 1000)\n', (9579, 9599), True, 'import numpy as np\n'), ((10060, 10084), 'numpy.array', 'np.array', (['vel_cost_pairs'], {}), '(vel_cost_pairs)\n', (10068, 10084), True, 'import numpy as np\n'), ((10730, 10759), 'numpy.linspace', 'np.linspace', (['(0)', 'full_cost', '(10)'], {}), '(0, full_cost, 10)\n', (10741, 10759), True, 'import numpy as np\n'), ((11022, 11051), 'numpy.linspace', 'np.linspace', (['(0)', 'full_cost', '(10)'], {}), '(0, full_cost, 10)\n', (11033, 11051), True, 'import numpy as np\n'), ((2403, 2452), 'numpy.mean', 'np.mean', (['((velocities[1:] - self.traj.v_init) ** 2)'], {}), '((velocities[1:] - self.traj.v_init) ** 2)\n', (2410, 2452), True, 'import numpy as np\n'), ((7211, 7241), 'numpy.where', 'np.where', (['(v_init_all == v_init)'], {}), '(v_init_all == v_init)\n', (7219, 7241), True, 'import numpy as np\n'), ((10356, 10368), 'numpy.min', 'np.min', (['vels'], {}), '(vels)\n', (10362, 10368), True, 
'import numpy as np\n'), ((10370, 10382), 'numpy.max', 'np.max', (['vels'], {}), '(vels)\n', (10376, 10382), True, 'import numpy as np\n'), ((10419, 10448), 'numpy.zeros_like', 'np.zeros_like', (['vel_rms_cost_x'], {}), '(vel_rms_cost_x)\n', (10432, 10448), True, 'import numpy as np\n'), ((10670, 10682), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (10678, 10682), True, 'import numpy as np\n'), ((10957, 10969), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (10965, 10969), True, 'import numpy as np\n'), ((11796, 11805), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11803, 11805), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3048), 'numpy.array', 'np.array', (['full_residuals'], {}), '(full_residuals)\n', (3032, 3048), True, 'import numpy as np\n'), ((7983, 8000), 'numpy.max', 'np.max', (['densities'], {}), '(densities)\n', (7989, 8000), True, 'import numpy as np\n'), ((8048, 8065), 'numpy.max', 'np.max', (['densities'], {}), '(densities)\n', (8054, 8065), True, 'import numpy as np\n'), ((8090, 8107), 'numpy.min', 'np.min', (['densities'], {}), '(densities)\n', (8096, 8107), True, 'import numpy as np\n'), ((8155, 8172), 'numpy.min', 'np.min', (['densities'], {}), '(densities)\n', (8161, 8172), True, 'import numpy as np\n'), ((8964, 8991), 'matplotlib.colors.LogNorm', 'matplotlib.colors.LogNorm', ([], {}), '()\n', (8989, 8991), False, 'import matplotlib\n'), ((1169, 1208), 'wmpl.Utils.TrajConversions.jd2Date', 'jd2Date', (['self.traj.jdt_ref'], {'dt_obj': '(True)'}), '(self.traj.jdt_ref, dt_obj=True)\n', (1176, 1208), False, 'from wmpl.Utils.TrajConversions import jd2Date\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from matplotlib import cm
nnodes = 5;
nvars = 1024;
nsteps = 100;
path = './npy/'
dat = np.zeros([nnodes*nsteps,nvars]);
x = np.linspace(0,32*np.pi,nvars)
#plt.figure()
#dat = np.zeros([nvars])
#fn = path+'uexact.npy'
#dat = np.load(fn)
#plt.plot(x,dat)
plt.figure()
dat = np.zeros([nsteps+1,nvars]);
counter = 0
for step in range(0,nsteps+1):
# for m in range(1,nnodes+1):
#fn=path+'ys'+str(step).zfill(4)+'m' + str(m).zfill(2) + '.npy'
fn=path+'ys'+str(step).zfill(4) + '.npy'
dat[counter,:] = np.load(fn);
counter = counter + 1
plt.imshow(dat,extent=[0,101,0,60],aspect='auto',origin='lower',interpolation='none')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('t')
plt.title('state')
plt.figure()
dat = np.zeros([nsteps*nnodes,nvars]);
counter = 0
for step in range(1,nsteps+1):
for m in range(1,nnodes+1):
fn=path+'ytargets'+str(step).zfill(4)+'m' + str(m).zfill(2) + '.npy'
#fn=path+'ys'+str(step).zfill(4) + '.npy'
dat[counter,:] = np.load(fn);
counter = counter + 1
plt.imshow(dat,extent=[0,101,0,60],aspect='auto',origin='lower',interpolation='none')
plt.colorbar()
plt.xlabel('x')
plt.ylabel('t')
plt.title('target state')
#plt.figure()
#plt.plot(dat[:,15])
#plt.title('state at node 15')
#plt.figure()
#plt.plot(dat[:,16])
#plt.title('state at node 16')
#plt.figure()
#plt.plot(dat[:,-1])
#plt.title('state at node 1024')
plt.figure()
#plt.plot(dat[-1,:])
datuex = np.zeros([nvars])
fn = path+'uexact.npy'
datuex = np.load(fn)
plt.plot(datuex, label='exact')
#plt.figure()
datu = np.zeros([nvars])
fn = path+'uk0001.npy'
datu = np.load(fn)
plt.plot(datu, label='initial')
#plt.figure()
datu = np.zeros([nvars])
fn = path+'ufinal.npy'
datu = np.load(fn)
plt.plot(datu, label='final')
plt.legend(loc='upper right')
plt.title('controls')
plt.figure()
plt.plot(datu-datuex)
plt.title('diff computed - exact control')
plt.figure()
fn = path+'gradientk0001.npy'
datu = np.load(fn)
#plt.plot(datu,label='it 1')
fn = path+'gradientk0005.npy'
datu = np.load(fn)
plt.plot(datu,label='it 5')
fn = path+'gradientk0020.npy'
datu = np.load(fn)
plt.plot(datu,label='it 20')
fn = path+'gradientk0060.npy'
datu = np.load(fn)
plt.plot(datu,label='it 60')
plt.legend(loc='upper left')
plt.title('gradients')
plt.title('gradients')
plt.show()
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"... | [((175, 209), 'numpy.zeros', 'np.zeros', (['[nnodes * nsteps, nvars]'], {}), '([nnodes * nsteps, nvars])\n', (183, 209), True, 'import numpy as np\n'), ((213, 246), 'numpy.linspace', 'np.linspace', (['(0)', '(32 * np.pi)', 'nvars'], {}), '(0, 32 * np.pi, nvars)\n', (224, 246), True, 'import numpy as np\n'), ((344, 356), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (354, 356), True, 'import matplotlib.pyplot as plt\n'), ((364, 393), 'numpy.zeros', 'np.zeros', (['[nsteps + 1, nvars]'], {}), '([nsteps + 1, nvars])\n', (372, 393), True, 'import numpy as np\n'), ((636, 732), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dat'], {'extent': '[0, 101, 0, 60]', 'aspect': '"""auto"""', 'origin': '"""lower"""', 'interpolation': '"""none"""'}), "(dat, extent=[0, 101, 0, 60], aspect='auto', origin='lower',\n interpolation='none')\n", (646, 732), True, 'import matplotlib.pyplot as plt\n'), ((722, 736), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (734, 736), True, 'import matplotlib.pyplot as plt\n'), ((737, 752), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (747, 752), True, 'import matplotlib.pyplot as plt\n'), ((753, 768), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {}), "('t')\n", (763, 768), True, 'import matplotlib.pyplot as plt\n'), ((769, 787), 'matplotlib.pyplot.title', 'plt.title', (['"""state"""'], {}), "('state')\n", (778, 787), True, 'import matplotlib.pyplot as plt\n'), ((789, 801), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (799, 801), True, 'import matplotlib.pyplot as plt\n'), ((808, 842), 'numpy.zeros', 'np.zeros', (['[nsteps * nnodes, nvars]'], {}), '([nsteps * nnodes, nvars])\n', (816, 842), True, 'import numpy as np\n'), ((1088, 1184), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dat'], {'extent': '[0, 101, 0, 60]', 'aspect': '"""auto"""', 'origin': '"""lower"""', 'interpolation': '"""none"""'}), "(dat, extent=[0, 101, 0, 60], aspect='auto', 
origin='lower',\n interpolation='none')\n", (1098, 1184), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1188), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1186, 1188), True, 'import matplotlib.pyplot as plt\n'), ((1189, 1204), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1199, 1204), True, 'import matplotlib.pyplot as plt\n'), ((1205, 1220), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {}), "('t')\n", (1215, 1220), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1246), 'matplotlib.pyplot.title', 'plt.title', (['"""target state"""'], {}), "('target state')\n", (1230, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1452, 1464), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1462, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1513), 'numpy.zeros', 'np.zeros', (['[nvars]'], {}), '([nvars])\n', (1504, 1513), True, 'import numpy as np\n'), ((1546, 1557), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1553, 1557), True, 'import numpy as np\n'), ((1558, 1589), 'matplotlib.pyplot.plot', 'plt.plot', (['datuex'], {'label': '"""exact"""'}), "(datuex, label='exact')\n", (1566, 1589), True, 'import matplotlib.pyplot as plt\n'), ((1613, 1630), 'numpy.zeros', 'np.zeros', (['[nvars]'], {}), '([nvars])\n', (1621, 1630), True, 'import numpy as np\n'), ((1661, 1672), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1668, 1672), True, 'import numpy as np\n'), ((1673, 1704), 'matplotlib.pyplot.plot', 'plt.plot', (['datu'], {'label': '"""initial"""'}), "(datu, label='initial')\n", (1681, 1704), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1745), 'numpy.zeros', 'np.zeros', (['[nvars]'], {}), '([nvars])\n', (1736, 1745), True, 'import numpy as np\n'), ((1776, 1787), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1783, 1787), True, 'import numpy as np\n'), ((1788, 1817), 'matplotlib.pyplot.plot', 'plt.plot', (['datu'], {'label': '"""final"""'}), "(datu, label='final')\n", 
(1796, 1817), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1847), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1828, 1847), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1869), 'matplotlib.pyplot.title', 'plt.title', (['"""controls"""'], {}), "('controls')\n", (1857, 1869), True, 'import matplotlib.pyplot as plt\n'), ((1871, 1883), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1881, 1883), True, 'import matplotlib.pyplot as plt\n'), ((1884, 1907), 'matplotlib.pyplot.plot', 'plt.plot', (['(datu - datuex)'], {}), '(datu - datuex)\n', (1892, 1907), True, 'import matplotlib.pyplot as plt\n'), ((1906, 1948), 'matplotlib.pyplot.title', 'plt.title', (['"""diff computed - exact control"""'], {}), "('diff computed - exact control')\n", (1915, 1948), True, 'import matplotlib.pyplot as plt\n'), ((1951, 1963), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1961, 1963), True, 'import matplotlib.pyplot as plt\n'), ((2001, 2012), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (2008, 2012), True, 'import numpy as np\n'), ((2079, 2090), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (2086, 2090), True, 'import numpy as np\n'), ((2091, 2119), 'matplotlib.pyplot.plot', 'plt.plot', (['datu'], {'label': '"""it 5"""'}), "(datu, label='it 5')\n", (2099, 2119), True, 'import matplotlib.pyplot as plt\n'), ((2156, 2167), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (2163, 2167), True, 'import numpy as np\n'), ((2168, 2197), 'matplotlib.pyplot.plot', 'plt.plot', (['datu'], {'label': '"""it 20"""'}), "(datu, label='it 20')\n", (2176, 2197), True, 'import matplotlib.pyplot as plt\n'), ((2234, 2245), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (2241, 2245), True, 'import numpy as np\n'), ((2246, 2275), 'matplotlib.pyplot.plot', 'plt.plot', (['datu'], {'label': '"""it 60"""'}), "(datu, label='it 60')\n", (2254, 2275), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2303), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2285, 2303), True, 'import matplotlib.pyplot as plt\n'), ((2304, 2326), 'matplotlib.pyplot.title', 'plt.title', (['"""gradients"""'], {}), "('gradients')\n", (2313, 2326), True, 'import matplotlib.pyplot as plt\n'), ((2329, 2351), 'matplotlib.pyplot.title', 'plt.title', (['"""gradients"""'], {}), "('gradients')\n", (2338, 2351), True, 'import matplotlib.pyplot as plt\n'), ((2354, 2364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2362, 2364), True, 'import matplotlib.pyplot as plt\n'), ((597, 608), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (604, 608), True, 'import numpy as np\n'), ((1050, 1061), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1057, 1061), True, 'import numpy as np\n')] |
import numpy as np
from numba import jit
from .utils import ConfidenceModel, print_verbose, GPModel
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import ExtraTreeRegressor
from sklearn.linear_model import LinearRegression
@jit(nopython=True)
def get_random_candidate(number, dimensionality, non_zero):
    """
    Draw random sparse candidate weight vectors for kernel combinations.

    Each candidate activates between 2 and `non_zero` kernels, giving every
    active kernel a uniform weight in (epsilon, 1); all other entries stay 0.

    Arguments:
        number {int} -- Number of random candidates to generate
        dimensionality {int} -- Length of each weight vector
        non_zero {int} -- Maximum number of active (non zero) kernels

    Returns:
        Array of weights for kernel combination (dimension: number * dimensionality)
    """
    eps = 0.001
    candidates = np.zeros((number, dimensionality))
    for row in range(number):
        # how many kernels participate in this combination
        active = np.random.randint(2, non_zero + 1)
        chosen = np.random.choice(dimensionality, active, replace = False)
        for col in chosen:
            candidates[row, col] = np.random.uniform(eps, 1)
    return candidates
class CombinationKernelOptimizer:
    """
    Wrapper for optimizer
    This object is abstract and should be implemented

    Holds the evaluation budget (`iteration` candidate/score slots), seeds it
    with random and user-supplied candidates, and exposes the best one found.
    """
    @classmethod
    # NOTE(review): mutable default `init_candidates = []` -- not mutated here,
    # but the same list object is shared across calls; consider `None` sentinel.
    def create(cls, method, dimensionality, iteration = 1000, init_candidates = [], **args):
        """
        Optimizer factory

        `method` selects the strategy: "random" (pure random search), "model"
        (bagging-regressor-guided search) or "gp" (GP-model-guided search).
        """
        if method == "random":
            # With random_init = iteration - len(init_candidates), the whole
            # budget is spent on random points, so run_optimization is a no-op.
            return CombinationKernelOptimizer(dimensionality = dimensionality, iteration = iteration, random_init = iteration - len(init_candidates), init_candidates = init_candidates, **args)
        elif method == "model":
            return ModelGuidedOptimization(dimensionality = dimensionality, iteration = iteration, init_candidates = init_candidates, **args)
        elif method == "gp":
            return ModelGuidedOptimization(dimensionality = dimensionality, iteration = iteration, init_candidates = init_candidates, model = GPModel(dimensionality), **args)
        else:
            # NOTE(review): returns None on an unknown method instead of raising.
            print("Optimizer unknown")
    def __init__(self, objective_function, dimensionality, iteration = 1000,
        init_candidates = [], random_init = 100, non_zero = 5, verbose = 0, **args):
        """
        Wrapper for optimizer initialization
        Arguments:
            objective_function {func} -- Function that evaluates the value
            dimensionality {int} -- Dimensionality of the candidates for optimization
            iteration {int} -- Number of iteration to use
            init_candidates {Array} -- Initial candidates to use
            non_zero {int} -- Maximum non zeros kernels in the combination
        """
        assert len(init_candidates) + random_init <= iteration, "No optimizer needed"
        self.objective_function = objective_function
        self.dimensionality = dimensionality
        self.iteration = iteration
        self.non_zero = non_zero
        self.verbose = verbose
        # Pre-allocated evaluation history: one row/score per budget slot.
        self.candidates = np.zeros((iteration, dimensionality))
        self.scores = np.zeros(iteration)
        # Add random points
        if random_init > 0:
            self.candidates[:random_init] = get_random_candidate(random_init, dimensionality, non_zero)
            self.scores[:random_init] = [objective_function(c) for c in self.candidates[:random_init]]
        # Add initial points
        for i, candidate in enumerate(init_candidates):
            self.candidates[random_init + i] = candidate
            self.scores[random_init + i] = objective_function(candidate)
        # Next eval should start at this level
        self.random_init = random_init + len(init_candidates)
    def run_optimization(self):
        """
        Runs the optimization and returns the best candidate

        Base class does nothing beyond the initialization phase; subclasses
        override this to fill the remaining budget slots.
        """
        return self.get_best_candidate()
    def get_best_candidate(self):
        """
        Returns best candidate

        Uses nanargmax so NaN scores (failed evaluations) are skipped.
        """
        best = np.nanargmax(self.scores)
        # NOTE(review): `<=` flags the first optimized slot (index == random_init)
        # as "not obtained after optimization"; `<` may have been intended.
        if best <= self.random_init:
            print_verbose("Best solution not obtained after optimization", self.verbose, level = 1)
        return self.candidates[best]
class ModelGuidedOptimization(CombinationKernelOptimizer):
    """
    Explores the space of possible candidates guided by the prediction of a model

    At each step a surrogate model is refit on all evaluations so far and used
    to rank a fresh random sample via an upper-confidence-bound criterion.
    """
    # NOTE(review): `base_model = LinearRegression()` is evaluated once at class
    # definition, so all instances share the same estimator object (benign if
    # BaggingRegressor clones it, but worth confirming).
    def __init__(self, objective_function, dimensionality, iteration = 1000,
        init_candidates = [], random_init = 100, non_zero = 5, verbose = 0,
        model = None, acquisition_evals = 1000, exploration = 0.1, base_model = LinearRegression()):
        """
        Initialize model for evaluation
        Keyword Arguments:
            model {ConfidenceModel} -- Model which estimated confidence (default: {None})
            random_init {int} -- Number of random initialization (default: {10})
            acquisition_evals {int} -- Number of estimation (default: {10000})
            exploration {float} -- Parameter controlling exploration for UCB (Higher bound has more weight)
        """
        CombinationKernelOptimizer.__init__(self, objective_function, dimensionality, iteration, init_candidates, random_init, non_zero, verbose)
        if model is None:
            # Default surrogate: bagged regressors wrapped to expose a
            # prediction-with-confidence interface.
            self.model = ConfidenceModel(
                BaggingRegressor(base_model, n_estimators = 150),
                acquisition_evals
            )
        else:
            self.model = model
        self.acquisition_evals = acquisition_evals
        self.exploration = exploration
    def run_optimization(self):
        """
        Explores the space of possible values given a model predicting the
        estimated performances at this point in the space of possible values

        Returns the best candidate found; stops early if a score of exactly 1
        (perfect kernel-target alignment) is reached.
        """
        for step in range(self.random_init, self.iteration):
            # Fit model on previous evaluation
            self.model.fit(self.candidates[:step], self.scores[:step])
            # Evaluate random sample
            potential_candidates = get_random_candidate(self.acquisition_evals, self.dimensionality, self.non_zero)
            predictions, confidence = self.model.predict_confidence(potential_candidates)
            # Compute the best candidate
            # UCB: add the confidence bonus on every step except the last one
            # (the final pick is purely exploitative).
            if step < self.iteration - 1:
                predictions += self.exploration * confidence
            index_candidate = np.argmax(predictions)
            self.candidates[step] = potential_candidates[index_candidate]
            self.scores[step] = self.objective_function(potential_candidates[index_candidate])
            print_verbose("Step {} - KTA {:.5f}".format(step, self.scores[step]), self.verbose, level = 1)
            if self.scores[step] == 1:
                print_verbose("Optimal solution obtained", self.verbose, level = 1)
                break
        return self.get_best_candidate()
| [
"numpy.nanargmax",
"numpy.random.choice",
"numpy.argmax",
"numpy.zeros",
"numba.jit",
"numpy.random.randint",
"numpy.random.uniform",
"sklearn.ensemble.BaggingRegressor",
"sklearn.linear_model.LinearRegression"
] | [((242, 260), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (245, 260), False, 'from numba import jit\n'), ((604, 638), 'numpy.zeros', 'np.zeros', (['(number, dimensionality)'], {}), '((number, dimensionality))\n', (612, 638), True, 'import numpy as np\n'), ((700, 734), 'numpy.random.randint', 'np.random.randint', (['(2)', '(non_zero + 1)'], {}), '(2, non_zero + 1)\n', (717, 734), True, 'import numpy as np\n'), ((753, 819), 'numpy.random.choice', 'np.random.choice', (['dimensionality', 'number_components'], {'replace': '(False)'}), '(dimensionality, number_components, replace=False)\n', (769, 819), True, 'import numpy as np\n'), ((2940, 2977), 'numpy.zeros', 'np.zeros', (['(iteration, dimensionality)'], {}), '((iteration, dimensionality))\n', (2948, 2977), True, 'import numpy as np\n'), ((3000, 3019), 'numpy.zeros', 'np.zeros', (['iteration'], {}), '(iteration)\n', (3008, 3019), True, 'import numpy as np\n'), ((3922, 3947), 'numpy.nanargmax', 'np.nanargmax', (['self.scores'], {}), '(self.scores)\n', (3934, 3947), True, 'import numpy as np\n'), ((4540, 4558), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (4556, 4558), False, 'from sklearn.linear_model import LinearRegression\n'), ((849, 878), 'numpy.random.uniform', 'np.random.uniform', (['epsilon', '(1)'], {}), '(epsilon, 1)\n', (866, 878), True, 'import numpy as np\n'), ((6393, 6415), 'numpy.argmax', 'np.argmax', (['predictions'], {}), '(predictions)\n', (6402, 6415), True, 'import numpy as np\n'), ((5285, 5331), 'sklearn.ensemble.BaggingRegressor', 'BaggingRegressor', (['base_model'], {'n_estimators': '(150)'}), '(base_model, n_estimators=150)\n', (5301, 5331), False, 'from sklearn.ensemble import BaggingRegressor\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Converters to flip problem sense, e.g. maximization to minimization and vice versa."""
import copy
from typing import Optional, List, Union
import numpy as np
from .quadratic_program_converter import QuadraticProgramConverter
from ..exceptions import QiskitOptimizationError
from ..problems.quadratic_objective import ObjSense
from ..problems.quadratic_program import QuadraticProgram
class _FlipProblemSense(QuadraticProgramConverter):
    """Flip the sense of a problem, e.g. converts from maximization to minimization and
    vice versa, regardless of the current sense."""

    def __init__(self) -> None:
        # Number of variables of the last converted problem; used by
        # ``interpret`` to validate the length of a result vector.
        self._src_num_vars: Optional[int] = None

    def convert(self, problem: QuadraticProgram) -> QuadraticProgram:
        """Flip the sense of a problem.

        Args:
            problem: The problem to be flipped.

        Returns:
            A converted problem, that has the flipped sense.
        """
        # remember the problem size as a reference for interpret()
        self._src_num_vars = problem.get_num_vars()
        target_sense = self._get_desired_sense(problem)
        if problem.objective.sense == target_sense:
            # already in the desired sense -- return the problem unchanged
            return problem
        # negate the whole objective so the optimal solution set is preserved
        flipped = copy.deepcopy(problem)
        flipped.objective.sense = target_sense
        flipped.objective.constant = (-1) * problem.objective.constant
        flipped.objective.linear = (-1) * problem.objective.linear.coefficients
        flipped.objective.quadratic = (-1) * problem.objective.quadratic.coefficients
        return flipped

    def _get_desired_sense(self, problem: QuadraticProgram) -> ObjSense:
        """
        Computes a desired sense of the problem. By default, flip the sense.

        Args:
            problem: a problem to check

        Returns:
            A desired sense, if the problem was a minimization problem, then the sense is
            maximization and vice versa.
        """
        if problem.objective.sense == ObjSense.MAXIMIZE:
            return ObjSense.MINIMIZE
        return ObjSense.MAXIMIZE

    def interpret(self, x: Union[np.ndarray, List[float]]) -> np.ndarray:
        """Convert the result of the converted problem back to that of the original problem.

        Args:
            x: The result of the converted problem or the given result in case of FAILURE.

        Returns:
            The result of the original problem.

        Raises:
            QiskitOptimizationError: if the number of variables in the result differs from
                that of the original problem.
        """
        if len(x) == self._src_num_vars:
            return np.asarray(x)
        raise QiskitOptimizationError(
            f"The number of variables in the passed result differs from "
            f"that of the original problem, should be {self._src_num_vars}, but got {len(x)}."
        )
class MaximizeToMinimize(_FlipProblemSense):
    """Ensure a minimization problem: a maximization problem is flipped, while a
    problem that is already a minimization is left unchanged."""

    def _get_desired_sense(self, problem: QuadraticProgram) -> ObjSense:
        # Fixed target sense; the base class only converts when it differs.
        return ObjSense.MINIMIZE
class MinimizeToMaximize(_FlipProblemSense):
    """Ensure a maximization problem: a minimization problem is flipped, while a
    problem that is already a maximization is left unchanged."""

    def _get_desired_sense(self, problem: QuadraticProgram) -> ObjSense:
        # Fixed target sense; the base class only converts when it differs.
        return ObjSense.MAXIMIZE
| [
"numpy.asarray",
"copy.deepcopy"
] | [((3476, 3489), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3486, 3489), True, 'import numpy as np\n'), ((1695, 1717), 'copy.deepcopy', 'copy.deepcopy', (['problem'], {}), '(problem)\n', (1708, 1717), False, 'import copy\n')] |
import numpy as np
import scipy.spatial
import skimage.draw
import torch
from torchvision import io
import face_alignment
import matplotlib.pyplot as plt
def interpolate_from_landmarks(image, landmarks, vertex_indices=None, weights=None, mask=None):
    """Interpolate dense per-pixel coordinates from sparse facial landmarks.

    The landmarks plus the four image corners are Delaunay-triangulated, each
    pixel is assigned to its covering triangle, and its position is expressed
    in barycentric coordinates of that triangle's corners.

    Arguments:
        image: (..., H, W) tensor; only its spatial size and device are used.
        landmarks: (K, 2) tensor of (row, col) landmark positions.
        vertex_indices: optional (N, 3) long tensor of precomputed triangle
            corner indices; when given, the triangulation step is skipped and
            `weights` must be given as well.
        weights: optional (N, 3) tensor of precomputed barycentric weights.
        mask: optional (H, W) boolean tensor restricting which pixels are
            interpolated; only valid when `vertex_indices` is None.

    Returns:
        interpolated: (N, 2) interpolated coordinates.
        vertex_indices: (N, 3) triangle corner indices per point.
        weights: (N, 3) barycentric weights per point.
    """
    H, W = image.shape[-2:]
    # Add the image corners so the triangulation covers the whole frame.
    rect = landmarks.new_tensor([[0, 0], [H, 0], [0, W], [H, W]])
    vertices = torch.cat([landmarks, rect], dim=0)
    vertices_cpu = vertices.cpu().detach()
    if vertex_indices is None:
        delaunay = scipy.spatial.Delaunay(vertices_cpu)
        triangles = delaunay.simplices
        # Rasterize every triangle into a per-pixel triangle-id map
        # (-1 marks pixels outside all triangles).
        facet_map = np.full([H, W], -1, dtype=np.int32)
        for index, triangle in enumerate(triangles):
            points = vertices_cpu[triangle]
            rr, cc = skimage.draw.polygon(points[:, 0], points[:, 1], [H - 1, W - 1])
            facet_map[rr, cc] = index
        facet_map = torch.from_numpy(facet_map).long().to(image.device)
        triangles = torch.from_numpy(triangles).long().to(image.device)
        grid0, grid1 = torch.meshgrid(torch.arange(H), torch.arange(W))
        grids = torch.stack([grid0, grid1], dim=-1).to(image.device)
        valid = facet_map >= 0
        if mask is not None:
            valid = valid & mask
        facet_map = facet_map[valid]
        grids = grids[valid]
        N = len(facet_map)
        # Look up the three corner indices of each pixel's triangle:
        # N -> N x 1 x 3 gather index into the N x F x 3 triangle table.
        facet_map = facet_map[..., None, None].expand(-1, 1, 3)
        expanded = triangles[None, ...].expand(N, -1, -1)
        # N x 1 x 3 -> N x 3
        vertex_indices = torch.gather(expanded, dim=1, index=facet_map).squeeze(1)
    else:
        assert mask is None
        # `grids` only exists on the triangulation path above, so weights
        # cannot be derived here -- require them explicitly.
        assert weights is not None, "weights are required when vertex_indices is given"
        N = len(vertex_indices)
    # Gather the (row, col) coordinates of the three corners of each point's
    # triangle: N x 3 indices -> N x 3 x 2 coordinates.  (BUGFIX: the original
    # only performed this gather on the `else` path, so the default
    # triangulation path crashed on indexing the 2-D `vertices` tensor.)
    corner_index = vertex_indices[..., None].expand(-1, -1, 2)
    corners = vertices[None, ...].expand(N, -1, -1)
    corners = torch.gather(corners, dim=1, index=corner_index)
    if weights is None:
        with torch.no_grad():
            # Barycentric coordinates of each pixel w.r.t. its triangle, see
            # https://gamedev.stackexchange.com/questions/23743/whats-the-most-efficient-way-to-find-barycentric-coordinates/63203#63203
            v0 = corners[:, 1, :] - corners[:, 0, :]
            v1 = corners[:, 2, :] - corners[:, 0, :]
            v2 = grids - corners[:, 0, :]
            den = v0[:, 1] * v1[:, 0] - v1[:, 1] * v0[:, 0]
            v = (v2[:, 1] * v1[:, 0] - v1[:, 1] * v2[:, 0]) / den
            w = (v0[:, 1] * v2[:, 0] - v2[:, 1] * v0[:, 0]) / den
            u = 1. - v - w
            weights = torch.stack([u, v, w], dim=-1)
    interpolated = (corners * weights.unsqueeze(-1)).sum(dim=1)
    # N x 2, N x 3, N x 3
    return interpolated, vertex_indices, weights
if __name__ == '__main__':
    # Smoke test: detect 2D landmarks on a sample image and interpolate
    # dense coordinates from them.
    image = io.read_image('research/jackiechan.png')
    im = image.permute(1, 2, 0).numpy()
    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
    landmarks = fa.get_landmarks(im)[0]
    # face_alignment returns (x, y); flip to the (row, col) order used above.
    landmarks = torch.from_numpy(landmarks).flip(dims=(-1,))
    interpolated, vertex_indices, weights = interpolate_from_landmarks(image, landmarks)
    # Report shapes instead of dropping into the debugger: the previous
    # `pdb.set_trace()` leftover halted every non-interactive run.
    print(interpolated.shape, vertex_indices.shape, weights.shape)
| [
"torch.stack",
"face_alignment.FaceAlignment",
"torchvision.io.read_image",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.plot",
"torch.from_numpy",
"torch.no_grad",
"matplotlib.pyplot.figure",
"torch.arange",
"numpy.random.seed",
"pdb.set_trace",
"matplotlib.pyplot.scatter",
"numpy.full",... | [((376, 411), 'torch.cat', 'torch.cat', (['[landmarks, rect]'], {'dim': '(0)'}), '([landmarks, rect], dim=0)\n', (385, 411), False, 'import torch\n'), ((1949, 1994), 'torch.gather', 'torch.gather', (['vertices'], {'dim': '(1)', 'index': 'expanded'}), '(vertices, dim=1, index=expanded)\n', (1961, 1994), False, 'import torch\n'), ((4455, 4495), 'torchvision.io.read_image', 'io.read_image', (['"""research/jackiechan.png"""'], {}), "('research/jackiechan.png')\n", (4468, 4495), False, 'from torchvision import io\n'), ((4550, 4635), 'face_alignment.FaceAlignment', 'face_alignment.FaceAlignment', (['face_alignment.LandmarksType._2D'], {'flip_input': '(False)'}), '(face_alignment.LandmarksType._2D, flip_input=False\n )\n', (4578, 4635), False, 'import face_alignment\n'), ((4843, 4858), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4856, 4858), False, 'import pdb\n'), ((611, 646), 'numpy.full', 'np.full', (['[H, W]', '(-1)'], {'dtype': 'np.int32'}), '([H, W], -1, dtype=np.int32)\n', (618, 646), True, 'import numpy as np\n'), ((1561, 1607), 'torch.gather', 'torch.gather', (['expanded'], {'dim': '(1)', 'index': 'facet_map'}), '(expanded, dim=1, index=facet_map)\n', (1573, 1607), False, 'import torch\n'), ((2586, 2616), 'torch.stack', 'torch.stack', (['[u, v, w]'], {'dim': '(-1)'}), '([u, v, w], dim=-1)\n', (2597, 2616), False, 'import torch\n'), ((2957, 2983), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 3)'}), '(figsize=(3, 3))\n', (2967, 2983), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3436), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (3432, 3436), True, 'import numpy as np\n'), ((3699, 3753), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'selected[:, 1]', 'y': 'selected[:, 0]', 'c': '"""r"""'}), "(x=selected[:, 1], y=selected[:, 0], c='r')\n", (3710, 3753), True, 'import matplotlib.pyplot as plt\n'), ((3945, 3962), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0)'], {}), '(0, 
0)\n', (3956, 3962), True, 'import matplotlib.pyplot as plt\n'), ((4089, 4099), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4097, 4099), True, 'import matplotlib.pyplot as plt\n'), ((1050, 1065), 'torch.arange', 'torch.arange', (['H'], {}), '(H)\n', (1062, 1065), False, 'import torch\n'), ((1067, 1082), 'torch.arange', 'torch.arange', (['W'], {}), '(W)\n', (1079, 1082), False, 'import torch\n'), ((2041, 2056), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2054, 2056), False, 'import torch\n'), ((3212, 3255), 'matplotlib.pyplot.plot', 'plt.plot', (['points[:, 1]', 'points[:, 0]'], {'c': '"""g"""'}), "(points[:, 1], points[:, 0], c='g')\n", (3220, 3255), True, 'import matplotlib.pyplot as plt\n'), ((4003, 4020), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (4018, 4020), True, 'import matplotlib.pyplot as plt\n'), ((4062, 4079), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (4077, 4079), True, 'import matplotlib.pyplot as plt\n'), ((4687, 4714), 'torch.from_numpy', 'torch.from_numpy', (['landmarks'], {}), '(landmarks)\n', (4703, 4714), False, 'import torch\n'), ((1100, 1135), 'torch.stack', 'torch.stack', (['[grid0, grid1]'], {'dim': '(-1)'}), '([grid0, grid1], dim=-1)\n', (1111, 1135), False, 'import torch\n'), ((888, 915), 'torch.from_numpy', 'torch.from_numpy', (['facet_map'], {}), '(facet_map)\n', (904, 915), False, 'import torch\n'), ((960, 987), 'torch.from_numpy', 'torch.from_numpy', (['triangles'], {}), '(triangles)\n', (976, 987), False, 'import torch\n')] |
''' Mesh analysis '''
import numpy as np
from scipy import sparse
FLOAT64_EPS = np.finfo(np.float64).eps
FLOAT_TYPES = np.sctypes['float']
white = 0
red = 1
black = 2
green = 3
def sym_hemisphere(vertices,
                   hemisphere='z',
                   equator_thresh=None,
                   dist_thresh=None):
    """ Indices for hemisphere from an array of `vertices` on a sphere

    Selects the vertices from a sphere that lie in one hemisphere.
    If there are pairs of symmetric points on the equator, we return only
    the first occurring of each pair.

    Parameters
    ----------
    vertices : (N,3) array-like
       (x, y, z) Point coordinates of N vertices
    hemisphere : str, optional
       Which hemisphere to select.  Values of '-x', '-y', '-z' select,
       respectively negative x, y, and z hemispheres; 'x', 'y', 'z'
       select the positive x, y, and z hemispheres.  Default is 'z'
    equator_thresh : None or float, optional
       Threshold (+-0) to identify points as being on the equator of the
       sphere.   If None, generate a default based on the data type
    dist_thresh : None or float, optional
       For a vertex ``v`` on the equator, if there is a vertex
       ``v_dash`` in `vertices`, such that the Euclidean distance
       between ``v * -1`` and ``v_dash`` is <= `dist_thresh`, then ``v``
       is taken to be in the opposite hemisphere to ``v_dash``, and only
       ``v``, not ``v_dash``, will appear in the output vertex indices
       `inds`. None results in a threshold based on the input data type
       of ``vertices``

    Returns
    -------
    inds : (P,) array
       Indices into `vertices` giving points in hemisphere

    Notes
    -----
    We expect the sphere to be symmetric, and so there may well be
    points on the sphere equator that are both on the same diameter
    line.  The routine returns the first of the two points in the
    original order of `vertices`.
    """
    vertices = np.asarray(vertices)
    assert vertices.shape[1] == 3
    # Split an optional leading sign off the hemisphere spec, e.g. '-z'.
    if len(hemisphere) == 2:
        sign, hemisphere = hemisphere
        if sign not in '+-':
            raise ValueError('Hemisphere sign must be + or -')
    else:
        sign = '+'
    try:
        coord = 'xyz'.index(hemisphere)
    except ValueError:
        raise ValueError('Hemisphere must be (+-) x, y or z')
    # Derive default thresholds from the floating point resolution of the
    # input data, falling back to float64 eps for non-float input.
    if equator_thresh is None or dist_thresh is None:
        if not vertices.dtype.type in FLOAT_TYPES:
            EPS = FLOAT64_EPS
        else:
            EPS = np.finfo(vertices.dtype.type).eps
        if equator_thresh is None:
            equator_thresh = EPS * 10
        if dist_thresh is None:
            dist_thresh = EPS * 20
    # column with coordinates for selecting the hemisphere
    sel_col = vertices[:,coord]
    # Keep every vertex strictly inside the chosen hemisphere plus all
    # near-equator vertices; symmetric equator duplicates are removed below.
    if sign == '+':
        inds = sel_col > -equator_thresh
    else:
        inds = sel_col < equator_thresh
    # find equator points
    eq_inds, = np.where(
        (sel_col < equator_thresh) & (sel_col > -equator_thresh))
    # eliminate later points that are symmetric on equator
    untested_inds = list(eq_inds)
    out_inds = []
    for ind in eq_inds:
        untested_inds.remove(ind)
        # mirror the candidate through the origin and look for near matches
        test_vert = vertices[ind,:] * -1
        # NOTE(review): this compares *squared* distances against
        # `dist_thresh` while the docstring speaks of Euclidean distance;
        # confirm whether the threshold should be squared here.
        test_dists = np.sum(
            (vertices[untested_inds,:] - test_vert)**2, axis=1)
        sym_inds, = np.where(test_dists < dist_thresh)
        # NOTE(review): removing from `untested_inds` inside this loop shifts
        # later positions, so a second entry of `sym_inds` would index a
        # stale location; harmless when at most one symmetric partner exists.
        for si in sym_inds:
            out_ind = untested_inds[si]
            untested_inds.remove(out_ind)
            out_inds.append(out_ind)
        if len(untested_inds) == 0:
            break
    # drop the second member of each symmetric equator pair
    inds[out_inds] = False
    return np.nonzero(inds)[0]
def faces_from_sphere_vertices(vertices):
    """
    Triangulate a set of vertices on the sphere.

    Parameters
    ----------
    vertices : (M, 3) ndarray
        XYZ coordinates of vertices on the sphere.

    Returns
    -------
    faces : (N, 3) ndarray
        Indices into vertices; forms triangular faces.
    """
    from scipy.spatial import Delaunay
    # The convex hull of points on a sphere is exactly its surface
    # triangulation.
    hull_faces = Delaunay(vertices).convex_hull
    return hull_faces
def vertinds_to_neighbors(vertex_inds, faces):
    """ Return indices of neighbors of vertices given `faces`

    Parameters
    ----------
    vertex_inds : sequence
       length N. Indices of vertices
    faces : (F, 3) array-like
       Faces given by indices of vertices for each of ``F`` faces

    Returns
    -------
    adj : list
       For each ``N`` vertex indicated by `vertex_inds`, the vertex
       indices that are neighbors according to the graph given by
       `faces`.
    """
    # Restrict the full adjacency structure to the requested vertices,
    # preserving vertex-index order.
    return [nbrs for ind, nbrs in enumerate(neighbors(faces))
            if ind in vertex_inds]
def neighbors(faces):
    """ Return indices of neighbors for each vertex within `faces`

    Parameters
    ----------
    faces : (F, 3) array-like
       Faces given by indices of vertices for each of ``F`` faces

    Returns
    -------
    adj : list
       For each vertex found within `faces`, the vertex
       indices that are neighbors according to the graph given by
       `faces`. We expand the list with empty lists in between
       non-empty neighbors.
    """
    faces = np.asarray(faces)
    adjacency = {}
    # Each triangle connects every corner to the other two.
    for a, b, c in faces:
        adjacency.setdefault(a, []).extend((b, c))
        adjacency.setdefault(b, []).extend((a, c))
        adjacency.setdefault(c, []).extend((a, b))
    # Pad with empty lists so the output is indexable by vertex number.
    out = [[] for _ in range(max(adjacency) + 1)]
    for vertex, nbrs in adjacency.items():
        out[vertex] = np.sort(np.unique(nbrs))
    return out
def vertinds_faces(vertex_inds, faces):
    """ Return faces containing any of `vertex_inds`

    Parameters
    ----------
    vertex_inds : sequence
       length N. Indices of vertices
    faces : (F, 3) array-like
       Faces given by indices of vertices for each of ``F`` faces

    Returns
    ---------
    less_faces : (P, 3) array
       Only retaining rows in `faces` which contain any of `vertex_inds`
    """
    wanted = set(vertex_inds)
    keep = [row for row, face in enumerate(faces)
            if wanted.intersection(face)]
    return faces[keep]
def vertinds_faceinds(vertex_inds, faces):
    """ Return indices of faces containing any of `vertex_inds`

    Parameters
    ----------
    vertex_inds : sequence
       length N. Indices of vertices
    faces : (F, 3) array-like
       Faces given by indices of vertices for each of ``F`` faces

    Returns
    ---------
    in_inds : sequence
       Indices of `faces` which contain any of `vertex_inds`
    """
    wanted = set(vertex_inds)
    return [row for row, face in enumerate(faces)
            if wanted.intersection(face)]
def edges(vertex_inds, faces):
    r""" Return array of starts and ends of edges from list of faces
    taking regard of direction.

    Parameters
    ----------
    vertex_inds : sequence
       length N. Indices of vertices (unused; kept for interface
       compatibility with the other mesh helpers)
    faces : (F, 3) array-like
       Faces given by indices of vertices for each of F faces

    Returns
    -------
    edgearray : (E2, 2) array
       where E2 = 2*E, twice the number of edges. If e= (a,b) is an
       edge then [a,b] and [b,a] are included in edgearray.
    """
    # Use a dict as an insertion-ordered set to deduplicate directed edges.
    edgedic = {}
    for face in faces:
        a, b, c = face[0], face[1], face[2]
        for edge in ((a, b), (a, c), (b, a), (b, c), (c, a), (c, b)):
            edgedic[edge] = 1
    # Build the (E2, 2) array directly from the edge keys; the original
    # `start, end = zip(*edgedic)` locals were unused and the
    # `column_stack(zip(*edgedic))` round trip produced the same result.
    return np.array(list(edgedic))
def vertex_adjacencies(vertex_inds, faces):
    """ Return matrix which shows the adjacent vertices
    of each vertex

    Parameters
    ----------
    vertex_inds : sequence
       length N. Indices of vertices
    faces : (F, 3) array-like
       Faces given by indices of vertices for each of F faces

    Returns
    -------
    adj : (V, V) sparse COO matrix
       entry (i, j) is 1 when vertices i and j share an edge
    """
    edgearray = edges(vertex_inds, faces)
    n_vertices = len(vertex_inds)
    rows = edgearray[:, 0]
    cols = edgearray[:, 1]
    data = np.ones(edgearray.shape[0])
    return sparse.coo_matrix((data, (rows, cols)),
                             shape=(n_vertices, n_vertices))
def argmax_from_adj(vals, vertex_inds, adj_inds):
    """ Indices of local maxima from `vals` given adjacent points

    See ``reconstruction_performance`` for optimized versions of this
    routine.

    Parameters
    ----------
    vals : (N,) array-like
       values at all vertices referred to in either of `vertex_inds` or
       `adj_inds`'
    vertex_inds : None or (V,) array-like
       indices into `vals` giving vertices that may be local maxima.
       If None, then equivalent to ``np.arange(N)``
    adj_inds : sequence
       For every vertex in ``vertex_inds``, the indices (into `vals`) of
       the neighboring points

    Returns
    -------
    inds : (M,) array
       Indices into `vals` giving local maxima of vals, given topology
       from `adj_inds`, and restrictions from `vertex_inds`.  Inds are
       returned sorted by value at that index - i.e. smallest value (at
       index) first.
    """
    vals = np.asarray(vals)
    if vertex_inds is None:
        vertex_inds = np.arange(vals.shape[0])
    else:
        vertex_inds = np.asarray(vertex_inds)
    maxes = []
    for i, adj in enumerate(adj_inds):
        vert_ind = vertex_inds[i]
        val = vals[vert_ind]
        # local maximum: strictly greater than every neighbor
        if np.all(val > vals[adj]):
            maxes.append((val, vert_ind))
    if len(maxes) == 0:
        return np.array([])
    # BUGFIX: ``maxes.sort(cmp=lambda x, y: cmp(x[0], y[0]))`` relied on the
    # Python 2-only ``cmp`` parameter and builtin; sorting on the value
    # component with ``key`` is equivalent (both sorts are stable).
    maxes.sort(key=lambda val_ind: val_ind[0])
    vals, inds = zip(*maxes)
    return np.array(inds)
def peak_finding_compatible(vertices,
                            hemisphere='z',
                            equator_thresh=None,
                            dist_thresh=None):
    """ Check that a sphere mesh is compatible with ``peak_finding``

    Compatibility means the selected hemisphere is exactly the first half
    of `vertices`, i.e. vertex ``i`` and vertex ``i + N/2`` are antipodal
    pairs.

    Parameters
    ----------
    vertices : (N,3) array-like
       (x, y, z) Point coordinates of N vertices
    hemisphere : str, optional
       Which hemisphere to select.  Values of '-x', '-y', '-z' select,
       respectively negative x, y, and z hemispheres; 'x', 'y', 'z'
       select the positive x, y, and z hemispheres.  Default is 'z'
    equator_thresh : None or float, optional
       Threshold (+-0) to identify points as being on the equator of the
       sphere.   If None, generate a default based on the data type
    dist_thresh : None or float, optional
       Distance threshold used to pair up symmetric equator points; None
       results in a default based on the input data type of ``vertices``

    Returns
    -------
    compatible : bool
       True if the sphere mesh is compatible with ``peak_finding``
    """
    hemi_inds = sym_hemisphere(vertices, hemisphere,
                               equator_thresh, dist_thresh)
    half = vertices.shape[0] // 2
    return np.all(hemi_inds == np.arange(half))
def euler_characteristic_check(vertices, faces, chi=2):
    r'''
    If $f$ = number of faces, $e$ = number_of_edges and $v$ = number of vertices,
    the Euler formula says $f-e+v = 2$ for a mesh on a sphere. For a healthy
    triangulation every face is a triangle, all 3 of whose edges belong to
    exactly two faces, so $2*e = 3*f$. Substituting (and avoiding integer
    division) the check becomes $2*v - f == 2*\chi$, where $\chi$ is the Euler
    characteristic of the mesh.

    - Open chain (track) has $\chi=1$
    - Closed chain (loop) has $\chi=0$
    - Disk has $\chi=1$
    - Sphere has $\chi=2$

    Parameters
    ----------
    vertices : (N,3) array-like
       (x, y, z) Point coordinates of N vertices
    faces : (M,3) array-like of type int
       (i1, i2, i3) Integer indices of the vertices of the (triangular) faces
    chi : int, or None
       The Euler characteristic of the mesh to be checked

    Returns
    -------
    check : bool
       True if the mesh has Euler characteristic chi
    '''
    n_vertices = vertices.shape[0]
    n_faces = faces.shape[0]
    return 2 * n_vertices - n_faces == 2 * chi
def adjacent_uncoloured(vertinds, vertex_colour, face_colour, faces):
    """Return the still-uncoloured vertices and faces adjacent to `vertinds`.

    A face/vertex is "uncoloured" when its colour entry equals the module
    constant ``white``.
    """
    # Faces touching any of the given vertices that are still white.
    touching = np.array(vertinds_faceinds(vertinds, faces))
    uncoloured_adjacent_faces = touching[np.where(face_colour[touching] == white)]
    # All vertices of those faces, deduplicated.
    candidates = np.array(list(set(faces[uncoloured_adjacent_faces].ravel())))
    # Keep only the vertices that are themselves still white.
    white_pos = np.where(vertex_colour[list(candidates)] == white)
    uncoloured_adjacent_vertices = candidates[white_pos]
    return (uncoloured_adjacent_vertices, uncoloured_adjacent_faces)
| [
"numpy.ones",
"numpy.unique",
"numpy.where",
"numpy.asarray",
"numpy.array",
"numpy.sum",
"numpy.nonzero",
"scipy.spatial.Delaunay",
"numpy.finfo",
"numpy.all",
"numpy.arange"
] | [((82, 102), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (90, 102), True, 'import numpy as np\n'), ((1983, 2003), 'numpy.asarray', 'np.asarray', (['vertices'], {}), '(vertices)\n', (1993, 2003), True, 'import numpy as np\n'), ((2944, 3010), 'numpy.where', 'np.where', (['((sel_col < equator_thresh) & (sel_col > -equator_thresh))'], {}), '((sel_col < equator_thresh) & (sel_col > -equator_thresh))\n', (2952, 3010), True, 'import numpy as np\n'), ((5197, 5214), 'numpy.asarray', 'np.asarray', (['faces'], {}), '(faces)\n', (5207, 5214), True, 'import numpy as np\n'), ((9371, 9387), 'numpy.asarray', 'np.asarray', (['vals'], {}), '(vals)\n', (9381, 9387), True, 'import numpy as np\n'), ((9855, 9869), 'numpy.array', 'np.array', (['inds'], {}), '(inds)\n', (9863, 9869), True, 'import numpy as np\n'), ((13062, 13097), 'numpy.where', 'np.where', (['(vertex_colour[l] == white)'], {}), '(vertex_colour[l] == white)\n', (13070, 13097), True, 'import numpy as np\n'), ((3251, 3312), 'numpy.sum', 'np.sum', (['((vertices[untested_inds, :] - test_vert) ** 2)'], {'axis': '(1)'}), '((vertices[untested_inds, :] - test_vert) ** 2, axis=1)\n', (3257, 3312), True, 'import numpy as np\n'), ((3343, 3377), 'numpy.where', 'np.where', (['(test_dists < dist_thresh)'], {}), '(test_dists < dist_thresh)\n', (3351, 3377), True, 'import numpy as np\n'), ((3617, 3633), 'numpy.nonzero', 'np.nonzero', (['inds'], {}), '(inds)\n', (3627, 3633), True, 'import numpy as np\n'), ((4016, 4034), 'scipy.spatial.Delaunay', 'Delaunay', (['vertices'], {}), '(vertices)\n', (4024, 4034), False, 'from scipy.spatial import Delaunay\n'), ((9438, 9462), 'numpy.arange', 'np.arange', (['vals.shape[0]'], {}), '(vals.shape[0])\n', (9447, 9462), True, 'import numpy as np\n'), ((9495, 9518), 'numpy.asarray', 'np.asarray', (['vertex_inds'], {}), '(vertex_inds)\n', (9505, 9518), True, 'import numpy as np\n'), ((9647, 9670), 'numpy.all', 'np.all', (['(val > vals[adj])'], {}), '(val > vals[adj])\n', (9653, 
9670), True, 'import numpy as np\n'), ((9753, 9765), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9761, 9765), True, 'import numpy as np\n'), ((12879, 12925), 'numpy.where', 'np.where', (['(face_colour[adjacent_faces] == white)'], {}), '(face_colour[adjacent_faces] == white)\n', (12887, 12925), True, 'import numpy as np\n'), ((8279, 8306), 'numpy.ones', 'np.ones', (['edgearray.shape[0]'], {}), '(edgearray.shape[0])\n', (8286, 8306), True, 'import numpy as np\n'), ((11424, 11436), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (11433, 11436), True, 'import numpy as np\n'), ((2527, 2556), 'numpy.finfo', 'np.finfo', (['vertices.dtype.type'], {}), '(vertices.dtype.type)\n', (2535, 2556), True, 'import numpy as np\n'), ((5682, 5699), 'numpy.unique', 'np.unique', (['adj[i]'], {}), '(adj[i])\n', (5691, 5699), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Commonly used utility functions."""
# mainly backports from future numpy here
from __future__ import absolute_import, division, print_function
import numpy as np
import nibabel as nib
def thresholding_abs(A, thr, smaller=True, copy=True):
    """thresholding of the adjacency matrix with an absolute value.
    Parameters
    ----------
    A : ndarray, shape(n, n) **or** shape(n_tps, n, n)
        Adjacency matrix of the graph. The matrix must be weighted with zero
        values along the main diagonal.
    thr : float
        Absolute threshold. Edges with weights smaller (or greater, see
        ``smaller``) than the given threshold value will be set to zero.
    smaller : boolean (default=True)
        If True, zero out values smaller than ``thr``; if False, zero out
        values greater than ``thr``.
    copy : boolean
        Whether to copy the input or change the matrix in-place (default=True).
    Returns
    -------
    A_thr : ndarray, shape(n, n) **or** shape(n_tps, n, n)
        Thresholded adjacency matrix.
    Notes
    -----
    Supports also thresholding of **dynamic graphs**. This function was
    tested against BCT and gives the same results as the matlab function
    'threshold_absolute.m'.
    See also
    --------
    thresholding_rel, thresholding_pval, thresholding_M, thresholding_max
    Examples
    --------
    >>> d = get_fmri_data()
    >>> # p-values are not required for thresholding with an given value
    >>> A = adj_static(d, pval=False)
    >>> A_thr = thresholding_abs(A, .3)
    >>> # smallest nonzero edge weight after thresholding
    >>> print A_thr[A_thr > 0].min()
    0.30012873644
    """
    # BUGFIX: previously the inner helper declared its own parameter
    # ``smaller=True`` (shadowing the caller's argument) and the call sites
    # never forwarded it, so ``smaller=False`` was silently ignored and the
    # function always thresholded values *below* ``thr``.  The helper now
    # closes over the outer ``smaller``.
    def _thr(data):
        if smaller:
            data[data < thr] = 0.
        else:
            data[data > thr] = 0.
        return data

    if copy:
        return _thr(A.copy())
    return _thr(A)
def load_mri(func, mask):
    """load MRI voxel data
    The data is converted into a 2D (n_voxel, n_tps) array.
    Parameters
    ----------
    func : string
        Path to imaging data (e.g. nifti).
    mask : string
        Path to binary mask (e.g. nifti) that defines brain regions. Values > 0
        are regarded as brain tissue.
    Returns
    -------
    ts : ndarray, shape(n_voxel, n_tps)
        Timeseries information in a 2D array.
    See Also
    --------
    save_mri: save MRI voxel data to disk.
    Examples
    --------
    >>> ts = load_mri(func='localizer.nii.gz', mask='V1_mask.nii.gz')
    """
    mask_volume = nib.load(mask).get_data()
    func_volume = nib.load(func).get_data()
    # keep only voxels inside the brain mask -> 2D (n_voxel, n_tps)
    masked = func_volume[mask_volume != 0]
    # free the full 4D volume as early as possible
    del func_volume
    return masked
def save_mri(data, mask, fname=None):
    """save MRI voxel data
    Parameters
    ----------
    data : ndarray, shape(n_voxel,) **or** shape(n_voxel, n_tps)
        Voxel data to save to disk.
    mask : string
        Path to binary mask (e.g. nifti) that defines brain regions. Values > 0
        are regarded as brain tissue.
    fname : string
        Filename.
    Examples
    --------
    >>> ts = load_mri(func='localizer.nii.gz', mask='V1_mask.nii.gz')
    >>> ts = ts + 1. # some operation
    >>> save_mri(ts, 'V1_mask.nii.gz', 'localizer_plus_one.nii.gz')
    """
    mask_img = nib.load(mask)
    mask_data = mask_img.get_data()
    affine = mask_img.get_affine()
    # promote 1D input to a single "timepoint" column
    if len(data.shape) == 2:
        n_tps = data.shape[1]
    else:
        n_tps = 1
        data = data[:, np.newaxis]
    # rebuild a 4D volume and scatter the voxel rows back into the mask
    volume = np.zeros((mask_data.shape[0], mask_data.shape[1],
                       mask_data.shape[2], n_tps))
    volume[mask_data != 0] = data
    if fname is not None:
        nib.save(nib.Nifti1Image(volume, affine), fname)
def load_roi_mri(func, mask):
    """returns mean-timeseries based on a provided ROI-mask
    Parameters
    ----------
    func : string
        imaging-file (e.g. nifti). Fuctional imaging data that contains the 3D+
        time information (n_tps)
    mask : string
        imaging-file (e.g. nifti). ROIs are defined as areas with the same mask
        value; all values < 1 are discarded
    Returns
    -------
    ts : ndarray, shape(n_rois, n_tps)
        Timeseries information in a 2D array.
    Notes
    -----
    Mask values don't need to have ascending or descending order, but the
    returned array is always sorted in ascending order.
    See Also
    --------
    save_roi_mri: save ROI data
    Examples
    --------
    >>> ts = load_roi_mri(func, mask, mode='mean')
    """
    m = nib.load(mask).get_data()
    d = nib.load(func).get_data()
    n_samples = d.shape[-1]
    # collapse a 4D mask down to its spatial dimensions
    if len(m.shape) > 3:
        m = m.reshape(m.shape[0], m.shape[1], m.shape[2])
    if not d.shape[:3] == m.shape:
        raise ValueError('The functional data and the given mask are not in \
            the same reference space')
    # unique labels excluding background (0); output rows follow this
    # ascending order
    uni_rois = np.unique(m)[1:]
    ts_data = np.empty((uni_rois.size, n_samples))
    # average all voxels of each ROI into one timeseries row
    for row, label in enumerate(uni_rois):
        ts_data[row, :] = np.mean(d[m == label, :], axis=0)
    del d
    return ts_data
def save_roi_mri(data, mask, fname='roi_data.nii.gz', sort=None):
    """saves ROI data (e.g. local graph metrics) to imaging file
    Parameters
    ----------
    data : ndarray, shape(n,) **or** shape(n, n_tps)
        Local graph metric that corresponds to the ROIs defined in the mask file
    mask : string
        Imaging-file (e.g. nifti). ROIs are defined as areas with the same
        unique mask values. Only mask values > 1 are regarded as brain tissue.
    fname : string
        Filename (default='roi_data.nii.gz').
    sort : ndarray, shape(n,), optional
        Integer providing the mapping between data and mask. If no mapping is
        provided, it is assumed to be in ascending order of unique mask values.
        #TODO carefully check sort parameter
    Notes
    -----
    If ``sort=None`` the mapping between data values and mask is assumed to be
    ``data[0]=np.unique(mask[mask!=0])[0]``
    See Also
    --------
    load_roi_mri: load ROI data
    Examples
    --------
    >>> _, func_path = get_fmri_rss_data()
    >>> _, mask_path, labels, coords = aal(n=116, space='3mm')
    >>> data = load_roi_mri(func_path, mask_path)
    >>> A = adj_static(data)
    >>> A[A<0.1] = 0
    >>> k = degree(A)
    >>> print k.shape
    (116,)
    >>> # save local degree k to nifti file
    >>> save_roi_mri(k, mask_path, fname='local_degree.nii.gz')
    """
    # load mask data
    f = nib.load(mask)
    m = f.get_data()
    aff = f.get_affine()
    uni_rois = np.unique(m[m != 0])  # without zeros
    # accept 1D (single value per ROI) or 2D (per-ROI timeseries) input
    if data.ndim == 2:
        n_tps = data.shape[1]
    else:
        n_tps = 1
        data = data[:, np.newaxis]
    if not uni_rois.size == data.shape[0]:
        raise ValueError('The number of nodes provided by data and mask do not\
            match')
    xdim, ydim, zdim = m.shape
    res = np.zeros((xdim, ydim, zdim, n_tps))  # + time
    # broadcast each ROI's row of values into all voxels of that ROI,
    # walking the labels in ascending order
    roi_counter = 0
    for i in uni_rois:
        res[m == i] = data[roi_counter, :]
        roi_counter += 1
    if sort is not None:
        # NOTE(review): this indexes the first *spatial* axis (x) of the 4D
        # volume, not the ROI axis -- looks inconsistent with the documented
        # "mapping between data and mask" purpose of ``sort``; confirm intent
        # (see the #TODO in the docstring).
        res = res[sort, :]
    # save to disk
    if fname is not None:
        nib.save(nib.Nifti1Image(res, aff), fname)
| [
"numpy.mean",
"numpy.unique",
"nibabel.load",
"numpy.zeros",
"numpy.empty",
"nibabel.Nifti1Image"
] | [((3741, 3755), 'nibabel.load', 'nib.load', (['mask'], {}), '(mask)\n', (3749, 3755), True, 'import nibabel as nib\n'), ((3952, 3987), 'numpy.zeros', 'np.zeros', (['(s[0], s[1], s[2], n_tps)'], {}), '((s[0], s[1], s[2], n_tps))\n', (3960, 3987), True, 'import numpy as np\n'), ((6100, 6129), 'numpy.empty', 'np.empty', (['(n_rois, n_samples)'], {}), '((n_rois, n_samples))\n', (6108, 6129), True, 'import numpy as np\n'), ((8206, 8220), 'nibabel.load', 'nib.load', (['mask'], {}), '(mask)\n', (8214, 8220), True, 'import nibabel as nib\n'), ((8282, 8302), 'numpy.unique', 'np.unique', (['m[m != 0]'], {}), '(m[m != 0])\n', (8291, 8302), True, 'import numpy as np\n'), ((8636, 8671), 'numpy.zeros', 'np.zeros', (['(xdim, ydim, zdim, n_tps)'], {}), '((xdim, ydim, zdim, n_tps))\n', (8644, 8671), True, 'import numpy as np\n'), ((6025, 6037), 'numpy.unique', 'np.unique', (['m'], {}), '(m)\n', (6034, 6037), True, 'import numpy as np\n'), ((6706, 6735), 'numpy.mean', 'np.mean', (['d[m == i, :]'], {'axis': '(0)'}), '(d[m == i, :], axis=0)\n', (6713, 6735), True, 'import numpy as np\n'), ((2906, 2920), 'nibabel.load', 'nib.load', (['mask'], {}), '(mask)\n', (2914, 2920), True, 'import nibabel as nib\n'), ((2962, 2976), 'nibabel.load', 'nib.load', (['func'], {}), '(func)\n', (2970, 2976), True, 'import nibabel as nib\n'), ((4084, 4109), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['res', 'aff'], {}), '(res, aff)\n', (4099, 4109), True, 'import nibabel as nib\n'), ((5538, 5552), 'nibabel.load', 'nib.load', (['mask'], {}), '(mask)\n', (5546, 5552), True, 'import nibabel as nib\n'), ((5594, 5608), 'nibabel.load', 'nib.load', (['func'], {}), '(func)\n', (5602, 5608), True, 'import nibabel as nib\n'), ((8909, 8934), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['res', 'aff'], {}), '(res, aff)\n', (8924, 8934), True, 'import nibabel as nib\n')] |
"""
Procedures for running a privacy evaluation on a generative model
"""
from sklearn.metrics import roc_curve, auc
from os import path
from numpy import concatenate, mean, ndarray
from pandas import DataFrame
from pandas.api.types import is_numeric_dtype
from multiprocessing import Pool
from synthetic_data.privacy_attacks.membership_inference import LABEL_IN, LABEL_OUT, generate_mia_shadow_data_shufflesplit
from synthetic_data.privacy_attacks.attribute_inference import AttributeInferenceAttackLinearRegression
from .datagen import convert_df_to_array
from warnings import filterwarnings
filterwarnings('ignore')
from .logging import LOGGER
PROCESSES = 16
def get_roc_auc(trueLables, scores):
    """
    Calculate ROC curve of a binary classifier
    :param trueLables: list: ground truth
    :param scores: list: scores of a binary classifier for correct class
    :return: tuple: (false positive rate, true positive rate, auc)
    """
    false_pos, true_pos, _ = roc_curve(trueLables, scores)
    curve_area = auc(false_pos, true_pos)
    return false_pos, true_pos, curve_area
def get_scores(classProbabilities, trueLabels):
    """
    Get scores of correct class
    :param classProbabilities: list: list of arrays of prediction probabilities for each class
    :param trueLabels: list: correct class labels
    :return: list: scores for correct class
    """
    scores = []
    for probs, label in zip(classProbabilities, trueLabels):
        scores.append(probs[label])
    return scores
def get_fp_tn_fn_tp(guesses, trueLabels):
    """Count the confusion-matrix cells of membership guesses vs. truth.

    Returns (false positives, true negatives, false negatives,
    true positives) for the LABEL_IN/LABEL_OUT membership labels.
    """
    fp = tn = fn = tp = 0
    for guess, truth in zip(guesses, trueLabels):
        if truth == LABEL_OUT and guess == LABEL_IN:
            fp += 1
        elif truth == LABEL_OUT and guess == LABEL_OUT:
            tn += 1
        elif truth == LABEL_IN and guess == LABEL_OUT:
            fn += 1
        elif truth == LABEL_IN and guess == LABEL_IN:
            tp += 1
    return fp, tn, fn, tp
def get_attack_accuracy(tp, tn, nguesses):
    """Fraction of membership guesses that were correct."""
    correct = tp + tn
    return correct / nguesses
def get_attack_precision(fp, tp):
    """Precision tp/(tp+fp); falls back to .5 when no positive guesses."""
    predicted_positive = tp + fp
    if predicted_positive == 0:
        return .5
    return tp / predicted_positive
def get_attack_recall(tp, fn):
    """Recall tp/(tp+fn); falls back to .5 when there are no positives."""
    actual_positive = tp + fn
    if actual_positive == 0:
        return .5
    return tp / actual_positive
def get_record_privacy_loss(pSuccess, pPrior):
    """Per-record privacy loss: the attacker's advantage over the prior."""
    advantage = pSuccess - pPrior
    return advantage
def get_record_privacy_gain(privLossRawT, privLossSynT):
    """Privacy gained by publishing synthetic instead of raw data."""
    return 0.5 * (privLossRawT - privLossSynT)
def evaluate_mia(GenModel, attacksList, rawWithoutTargets, targetRecords, targetIDs, rawAidx, rawTindices, sizeRawT, sizeSynT, nSynT, nSynA, nShadows, metadata):
    """Run the membership-inference evaluation for every target record.

    Fans one ``worker_run_mia`` task per target out to a process pool, then
    merges the per-target result dicts into a single dict keyed by attack
    model name, each entry accumulating the per-run lists produced by
    ``run_mia`` ('TargetID', 'TestRun', 'ProbSuccess', privacy losses/gain).
    """
    LOGGER.info(f'Start evaluation of generative target model {GenModel.__name__} on {len(targetIDs)} targets under {len(attacksList)} different MIA models.')
    # Train and test MIA per target
    with Pool(processes=PROCESSES) as pool:
        tasks = [(GenModel, attacksList, targetRecords.loc[[tid], :], tid, rawWithoutTargets, metadata, rawAidx, rawTindices, sizeRawT, sizeSynT, nSynT, nSynA, nShadows) for tid in targetIDs]
        resultsList = pool.map(worker_run_mia, tasks)
    # One empty accumulator per attack model; filled from the workers below.
    results = {
        AM.__name__: {
            'TargetID': [],
            'TestRun': [],
            'ProbSuccess': [],
            'RecordPrivacyLossSyn': [],
            'RecordPrivacyLossRaw': [],
            'RecordPrivacyGain': []
        } for AM in attacksList }
    # Merge each worker's per-target lists into the global accumulator.
    for res in resultsList:
        for AM in attacksList:
            for k in res[AM.__name__].keys():
                results[AM.__name__][k].extend(res[AM.__name__][k])
    for AM in attacksList:
        LOGGER.info(f'Mean record privacy gain across {len(targetRecords)} Targets with Attack {AM.__name__}: {mean(results[AM.__name__]["RecordPrivacyGain"])}%')
    return results
def worker_run_mia(params):
    """Pool worker: evaluate all MIA models for one target record.

    Trains the attack models on shadow-model data generated around the
    target, then for every raw-data test split fits the generative model
    twice (without and with the target) and scores the attacks on the
    resulting balanced synthetic test set.  Returns the same per-attack
    dict-of-lists structure that ``evaluate_mia`` merges.
    """
    GenModel, attacksList, target, targetID, rawWithoutTargets, metadata, rawAidx, rawTindices, sizeRawT, sizeSynT, nSynT, nSynA, nShadows = params
    # Generate shadow model data for training attacks on this target
    if GenModel.datatype is DataFrame:
        rawA = rawWithoutTargets.loc[rawAidx, :]
    else:
        # model consumes arrays: convert both attacker data and the target
        rawA = convert_df_to_array(rawWithoutTargets.loc[rawAidx, :], metadata)
        target = convert_df_to_array(target, metadata)
    synA, labelsSynA = generate_mia_shadow_data_shufflesplit(GenModel, target, rawA, sizeRawT, sizeSynT, nShadows, nSynA)
    for Attack in attacksList:
        Attack.train(synA, labelsSynA)
    # Clean up
    del synA, labelsSynA
    # One empty accumulator per attack model, filled per test run below.
    results = {
        AM.__name__: {
            'TargetID': [],
            'TestRun': [],
            'ProbSuccess': [],
            'RecordPrivacyLossSyn': [],
            'RecordPrivacyLossRaw': [],
            'RecordPrivacyGain': []
        } for AM in attacksList }
    for nr, rt in enumerate(rawTindices):
        # Generate synthetic datasets from generative model trained on RawT WITHOUT Target
        if GenModel.datatype is DataFrame:
            rawTout = rawWithoutTargets.loc[rt, :]
        else:
            rawTout = convert_df_to_array(rawWithoutTargets.loc[rt, :], metadata)
        GenModel.fit(rawTout) # Fit model
        synTwithoutTarget = [GenModel.generate_samples(sizeSynT) for _ in range(nSynT)]
        # Generate synthetic datasets from generative model trained on RawT PLUS Target
        if GenModel.datatype is DataFrame:
            rawTin = rawTout.append(target)
        else:
            # arrays concatenate along rows; a 1D target needs a row axis
            if len(target.shape) == 1:
                target = target.reshape(1, len(target))
            rawTin = concatenate([rawTout, target])
        GenModel.fit(rawTin)
        synTwithTarget = [GenModel.generate_samples(sizeSynT) for _ in range(nSynT)]
        # Create balanced test dataset
        synT = synTwithTarget + synTwithoutTarget
        labelsSynT = [LABEL_IN for _ in range(len(synTwithTarget))] + [LABEL_OUT for _ in range(len(synTwithoutTarget))]
        # Run attacks on synthetic datasets from target generative model
        for AM in attacksList:
            res = run_mia(AM, synT, labelsSynT, targetID, nr)
            for k, v in res.items():
                results[AM.__name__][k].extend(v)
        del synT, labelsSynT
    return results
def run_mia(Attack, synT, labelsSynT, targetID, nr):
    """Score one trained MIA model on a labelled synthetic test set.

    Returns a dict of per-sample lists: success probabilities, privacy
    losses on synthetic and raw data, and the resulting privacy gain.
    """
    probSuccessSyn = Attack.get_probability_of_success(synT, labelsSynT)
    priors = [Attack._get_prior_probability(label) for label in labelsSynT]
    privLossSyn = [get_record_privacy_loss(p_success, p_prior)
                   for p_success, p_prior in zip(probSuccessSyn, priors)]
    # Raw data leaks the target with certainty -> success probability 1.
    privLossRaw = [get_record_privacy_loss(1, p_prior) for p_prior in priors]
    privGain = [get_record_privacy_gain(loss_raw, loss_syn)
                for loss_raw, loss_syn in zip(privLossRaw, privLossSyn)]
    n = len(labelsSynT)
    return {
        'TargetID': [targetID] * n,
        'TestRun': [f'Run {nr + 1}'] * n,
        'ProbSuccess': probSuccessSyn,
        'RecordPrivacyLossSyn': privLossSyn,
        'RecordPrivacyLossRaw': privLossRaw,
        'RecordPrivacyGain': privGain
    }
def craft_outlier(data, size):
    """Craft an artificial outlier record, repeated ``size`` times.

    Numeric columns take the column maximum; categorical columns take
    the least frequent value.  The returned frame is indexed 'Crafted'.
    """
    outlier = DataFrame(columns=list(data))
    for column in list(data):
        series = data[column]
        if is_numeric_dtype(series):
            outlier[column] = [series.max()] * size
        else:
            rarest = series.value_counts().index[-1]
            outlier[column] = [rarest] * size
    outlier.index = ['Crafted'] * size
    return outlier
def evaluate_ai(GenModel, rawWithoutTargets, targetRecords, targetIDs, rawA, rawTindices, sensitiveAttribute, sizeSynT, nSynT, metadata):
    """Run the MLE attribute-inference evaluation for every target.

    For each raw test split: trains a linear-regression AI baseline on the
    raw data, fits the generative model and samples ``nSynT`` synthetic
    copies, then scores attacks trained on each copy (in parallel) against
    all targets.  Returns a dict-of-lists keyed by attack name.
    """
    LOGGER.info(f'Start evaluation of generative target model {GenModel.__name__} on {len(targetIDs)} targets under MLE-AI.')
    # Accumulator; 'LinearRegression' matches the attack class __name__.
    results = {'LinearRegression':
        {
            'Target': [],
            'TrueValue': [],
            'ProbCorrectPrior': [],
            'MLERawT': [],
            'SigmaRawT': [],
            'ProbCorrectRawT': [],
            'MLESynT': [],
            'SigmaSynT': [],
            'ProbCorrectSynT': [],
            'TestRun': []
        }
    }
    for nr, rt in enumerate(rawTindices):
        LOGGER.info(f'Raw target test set {nr+1}/{len(rawTindices)}')
        # Get raw target test set
        rawT = rawWithoutTargets.loc[rt, :]
        # Get baseline from raw data
        AttackBaseline = AttributeInferenceAttackLinearRegression(sensitiveAttribute, metadata, rawA)
        LOGGER.info(f'Train Attack {AttackBaseline.__name__} on RawT')
        AttackBaseline.train(rawT)
        # Train generative model on raw data and sample synthetic copies
        LOGGER.info(f'Start fitting {GenModel.__class__.__name__} to RawT')
        if GenModel.datatype is ndarray:
            rawT = convert_df_to_array(rawT, metadata)
        GenModel.fit(rawT)
        LOGGER.info(f'Sample {nSynT} copies of synthetic data from {GenModel.__class__.__name__}')
        synT = [GenModel.generate_samples(sizeSynT) for _ in range(nSynT)]
        LOGGER.info(f'Start Attack evaluation on SynT for {len(targetIDs)} targets')
        # One worker per synthetic copy; each scores all targets.
        with Pool(processes=PROCESSES) as pool:
            tasks = [(s, targetRecords, targetIDs, sensitiveAttribute, AttackBaseline, metadata, rawA) for s in synT]
            resList = pool.map(worker_run_mleai, tasks)
        # Gather results
        for res in resList:
            for k, v in res[AttackBaseline.__name__].items():
                results[AttackBaseline.__name__][k].extend(v)
        results[AttackBaseline.__name__]['TestRun'].extend([f'Run {nr + 1}' for _ in range(len(targetIDs))])
    return results
def worker_run_mleai(params):
    """Pool worker: score one synthetic dataset against all targets.

    Trains a fresh linear-regression AI attack on the synthetic copy and,
    per target, records both the raw-data baseline results (from the
    pre-trained ``AttackBaseline``) and the synthetic-data attack results.
    Both attacks share the same ``__name__``, so they fill one dict entry.
    """
    syn, targetRecords, targetIDs, sensitiveAttribute, AttackBaseline, metadata, rawA = params
    results = {
        AttackBaseline.__name__: {
            'Target': [],
            'TrueValue': [],
            'ProbCorrectPrior': [],
            'MLERawT': [],
            'SigmaRawT': [],
            'ProbCorrectRawT': [],
            'MLESynT': [],
            'SigmaSynT': [],
            'ProbCorrectSynT': []
        }
    }
    Attack = AttributeInferenceAttackLinearRegression(sensitiveAttribute, metadata, rawA)
    Attack.train(syn)
    for tid in targetIDs:
        t = targetRecords.loc[[tid], :]
        # split the target into known attributes and the secret to infer
        targetAux = t.drop_duplicates().drop(sensitiveAttribute, axis=1)
        targetSecret = t.drop_duplicates().loc[tid, sensitiveAttribute]
        # Baseline on raw data
        results[AttackBaseline.__name__]['Target'].append(tid)
        results[AttackBaseline.__name__]['TrueValue'].append(targetSecret)
        results[AttackBaseline.__name__]['ProbCorrectPrior'].append(AttackBaseline.get_prior_probability(targetSecret))
        results[AttackBaseline.__name__]['SigmaRawT'].append(AttackBaseline.sigma)
        results[AttackBaseline.__name__]['MLERawT'].extend(AttackBaseline.attack(targetAux).tolist())
        results[AttackBaseline.__name__]['ProbCorrectRawT'].extend(AttackBaseline.get_likelihood(targetAux, targetSecret).tolist())
        # Get attack results for this synthetic dataset
        results[Attack.__name__]['SigmaSynT'].append(Attack.sigma)
        results[Attack.__name__]['MLESynT'].extend(Attack.attack(targetAux).tolist())
        results[Attack.__name__]['ProbCorrectSynT'].extend(Attack.get_likelihood(targetAux, targetSecret).tolist())
    return results
| [
"warnings.filterwarnings",
"numpy.mean",
"sklearn.metrics.auc",
"pandas.api.types.is_numeric_dtype",
"sklearn.metrics.roc_curve",
"multiprocessing.Pool",
"synthetic_data.privacy_attacks.membership_inference.generate_mia_shadow_data_shufflesplit",
"numpy.concatenate",
"synthetic_data.privacy_attacks.... | [((597, 621), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (611, 621), False, 'from warnings import filterwarnings\n'), ((970, 999), 'sklearn.metrics.roc_curve', 'roc_curve', (['trueLables', 'scores'], {}), '(trueLables, scores)\n', (979, 999), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1011, 1024), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1014, 1024), False, 'from sklearn.metrics import roc_curve, auc\n'), ((4134, 4236), 'synthetic_data.privacy_attacks.membership_inference.generate_mia_shadow_data_shufflesplit', 'generate_mia_shadow_data_shufflesplit', (['GenModel', 'target', 'rawA', 'sizeRawT', 'sizeSynT', 'nShadows', 'nSynA'], {}), '(GenModel, target, rawA, sizeRawT,\n sizeSynT, nShadows, nSynA)\n', (4171, 4236), False, 'from synthetic_data.privacy_attacks.membership_inference import LABEL_IN, LABEL_OUT, generate_mia_shadow_data_shufflesplit\n'), ((9902, 9978), 'synthetic_data.privacy_attacks.attribute_inference.AttributeInferenceAttackLinearRegression', 'AttributeInferenceAttackLinearRegression', (['sensitiveAttribute', 'metadata', 'rawA'], {}), '(sensitiveAttribute, metadata, rawA)\n', (9942, 9978), False, 'from synthetic_data.privacy_attacks.attribute_inference import AttributeInferenceAttackLinearRegression\n'), ((2686, 2711), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'PROCESSES'}), '(processes=PROCESSES)\n', (2690, 2711), False, 'from multiprocessing import Pool\n'), ((6973, 7001), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['data[attr]'], {}), '(data[attr])\n', (6989, 7001), False, 'from pandas.api.types import is_numeric_dtype\n'), ((8128, 8204), 'synthetic_data.privacy_attacks.attribute_inference.AttributeInferenceAttackLinearRegression', 'AttributeInferenceAttackLinearRegression', (['sensitiveAttribute', 'metadata', 'rawA'], {}), '(sensitiveAttribute, metadata, rawA)\n', (8168, 8204), False, 'from 
synthetic_data.privacy_attacks.attribute_inference import AttributeInferenceAttackLinearRegression\n'), ((5383, 5413), 'numpy.concatenate', 'concatenate', (['[rawTout, target]'], {}), '([rawTout, target])\n', (5394, 5413), False, 'from numpy import concatenate, mean, ndarray\n'), ((8859, 8884), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'PROCESSES'}), '(processes=PROCESSES)\n', (8863, 8884), False, 'from multiprocessing import Pool\n'), ((3556, 3603), 'numpy.mean', 'mean', (["results[AM.__name__]['RecordPrivacyGain']"], {}), "(results[AM.__name__]['RecordPrivacyGain'])\n", (3560, 3603), False, 'from numpy import concatenate, mean, ndarray\n')] |
import numpy as np
from model import generate_recommendations
# Demo: recommend k tokens for one user from a trained WALS model.
user_address = '0x8c373ed467f3eabefd8633b52f4e1b2df00c9fe8'
already_rated = [
    '0x006bea43baa3f7a6f765f14f10a1a1b08334ef45',
    '0x5102791ca02fc3595398400bfe0e33d7b6c82267',
    '0x68d57c9a1c35f63e2c83ee8e49a64e9d70528d25',
    '0xc528c28fec0a90c083328bc45f587ee215760a0f',
]
k = 5
model_dir = '../jobs/wals_ml_local_20190107_235006'

# Load the trained model artefacts: id maps and latent factor matrices.
user_map = np.load(model_dir + "/model/user.npy")
item_map = np.load(model_dir + "/model/item.npy")
row_factor = np.load(model_dir + "/model/row.npy")
col_factor = np.load(model_dir + "/model/col.npy")

# Map addresses to matrix indices; searchsorted assumes the id maps are
# stored in ascending order -- TODO confirm against the training job.
user_idx = np.searchsorted(user_map, user_address)
user_rated = [np.searchsorted(item_map, token) for token in already_rated]

recommendations = generate_recommendations(
    user_idx, user_rated, row_factor, col_factor, k)
tokens = [item_map[idx] for idx in recommendations]
print(tokens)
"numpy.searchsorted",
"numpy.load",
"model.generate_recommendations"
] | [((392, 430), 'numpy.load', 'np.load', (["(model_dir + '/model/user.npy')"], {}), "(model_dir + '/model/user.npy')\n", (399, 430), True, 'import numpy as np\n'), ((442, 480), 'numpy.load', 'np.load', (["(model_dir + '/model/item.npy')"], {}), "(model_dir + '/model/item.npy')\n", (449, 480), True, 'import numpy as np\n'), ((494, 531), 'numpy.load', 'np.load', (["(model_dir + '/model/row.npy')"], {}), "(model_dir + '/model/row.npy')\n", (501, 531), True, 'import numpy as np\n'), ((545, 582), 'numpy.load', 'np.load', (["(model_dir + '/model/col.npy')"], {}), "(model_dir + '/model/col.npy')\n", (552, 582), True, 'import numpy as np\n'), ((594, 633), 'numpy.searchsorted', 'np.searchsorted', (['user_map', 'user_address'], {}), '(user_map, user_address)\n', (609, 633), True, 'import numpy as np\n'), ((720, 793), 'model.generate_recommendations', 'generate_recommendations', (['user_idx', 'user_rated', 'row_factor', 'col_factor', 'k'], {}), '(user_idx, user_rated, row_factor, col_factor, k)\n', (744, 793), False, 'from model import generate_recommendations\n'), ((648, 676), 'numpy.searchsorted', 'np.searchsorted', (['item_map', 'i'], {}), '(item_map, i)\n', (663, 676), True, 'import numpy as np\n')] |
import copy
import json
import numpy
import cepton_sdk.common.transform
from cepton_sdk.common import *
_all_builder = AllBuilder(__name__)
def _convert_keys_to_int(d, ignore_invalid=False):
d_int = {}
for key, value in d.items():
try:
key = int(key)
except:
if ignore_invalid:
continue
else:
raise
d_int[key] = value
return d_int
def _convert_keys_to_string(d):
return {str(key): value for (key, value) in d.items()}
def _get_pretty_json(d):
    """Serialize ``d`` to an indented, key-sorted JSON string."""
    # JSON object keys must be strings.
    stringified = _convert_keys_to_string(d)
    return json.dumps(
        stringified, sort_keys=True, indent=2, separators=(',', ': '))
def _save_pretty_json(d, f):
    """Write ``d`` to the open file handle ``f`` as pretty-printed JSON."""
    pretty = _get_pretty_json(d)
    f.write(pretty)
class _ManagerBase:
    """Abstract base for settings managers.

    Subclasses implement the dict round-trip (``update_from_dict`` /
    ``to_dict``) and per-sensor point processing; this base supplies the
    JSON and file plumbing on top of those hooks.
    """

    def update_from_dict(self, input_dict):
        """Hook: load settings from a plain dict."""
        raise NotImplementedError()

    def to_dict(self):
        """Hook: dump settings to a plain dict."""
        raise NotImplementedError()

    def update_from_json(self, input_json):
        self.update_from_dict(input_json)

    @classmethod
    def from_json(cls, input_json):
        instance = cls()
        instance.update_from_json(input_json)
        return instance

    def to_json(self):
        # JSON object keys must be strings.
        return _convert_keys_to_string(self.to_dict())

    def update_from_file(self, input_file):
        self.update_from_json(json.load(input_file))

    @classmethod
    def from_file(cls, input_file):
        instance = cls()
        instance.update_from_file(input_file)
        return instance

    def to_file(self, output_file):
        _save_pretty_json(self.to_json(), output_file)

    def process_sensor_points(self, sensor_serial_number, points):
        """Hook: process one sensor's points."""
        raise NotImplementedError

    def process_points(self, points_dict):
        """Apply ``process_sensor_points`` to every entry of the dict."""
        for serial_number, points in points_dict.items():
            self.process_sensor_points(serial_number, points)
        return points_dict
class SensorTransformManager(_ManagerBase):
    """Holds a per-sensor rigid transform and applies it to point clouds."""

    def __init__(self):
        # sensor serial number (int) -> Transform3d
        self.transforms = {}

    def update_from_dict(self, transforms_dict):
        for key, transform_dict in transforms_dict.items():
            try:
                serial_number = int(key)
            except:
                # non-integer keys are not sensor entries
                continue
            transform = cepton_sdk.common.transform.Transform3d()
            transform.translation = numpy.array(
                transform_dict["translation"], dtype=float)
            transform.rotation = \
                cepton_sdk.common.transform.Quaternion.from_vector(
                    numpy.array(transform_dict["rotation"], dtype=float))
            self.transforms[serial_number] = transform

    def to_dict(self):
        result = {}
        for serial_number, transform in self.transforms.items():
            result[serial_number] = {
                "translation": transform.translation.tolist(),
                "rotation": transform.rotation.to_vector().tolist(),
            }
        return result

    def process_sensor_points(self, sensor_serial_number, points):
        """Apply this sensor's transform to the point positions in place."""
        transform = self.transforms.get(sensor_serial_number)
        if (transform is None) or (len(points) == 0):
            return points
        points.positions[:, :] = transform.apply(points.positions)
        return points
class SensorClip:
    """Per-sensor clipping region: a distance window plus an image-plane box.

    Default bounds are infinite, so a fresh instance clips nothing.
    """

    def __init__(self):
        self.distance_lb = -numpy.inf
        self.distance_ub = numpy.inf
        self.image_lb = numpy.full([2], -numpy.inf)
        self.image_ub = numpy.full([2], numpy.inf)

    @classmethod
    def from_dict(cls, d):
        """Build from a settings dict; missing keys keep their defaults."""
        self = cls()
        self.distance_lb = d.get("min_distance", self.distance_lb)
        self.distance_ub = d.get("max_distance", self.distance_ub)
        # image axis 0 <-> x, axis 1 <-> z
        for axis, (lo_key, hi_key) in enumerate(
                [("min_image_x", "max_image_x"),
                 ("min_image_z", "max_image_z")]):
            if lo_key in d:
                self.image_lb[axis] = d[lo_key]
            if hi_key in d:
                self.image_ub[axis] = d[hi_key]
        return self

    def find_points(self, points):
        """Boolean mask of points falling OUTSIDE the accepted region."""
        if len(points) == 0:
            return numpy.array([], dtype=bool)
        too_close = points.distances <= self.distance_lb
        too_far = points.distances > self.distance_ub
        below_box = numpy.any(points.image_positions < self.image_lb, axis=-1)
        above_box = numpy.any(points.image_positions > self.image_ub, axis=-1)
        return too_close | too_far | below_box | above_box
class FocusClip:
    """Axis-aligned 3D box in world coordinates; defaults accept everything."""

    def __init__(self):
        self.lb = numpy.full([3], -numpy.inf)
        self.ub = numpy.full([3], numpy.inf)

    @classmethod
    def from_dict(cls, d):
        """Build from a settings dict; missing keys keep infinite bounds."""
        self = cls()
        for axis, name in enumerate("xyz"):
            if "min_" + name in d:
                self.lb[axis] = d["min_" + name]
            if "max_" + name in d:
                self.ub[axis] = d["max_" + name]
        return self

    def find_points(self, points):
        """Boolean mask of points lying outside the box."""
        if len(points) == 0:
            return numpy.array([], dtype=bool)
        below = numpy.any(points.positions < self.lb, axis=-1)
        above = numpy.any(points.positions > self.ub, axis=-1)
        return below | above
class GroundClip:
    """Clips near-ground points: below ``height`` AND within ``distance_ub``.

    Defaults (height=inf, max_distance=0) clip nothing, because no
    distance is < 0.
    """

    def __init__(self):
        self.height = numpy.inf
        self.distance_ub = 0

    @classmethod
    def from_dict(cls, d):
        """Build from a settings dict; missing keys keep their defaults."""
        self = cls()
        self.height = d.get("height", self.height)
        self.distance_ub = d.get("max_distance", self.distance_ub)
        return self

    def find_points(self, points):
        """Boolean mask of points that are both low (z) and close."""
        if len(points) == 0:
            return numpy.array([], dtype=bool)
        is_low = points.positions[:, 2] < self.height
        is_near = points.distances < self.distance_ub
        return is_low & is_near
class SensorClipManager(_ManagerBase):
    """Combines the global focus box, ground clip, and per-sensor clips.

    Clipped points are not removed; their VALID flag is cleared instead.
    """

    def __init__(self):
        self.focus_clip = FocusClip()
        self.ground_clip = GroundClip()
        # sensor serial number (int) -> SensorClip
        self.clips = {}

    def update_from_dict(self, d):
        for key, sub_dict in d.items():
            if key == "focus":
                self.focus_clip = FocusClip.from_dict(sub_dict)
                continue
            if key == "ground":
                self.ground_clip = GroundClip.from_dict(sub_dict)
                continue
            try:
                serial_number = int(key)
            except:
                # not a sensor entry
                continue
            self.clips[serial_number] = SensorClip.from_dict(sub_dict)

    def process_sensor_points(self, sensor_serial_number, points):
        """Mark points clipped by any region as invalid (in place)."""
        if len(points) == 0:
            return points
        masks = [
            self.focus_clip.find_points(points),
            self.ground_clip.find_points(points),
        ]
        sensor_clip = self.clips.get(sensor_serial_number)
        if sensor_clip is not None:
            masks.append(sensor_clip.find_points(points))
        is_clipped = numpy.logical_or.reduce(masks)
        # NOTE(review): relies on ``cepton_sdk.PointFlag`` being reachable
        # through the ``cepton_sdk.common.transform`` import at file top --
        # confirm the package exposes it.
        points.flags[is_clipped, cepton_sdk.PointFlag.VALID] = False
        return points
__all__ = _all_builder.get()
| [
"json.dumps",
"numpy.any",
"numpy.array",
"numpy.logical_and.reduce",
"json.load",
"numpy.full",
"numpy.logical_or.reduce"
] | [((606, 669), 'json.dumps', 'json.dumps', (['d'], {'sort_keys': '(True)', 'indent': '(2)', 'separators': "(',', ': ')"}), "(d, sort_keys=True, indent=2, separators=(',', ': '))\n", (616, 669), False, 'import json\n'), ((1361, 1382), 'json.load', 'json.load', (['input_file'], {}), '(input_file)\n', (1370, 1382), False, 'import json\n'), ((3647, 3674), 'numpy.full', 'numpy.full', (['[2]', '(-numpy.inf)'], {}), '([2], -numpy.inf)\n', (3657, 3674), False, 'import numpy\n'), ((3699, 3725), 'numpy.full', 'numpy.full', (['[2]', 'numpy.inf'], {}), '([2], numpy.inf)\n', (3709, 3725), False, 'import numpy\n'), ((4759, 4786), 'numpy.full', 'numpy.full', (['[3]', '(-numpy.inf)'], {}), '([3], -numpy.inf)\n', (4769, 4786), False, 'import numpy\n'), ((4805, 4831), 'numpy.full', 'numpy.full', (['[3]', 'numpy.inf'], {}), '([3], numpy.inf)\n', (4815, 4831), False, 'import numpy\n'), ((6033, 6139), 'numpy.logical_and.reduce', 'numpy.logical_and.reduce', (['[points.positions[:, 2] < self.height, points.distances < self.distance_ub]'], {}), '([points.positions[:, 2] < self.height, points.\n distances < self.distance_ub])\n', (6057, 6139), False, 'import numpy\n'), ((7257, 7297), 'numpy.logical_or.reduce', 'numpy.logical_or.reduce', (['is_clipped_list'], {}), '(is_clipped_list)\n', (7280, 7297), False, 'import numpy\n'), ((2434, 2489), 'numpy.array', 'numpy.array', (["transform_dict['translation']"], {'dtype': 'float'}), "(transform_dict['translation'], dtype=float)\n", (2445, 2489), False, 'import numpy\n'), ((2513, 2565), 'numpy.array', 'numpy.array', (["transform_dict['rotation']"], {'dtype': 'float'}), "(transform_dict['rotation'], dtype=float)\n", (2524, 2565), False, 'import numpy\n'), ((4374, 4401), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'bool'}), '([], dtype=bool)\n', (4385, 4401), False, 'import numpy\n'), ((5368, 5395), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'bool'}), '([], dtype=bool)\n', (5379, 5395), False, 'import numpy\n'), ((5989, 6016), 
'numpy.array', 'numpy.array', (['[]'], {'dtype': 'bool'}), '([], dtype=bool)\n', (6000, 6016), False, 'import numpy\n'), ((4555, 4613), 'numpy.any', 'numpy.any', (['(points.image_positions < self.image_lb)'], {'axis': '(-1)'}), '(points.image_positions < self.image_lb, axis=-1)\n', (4564, 4613), False, 'import numpy\n'), ((4627, 4685), 'numpy.any', 'numpy.any', (['(points.image_positions > self.image_ub)'], {'axis': '(-1)'}), '(points.image_positions > self.image_ub, axis=-1)\n', (4636, 4685), False, 'import numpy\n'), ((5450, 5496), 'numpy.any', 'numpy.any', (['(points.positions < self.lb)'], {'axis': '(-1)'}), '(points.positions < self.lb, axis=-1)\n', (5459, 5496), False, 'import numpy\n'), ((5510, 5556), 'numpy.any', 'numpy.any', (['(points.positions > self.ub)'], {'axis': '(-1)'}), '(points.positions > self.ub, axis=-1)\n', (5519, 5556), False, 'import numpy\n')] |
import numpy as np
import sys
from contextlib import closing
from io import StringIO
from gym import Env, spaces, utils
from gym.utils import seeding
# Action indices for the 4-connected grid world (used by BoxSpaceCliffWalker).
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
def categorical_sample(prob_n, np_random):
    """
    Sample from categorical distribution
    Each row specifies class probabilities
    """
    cumulative = np.cumsum(np.asarray(prob_n))
    draw = np_random.rand()
    # Index of the first cumulative bin exceeding the uniform draw.
    return (cumulative > draw).argmax()
class DiscreteWithinBoxEnv(Env):
    """
    Inspired from
    https://github.com/openai/gym/blob/ee5ee3a4a5b9d09219ff4c932a45c4a661778cd7/gym/envs/toy_text/discrete.py
    Has the following members
    - nS: number of states
    - nA: number of actions
    - P: transitions (*)
    - isd: initial state distribution (**)
    (*) dictionary of lists, where
      P[s][a] == [(probability, nextstate, reward, done), ...]
    (**) list or array of length nS
    """

    def __init__(self, nS, nA, P, isd):
        self.P = P
        self.isd = isd
        self.lastaction = None  # for rendering
        self.nS = nS
        self.nA = nA
        self.action_space = spaces.Discrete(self.nA)
        # Observations are one-element integer arrays in [0, nS].
        self.observation_space = spaces.Box(low=np.array([0]), high=np.array([self.nS]), shape=(1, ), dtype=np.int32)
        self.seed()
        self.s = np.array([int(categorical_sample(self.isd, self.np_random))])
        # Episodes are truncated after this many steps regardless of the MDP.
        self.max_episode_length = 200
        self.episode_length = 0

    def seed(self, seed=None):
        """Seed the environment's RNG; returns the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reset(self):
        """Reset the step counter and resample the start state from isd."""
        self.episode_length = 0
        self.s = np.array([int(categorical_sample(self.isd, self.np_random))])
        self.lastaction = None
        return self.s

    def step(self, a):
        """Take action `a`; returns (state, reward, done, info).

        `info["prob"]` carries the probability of the sampled transition.
        NOTE(review): the returned state is the raw integer from P, while
        reset() returns a one-element array -- confirm callers expect this.
        """
        transitions = self.P[self.s[0]][a]
        i = categorical_sample([t[0] for t in transitions], self.np_random)
        p, s, r, d = transitions[i]
        # (Fixed) removed leftover debug print that wrote to stdout on
        # every environment step.
        self.s = np.array([int(s)])
        self.lastaction = a
        self.episode_length += 1
        # Episode ends either by the MDP (`d`) or by step-count truncation.
        done = (self.episode_length >= self.max_episode_length) or d
        return (s, r, done, {"prob": p})
class BoxSpaceCliffWalker(DiscreteWithinBoxEnv, utils.EzPickle):
    """
    This is a simple implementation of the Gridworld Cliff
    reinforcement learning task with Box state space.
    With inspiration from:
    https://github.com/dennybritz/reinforcement-learning/blob/master/lib/envs/cliff_walking.py
    """
    metadata = {'render.modes': ['human', 'ansi']}

    def __init__(self):
        utils.EzPickle.__init__(self)
        self.shape = (4, 12)
        self.start_state_index = np.ravel_multi_index((3, 0), self.shape)

        nS = np.prod(self.shape)
        nA = 4

        # Cliff Location
        # (Fixed) use the builtin `bool` -- the `np.bool` alias is
        # deprecated and removed in NumPy >= 1.24.
        self._cliff = np.zeros(self.shape, dtype=bool)
        self._cliff[3, 1:-1] = True

        # Calculate transition probabilities and rewards
        P = {}
        for s in range(nS):
            position = np.unravel_index(s, self.shape)
            P[s] = {a: [] for a in range(nA)}
            P[s][UP] = self._calculate_transition_prob(position, [-1, 0])
            P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1])
            P[s][DOWN] = self._calculate_transition_prob(position, [1, 0])
            P[s][LEFT] = self._calculate_transition_prob(position, [0, -1])

        # Calculate initial state distribution
        # We always start in state (3, 0)
        isd = np.zeros(nS)
        isd[self.start_state_index] = 1.0
        super(BoxSpaceCliffWalker, self).__init__(nS, nA, P, isd)

    def _limit_coordinates(self, coord):
        """
        Prevent the agent from falling out of the grid world
        :param coord:
        :return:
        """
        coord[0] = min(coord[0], self.shape[0] - 1)
        coord[0] = max(coord[0], 0)
        coord[1] = min(coord[1], self.shape[1] - 1)
        coord[1] = max(coord[1], 0)
        return coord

    def _calculate_transition_prob(self, current, delta):
        """
        Determine the outcome for an action. Transition Prob is always 1.0.
        :param current: Current position on the grid as (row, col)
        :param delta: Change in position for transition
        :return: (1.0, new_state, reward, done)
        """
        new_position = np.array(current) + np.array(delta)
        new_position = self._limit_coordinates(new_position).astype(int)
        new_state = np.ravel_multi_index(tuple(new_position), self.shape)
        # Stepping onto the cliff teleports back to the start with -100 reward.
        if self._cliff[tuple(new_position)]:
            return [(1.0, self.start_state_index, -100, False)]
        terminal_state = (self.shape[0] - 1, self.shape[1] - 1)
        is_done = tuple(new_position) == terminal_state
        return [(1.0, new_state, -1, is_done)]

    def render(self, mode='human'):
        """Render the grid as text: x = agent, T = goal, C = cliff, o = free."""
        outfile = StringIO() if mode == 'ansi' else sys.stdout

        for s in range(self.nS):
            position = np.unravel_index(s, self.shape)
            if self.s == s:
                output = " x "
            # Print terminal state
            elif position == (3, 11):
                output = " T "
            elif self._cliff[position]:
                output = " C "
            else:
                output = " o "

            if position[1] == 0:
                output = output.lstrip()
            if position[1] == self.shape[1] - 1:
                output = output.rstrip()
                output += '\n'

            outfile.write(output)
        outfile.write('\n')

        # No need to return anything for human
        if mode != 'human':
            with closing(outfile):
                return outfile.getvalue()
"numpy.prod",
"numpy.ravel_multi_index",
"numpy.asarray",
"gym.spaces.Discrete",
"numpy.array",
"numpy.zeros",
"gym.utils.EzPickle.__init__",
"numpy.unravel_index",
"contextlib.closing",
"numpy.cumsum",
"io.StringIO",
"gym.utils.seeding.np_random"
] | [((343, 361), 'numpy.asarray', 'np.asarray', (['prob_n'], {}), '(prob_n)\n', (353, 361), True, 'import numpy as np\n'), ((377, 394), 'numpy.cumsum', 'np.cumsum', (['prob_n'], {}), '(prob_n)\n', (386, 394), True, 'import numpy as np\n'), ((1119, 1143), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.nA'], {}), '(self.nA)\n', (1134, 1143), False, 'from gym import Env, spaces, utils\n'), ((1498, 1521), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (1515, 1521), False, 'from gym.utils import seeding\n'), ((2582, 2611), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (2605, 2611), False, 'from gym import Env, spaces, utils\n'), ((2674, 2714), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['(3, 0)', 'self.shape'], {}), '((3, 0), self.shape)\n', (2694, 2714), True, 'import numpy as np\n'), ((2729, 2748), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (2736, 2748), True, 'import numpy as np\n'), ((2812, 2847), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'np.bool'}), '(self.shape, dtype=np.bool)\n', (2820, 2847), True, 'import numpy as np\n'), ((3491, 3503), 'numpy.zeros', 'np.zeros', (['nS'], {}), '(nS)\n', (3499, 3503), True, 'import numpy as np\n'), ((3008, 3039), 'numpy.unravel_index', 'np.unravel_index', (['s', 'self.shape'], {}), '(s, self.shape)\n', (3024, 3039), True, 'import numpy as np\n'), ((4329, 4346), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (4337, 4346), True, 'import numpy as np\n'), ((4349, 4364), 'numpy.array', 'np.array', (['delta'], {}), '(delta)\n', (4357, 4364), True, 'import numpy as np\n'), ((4844, 4854), 'io.StringIO', 'StringIO', ([], {}), '()\n', (4852, 4854), False, 'from io import StringIO\n'), ((4946, 4977), 'numpy.unravel_index', 'np.unravel_index', (['s', 'self.shape'], {}), '(s, self.shape)\n', (4962, 4977), True, 'import numpy as np\n'), ((1193, 1206), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1201, 
1206), True, 'import numpy as np\n'), ((1213, 1232), 'numpy.array', 'np.array', (['[self.nS]'], {}), '([self.nS])\n', (1221, 1232), True, 'import numpy as np\n'), ((5613, 5629), 'contextlib.closing', 'closing', (['outfile'], {}), '(outfile)\n', (5620, 5629), False, 'from contextlib import closing\n')] |
'''Provides a class for representing a hand pose and a hand volume.'''
# python
from time import time
from copy import copy
# scipy
from matplotlib import pyplot
from numpy.linalg import inv, norm
from numpy.random import rand, randn
from numpy import arange, arccos, arctan, arctan2, array, ascontiguousarray, ceil, concatenate, \
cross, dot, eye, linspace, maximum, meshgrid, ones, pi, reshape, round, sqrt, stack, zeros
# self
class HandDescriptor():
  '''Represents a hand (gripper) pose plus an image descriptor of the volume near it.'''

  def __init__(self, T, imP, imD, imW):
    '''Creates a HandDescriptor object with everything needed.

    - Input T: 4x4 homogeneous transform of the hand frame. Columns are
      interpreted as axis (0), binormal (1), and negative approach (2);
      the translation column is the hand center.
    - Input imP: descriptor image resolution in pixels (image is imP x imP).
    - Input imD: metric depth range encoded into the image intensities.
    - Input imW: metric width of the square image footprint.
    '''
    self.T = T

    # hand size (used for drawing)
    self.depth = 0.075
    self.width = 0.085
    self.height = 0.01

    # image size (used for image descriptor)
    self.imP = imP
    self.imD = imD
    self.imW = imW

    # hand axes
    self.axis = T[0:3, 0]
    self.binormal = T[0:3, 1]
    self.approach = -T[0:3, 2]
    self.center = T[0:3, 3]
    # back and front of the hand, half a hand-depth along the approach
    self.bottom = self.center - 0.5 * self.depth * self.approach
    self.top = self.center + 0.5 * self.depth * self.approach

    # internal variables
    self.image = None  # lazily filled by GenerateHeightmap

  def GenerateHeightmap(self, env):
    '''Generates a heightmap for the current descriptor given an environment with shape primitives.

    - Input env: rl_environment_pegs_on_disks object.
    - Returns self.image: The image generated for this descriptor.
    '''
    # precomputed values
    dxy = self.imW / self.imP  # metric size of one pixel
    # pixel coordinates of the image corner relative to the hand center
    cornerXi = (self.center[0] / dxy) - ((self.imP - 1.0) / 2.0)
    cornerYi = (self.center[1] / dxy) - ((self.imP - 1.0) / 2.0)
    # background intensity: table surface height encoded into [0, 1]
    value = 0.50 + ((env.tablePosition[2] + env.tableExtents[2]) - self.center[2]) / self.imD
    value = min(max(0.0, value), 1.0)
    self.image = value * ones((self.imP, self.imP, 1), dtype='float32')
    # for each object, compute relevant image indices
    objects = env.objects + env.supportObjects
    for obj in objects:
      # assumes obj has radius/height attributes (cylinder-like primitives)
      # -- TODO confirm against the environment's object types
      objCenter = obj.GetTransform()[0:3, 3]
      # pixel bounds of the object's footprint, clipped to the image
      oXiLo = max(int(round((objCenter[0] - obj.radius) / dxy - cornerXi)), 0)
      oXiHi = min(int(round((objCenter[0] + obj.radius) / dxy - cornerXi))+1, self.imP)
      if oXiLo >= self.imP or oXiHi < 0: continue
      oYiLo = max(int(round((objCenter[1] - obj.radius) / dxy - cornerYi)), 0)
      oYiHi = min(int(round((objCenter[1] + obj.radius) / dxy - cornerYi))+1, self.imP)
      if oYiLo >= self.imP or oYiHi < 0: continue
      # object top height encoded into [0, 1]; taking the max keeps the
      # tallest object visible where footprints overlap
      value = 0.50 + ((objCenter[2] + obj.height/2.0) - self.center[2]) / self.imD
      value = min(max(0.0, value), 1.0)
      self.image[oXiLo:oXiHi, oYiLo:oYiHi, 0] = \
        maximum(value, self.image[oXiLo:oXiHi, oYiLo:oYiHi, 0])
    return self.image

  def PlotImage(self):
    '''Plots the image descriptor for this grasp (no-op if not generated yet).'''
    if self.image is None:
      return

    fig = pyplot.figure()
    pyplot.imshow(self.image[:, :, 0], vmin=0.00, vmax=1.00, cmap='gray')
    pyplot.title("Hand Image")
    fig.axes[0].set_xticks([])
    fig.axes[0].set_yticks([])
    pyplot.show(block=True)
# UTILITIES ========================================================================================
def PoseFromApproachAxisCenter(approach, axis, center):
  '''Given grasp approach and axis unit vectors, and center, get homogeneous transform for grasp.'''
  # Complete the right-handed frame: binormal = approach x axis.
  binormal = cross(approach, axis)
  T = eye(4)
  T[0:3, 0] = axis
  T[0:3, 1] = binormal
  T[0:3, 2] = approach
  T[0:3, 3] = center
  return T
"matplotlib.pyplot.imshow",
"numpy.eye",
"numpy.cross",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.maximum",
"numpy.round",
"matplotlib.pyplot.show"
] | [((3231, 3237), 'numpy.eye', 'eye', (['(4)'], {}), '(4)\n', (3234, 3237), False, 'from numpy import arange, arccos, arctan, arctan2, array, ascontiguousarray, ceil, concatenate, cross, dot, eye, linspace, maximum, meshgrid, ones, pi, reshape, round, sqrt, stack, zeros\n'), ((3271, 3292), 'numpy.cross', 'cross', (['approach', 'axis'], {}), '(approach, axis)\n', (3276, 3292), False, 'from numpy import arange, arccos, arctan, arctan2, array, ascontiguousarray, ceil, concatenate, cross, dot, eye, linspace, maximum, meshgrid, ones, pi, reshape, round, sqrt, stack, zeros\n'), ((2751, 2766), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (2764, 2766), False, 'from matplotlib import pyplot\n'), ((2771, 2838), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['self.image[:, :, 0]'], {'vmin': '(0.0)', 'vmax': '(1.0)', 'cmap': '"""gray"""'}), "(self.image[:, :, 0], vmin=0.0, vmax=1.0, cmap='gray')\n", (2784, 2838), False, 'from matplotlib import pyplot\n'), ((2846, 2872), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Hand Image"""'], {}), "('Hand Image')\n", (2858, 2872), False, 'from matplotlib import pyplot\n'), ((2940, 2963), 'matplotlib.pyplot.show', 'pyplot.show', ([], {'block': '(True)'}), '(block=True)\n', (2951, 2963), False, 'from matplotlib import pyplot\n'), ((1706, 1752), 'numpy.ones', 'ones', (['(self.imP, self.imP, 1)'], {'dtype': '"""float32"""'}), "((self.imP, self.imP, 1), dtype='float32')\n", (1710, 1752), False, 'from numpy import arange, arccos, arctan, arctan2, array, ascontiguousarray, ceil, concatenate, cross, dot, eye, linspace, maximum, meshgrid, ones, pi, reshape, round, sqrt, stack, zeros\n'), ((2539, 2594), 'numpy.maximum', 'maximum', (['value', 'self.image[oXiLo:oXiHi, oYiLo:oYiHi, 0]'], {}), '(value, self.image[oXiLo:oXiHi, oYiLo:oYiHi, 0])\n', (2546, 2594), False, 'from numpy import arange, arccos, arctan, arctan2, array, ascontiguousarray, ceil, concatenate, cross, dot, eye, linspace, maximum, meshgrid, ones, pi, reshape, 
round, sqrt, stack, zeros\n'), ((1946, 1997), 'numpy.round', 'round', (['((objCenter[0] - obj.radius) / dxy - cornerXi)'], {}), '((objCenter[0] - obj.radius) / dxy - cornerXi)\n', (1951, 1997), False, 'from numpy import arange, arccos, arctan, arctan2, array, ascontiguousarray, ceil, concatenate, cross, dot, eye, linspace, maximum, meshgrid, ones, pi, reshape, round, sqrt, stack, zeros\n'), ((2163, 2214), 'numpy.round', 'round', (['((objCenter[1] - obj.radius) / dxy - cornerYi)'], {}), '((objCenter[1] - obj.radius) / dxy - cornerYi)\n', (2168, 2214), False, 'from numpy import arange, arccos, arctan, arctan2, array, ascontiguousarray, ceil, concatenate, cross, dot, eye, linspace, maximum, meshgrid, ones, pi, reshape, round, sqrt, stack, zeros\n'), ((2025, 2076), 'numpy.round', 'round', (['((objCenter[0] + obj.radius) / dxy - cornerXi)'], {}), '((objCenter[0] + obj.radius) / dxy - cornerXi)\n', (2030, 2076), False, 'from numpy import arange, arccos, arctan, arctan2, array, ascontiguousarray, ceil, concatenate, cross, dot, eye, linspace, maximum, meshgrid, ones, pi, reshape, round, sqrt, stack, zeros\n'), ((2242, 2293), 'numpy.round', 'round', (['((objCenter[1] + obj.radius) / dxy - cornerYi)'], {}), '((objCenter[1] + obj.radius) / dxy - cornerYi)\n', (2247, 2293), False, 'from numpy import arange, arccos, arctan, arctan2, array, ascontiguousarray, ceil, concatenate, cross, dot, eye, linspace, maximum, meshgrid, ones, pi, reshape, round, sqrt, stack, zeros\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle.fluid as fluid
from ...common import SAController
__all__ = ['OneShotSuperNet', 'OneShotSearch']
def OneShotSearch(model, eval_func, strategy='sa', search_steps=100):
    """
    Search a best tokens which represents a sub-network.

    Args:
        model(fluid.dygraph.Layer): A dynamic graph module whose sub-modules should contain
                                    one instance of `OneShotSuperNet` at least.
        eval_func(function): A callback function which accept model and tokens as arguments.
        strategy(str): The name of strategy used to search. Default: 'sa'.
        search_steps(int): The total steps for searching.

    Returns:
        list<int>: The best tokens searched.
    """
    # Find the first OneShotSuperNet among the model's direct sub-layers.
    super_net = None
    for layer in model.sublayers(include_sublayers=False):
        print("layer: {}".format(layer))
        if isinstance(layer, OneShotSuperNet):
            super_net = layer
            break
    assert super_net is not None

    # (Fixed) the original misspelled `controller` as `contoller`, so this
    # variable was never assigned, and asserted on a tuple
    # `(cond, msg)` which is always truthy.
    controller = None
    if strategy == "sa":
        controller = SAController(
            range_table=super_net.range_table(),
            init_tokens=super_net.init_tokens())
    assert controller is not None, "Unsupported searching strategy."

    for i in range(search_steps):
        tokens = controller.next_tokens()
        reward = eval_func(model, tokens)
        controller.update(tokens, reward, i)
    return controller.best_tokens()
class OneShotSuperNet(fluid.dygraph.Layer):
    """The base class of super net used in one-shot searching strategy.
    A super net is a dygraph layer.

    Args:
        name_scope(str): The name scope of super net.
    """

    def __init__(self, name_scope):
        super(OneShotSuperNet, self).__init__(name_scope)

    def init_tokens(self):
        """Get init tokens in search space.

        Returns:
            list<int>: The init tokens which is a list of integer.
        """
        raise NotImplementedError('Abstract method.')

    def range_table(self):
        """Get range table of current search space.

        Returns:
            range_table(tuple): The maximum value and minimum value in each position of tokens
                                with format `(min_values, max_values)`. The `min_values` is
                                a list of integers indicating the minimum values while `max_values`
                                indicating the maximum values.
        """
        raise NotImplementedError('Abstract method.')

    def _forward_impl(self, *inputs, **kwargs):
        """Defines the computation performed at every call.
        Should be overridden by all subclasses.

        Args:
            inputs(tuple): unpacked tuple arguments
            kwargs(dict): unpacked dict arguments
        """
        raise NotImplementedError('Abstract method.')

    def forward(self, input, tokens=None):
        """
        Defines the computation performed at every call.

        Args:
            input(variable): The input of super net.
            tokens(list): The tokens used to generate a sub-network.
                          None means computing in super net training mode.
                          Otherwise, it will execute the sub-network generated by tokens.
                          The `tokens` should be set in searching stage and final training stage.
                          Default: None.

        Returns:
            Varaible: The output of super net.
        """
        # (Fixed) compare with `is None` -- `tokens == None` is non-idiomatic
        # and would do an element-wise comparison for array-like tokens.
        if tokens is None:
            tokens = self._random_tokens()
        return self._forward_impl(input, tokens=tokens)

    def _random_tokens(self):
        """Uniformly sample a token in [min_v, max_v) for each position."""
        tokens = []
        for min_v, max_v in zip(self.range_table()[0], self.range_table()[1]):
            tokens.append(np.random.randint(min_v, max_v))
        return tokens
| [
"numpy.random.randint"
] | [((4361, 4392), 'numpy.random.randint', 'np.random.randint', (['min_v', 'max_v'], {}), '(min_v, max_v)\n', (4378, 4392), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import sets
@pytest.fixture
def dataset():
    """Three-sample dataset shared by every test in this module."""
    return sets.Dataset(
        data=[[1, 3], [0, -1.5], [0, 0]],
        target=[0, 0.5, 1])
def test_concat(dataset):
    """Concat folds the extra column into data and drops it from columns."""
    dataset['other'] = [[1], [2], [3]]
    concat = sets.Concat(1, 'data')
    result = concat(dataset, columns=('data', 'other'))
    assert 'other' not in result.columns
    assert len(result) == len(dataset)
    assert result.data.shape[1] == dataset.data.shape[1] + 1
    assert (result.data[:, :-1] == dataset.data).all()
    assert (result.target == dataset.target).all()
def test_onehot(dataset):
    """One-hot targets have one column per class and exactly one hot bit."""
    encode = sets.OneHot(dataset.target)
    result = encode(dataset, columns=['target'])
    num_classes = len(np.unique(dataset.target))
    assert result.target.shape[1] == num_classes
    assert (result.target.sum(axis=1)).all()
    assert (result.target.max(axis=1)).all()
def test_split(dataset):
    """Split(0.5) partitions the rows without losing or reordering any."""
    first, second = sets.Split(0.5)(dataset)
    assert len(first) + len(second) == len(dataset)
    recombined_data = np.concatenate((first.data, second.data))
    recombined_target = np.concatenate((first.target, second.target))
    assert (recombined_data == dataset.data).all()
    assert (recombined_target == dataset.target).all()
def test_normalize(dataset):
    """Normalize standardizes columns; reusing its statistics on shifted data keeps unit std."""
    width = dataset.data.shape[1]
    normalize = sets.Normalize(dataset)
    result = normalize(dataset)
    assert np.allclose(result.data.mean(axis=0), np.zeros(width))
    assert np.allclose(result.data.std(axis=0), np.ones(width))
    # NOTE(review): `width` is the data width but is reused for target
    # columns below -- only valid because data and target happen to have
    # the same width in this fixture.
    assert np.allclose(result.target.mean(axis=0), np.zeros(width))
    assert np.allclose(result.target.std(axis=0), np.ones(width))
    # Normalizing shifted data with the *original* statistics: std stays 1
    # but the mean moves away from zero.
    shifted = dataset.data + 1
    other = normalize(sets.Dataset(data=shifted, target=dataset.target))
    assert not np.allclose(other.data.mean(axis=0), np.zeros(width))
    assert np.allclose(other.data.std(axis=0), np.ones(width))
    # NOTE(review): the next two asserts re-check `result.target`, identical
    # to the block above -- they look like a copy-paste and were probably
    # meant to check `other.target`; confirm intent before changing.
    assert np.allclose(result.target.mean(axis=0), np.zeros(width))
    assert np.allclose(result.target.std(axis=0), np.ones(width))
def test_embedding_found():
    """OneHot reports the fraction of tokens present in the vocabulary."""
    dataset = sets.Dataset(data=list('ceabb'), target=list('abddd'))
    encode = sets.OneHot(list('abc'))
    dataset, found = encode(
        dataset, columns=['data', 'target'], return_found=True)
    # 6 of the 10 tokens across data+target are in the vocabulary 'abc'.
    assert found == 6 / 10
| [
"sets.Concat",
"sets.Dataset",
"sets.Normalize",
"numpy.ones",
"numpy.unique",
"sets.Split",
"sets.OneHot",
"numpy.zeros",
"numpy.concatenate"
] | [((162, 200), 'sets.Dataset', 'sets.Dataset', ([], {'data': 'data', 'target': 'target'}), '(data=data, target=target)\n', (174, 200), False, 'import sets\n'), ((985, 1021), 'numpy.concatenate', 'np.concatenate', (['(one.data, two.data)'], {}), '((one.data, two.data))\n', (999, 1021), True, 'import numpy as np\n'), ((1036, 1076), 'numpy.concatenate', 'np.concatenate', (['(one.target, two.target)'], {}), '((one.target, two.target))\n', (1050, 1076), True, 'import numpy as np\n'), ((1247, 1270), 'sets.Normalize', 'sets.Normalize', (['dataset'], {}), '(dataset)\n', (1261, 1270), False, 'import sets\n'), ((2078, 2116), 'sets.Dataset', 'sets.Dataset', ([], {'data': 'data', 'target': 'target'}), '(data=data, target=target)\n', (2090, 2116), False, 'import sets\n'), ((284, 306), 'sets.Concat', 'sets.Concat', (['(1)', '"""data"""'], {}), "(1, 'data')\n", (295, 306), False, 'import sets\n'), ((638, 665), 'sets.OneHot', 'sets.OneHot', (['dataset.target'], {}), '(dataset.target)\n', (649, 665), False, 'import sets\n'), ((900, 915), 'sets.Split', 'sets.Split', (['(0.5)'], {}), '(0.5)\n', (910, 915), False, 'import sets\n'), ((1354, 1369), 'numpy.zeros', 'np.zeros', (['width'], {}), '(width)\n', (1362, 1369), True, 'import numpy as np\n'), ((1420, 1434), 'numpy.ones', 'np.ones', (['width'], {}), '(width)\n', (1427, 1434), True, 'import numpy as np\n'), ((1488, 1503), 'numpy.zeros', 'np.zeros', (['width'], {}), '(width)\n', (1496, 1503), True, 'import numpy as np\n'), ((1556, 1570), 'numpy.ones', 'np.ones', (['width'], {}), '(width)\n', (1563, 1570), True, 'import numpy as np\n'), ((1627, 1676), 'sets.Dataset', 'sets.Dataset', ([], {'data': 'shifted', 'target': 'dataset.target'}), '(data=shifted, target=dataset.target)\n', (1639, 1676), False, 'import sets\n'), ((1796, 1810), 'numpy.ones', 'np.ones', (['width'], {}), '(width)\n', (1803, 1810), True, 'import numpy as np\n'), ((1864, 1879), 'numpy.zeros', 'np.zeros', (['width'], {}), '(width)\n', (1872, 1879), True, 'import 
numpy as np\n'), ((1932, 1946), 'numpy.ones', 'np.ones', (['width'], {}), '(width)\n', (1939, 1946), True, 'import numpy as np\n'), ((2139, 2162), 'sets.OneHot', 'sets.OneHot', (['vocabulary'], {}), '(vocabulary)\n', (2150, 2162), False, 'import sets\n'), ((737, 762), 'numpy.unique', 'np.unique', (['dataset.target'], {}), '(dataset.target)\n', (746, 762), True, 'import numpy as np\n'), ((1731, 1746), 'numpy.zeros', 'np.zeros', (['width'], {}), '(width)\n', (1739, 1746), True, 'import numpy as np\n')] |
import glob as glob
import albumentations
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import os
from model import Net
# Run the trained classifier over the GTSRB test images and save annotated
# prediction figures to ../outputs/.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)
# load the model checkpoint
checkpoint = torch.load('../outputs/model.pth')
# load model weights state_dict
model.load_state_dict(checkpoint['model_state_dict'])
# read all image paths
root_dir = '../../input/german_traffic_sign/GTSRB/Final_Test/Images/'
# read the test dataframe (only the first 10 rows are evaluated here)
test_df = pd.read_csv(
    '../../input/german_traffic_sign/GTSRB/Final_Test/GTSRB_Final_Test_GT/GT-final_test.csv',
    delimiter=';', nrows=10
)
# change index to filename for easier access to labels
gt_df = test_df.set_index('Filename', drop=True)
# read sign label dataframes (class id -> human-readable sign name)
sign_df = pd.read_csv(
    '../../input/german_traffic_sign/GTSRB/Final_Training/signnames.csv'
)
aug = albumentations.Compose([
    # 48x48 resizing is required for this network model
    albumentations.Resize(48, 48, always_apply=True),
])
for i in range(len(test_df)):
    image_path = root_dir+test_df.loc[i, 'Filename']
    image = plt.imread(image_path)
    # keep an unmodified copy for display
    orig = image.copy()
    model.eval()
    with torch.no_grad():
        # scale to [0, 1], resize, and convert HWC -> CHW for the network
        image = image / 255.
        image = aug(image=np.array(image))['image']
        image = np.transpose(image, (2, 0, 1))
        image = torch.tensor(image, dtype=torch.float).to(device)
        # add the batch dimension (batch size 1)
        image = image.unsqueeze(0)
        outputs = model(image)
        _, preds = torch.max(outputs.data, 1)
    # get the prediction label (int(preds) assumes a single-image batch)
    label = sign_df.loc[int(preds), 'SignName']
    # get the ground truth label
    filename = image_path.split('/')[-1]
    gt_id = gt_df.loc[filename].ClassId
    gt_label = sign_df.loc[int(gt_id), 'SignName']
    # image = image.detach().cpu().numpy()
    # image = image.squeeze(0)
    # image = np.transpose(image, (1, 2, 0))
    # plt.imshow(image)
    # plt.title('Image that the model sees')
    # plt.show()
    plt.imshow(orig)
    plt.title(f"Prediction - {str(label)}\nGround Truth - {str(gt_label)}")
    plt.axis('off')
    plt.savefig(f"../outputs/{filename.split('.')[0]}.png")
    plt.show()
    plt.close()
"matplotlib.pyplot.imshow",
"pandas.read_csv",
"torch.load",
"matplotlib.pyplot.imread",
"torch.max",
"matplotlib.pyplot.close",
"torch.no_grad",
"torch.tensor",
"torch.cuda.is_available",
"numpy.array",
"albumentations.Resize",
"matplotlib.pyplot.axis",
"numpy.transpose",
"model.Net",
"... | [((325, 359), 'torch.load', 'torch.load', (['"""../outputs/model.pth"""'], {}), "('../outputs/model.pth')\n", (335, 359), False, 'import torch\n'), ((577, 713), 'pandas.read_csv', 'pd.read_csv', (['"""../../input/german_traffic_sign/GTSRB/Final_Test/GTSRB_Final_Test_GT/GT-final_test.csv"""'], {'delimiter': '""";"""', 'nrows': '(10)'}), "(\n '../../input/german_traffic_sign/GTSRB/Final_Test/GTSRB_Final_Test_GT/GT-final_test.csv'\n , delimiter=';', nrows=10)\n", (588, 713), True, 'import pandas as pd\n'), ((862, 948), 'pandas.read_csv', 'pd.read_csv', (['"""../../input/german_traffic_sign/GTSRB/Final_Training/signnames.csv"""'], {}), "(\n '../../input/german_traffic_sign/GTSRB/Final_Training/signnames.csv')\n", (873, 948), True, 'import pandas as pd\n'), ((1240, 1262), 'matplotlib.pyplot.imread', 'plt.imread', (['image_path'], {}), '(image_path)\n', (1250, 1262), True, 'import matplotlib.pyplot as plt\n'), ((2106, 2122), 'matplotlib.pyplot.imshow', 'plt.imshow', (['orig'], {}), '(orig)\n', (2116, 2122), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2218), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2211, 2218), True, 'import matplotlib.pyplot as plt\n'), ((2283, 2293), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2291, 2293), True, 'import matplotlib.pyplot as plt\n'), ((2298, 2309), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2307, 2309), True, 'import matplotlib.pyplot as plt\n'), ((221, 246), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (244, 246), False, 'import torch\n'), ((267, 272), 'model.Net', 'Net', ([], {}), '()\n', (270, 272), False, 'from model import Net\n'), ((1078, 1126), 'albumentations.Resize', 'albumentations.Resize', (['(48)', '(48)'], {'always_apply': '(True)'}), '(48, 48, always_apply=True)\n', (1099, 1126), False, 'import albumentations\n'), ((1314, 1329), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1327, 1329), False, 'import torch\n'), 
((1428, 1458), 'numpy.transpose', 'np.transpose', (['image', '(2, 0, 1)'], {}), '(image, (2, 0, 1))\n', (1440, 1458), True, 'import numpy as np\n'), ((1610, 1636), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (1619, 1636), False, 'import torch\n'), ((1475, 1513), 'torch.tensor', 'torch.tensor', (['image'], {'dtype': 'torch.float'}), '(image, dtype=torch.float)\n', (1487, 1513), False, 'import torch\n'), ((1386, 1401), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1394, 1401), True, 'import numpy as np\n')] |
"""
Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import random
import argparse
from pathlib import Path
from multiprocessing import Pool
from itertools import repeat
import numpy as np
import SimpleITK as sitk
from loguru import logger
from nndet.io import save_json
from nndet.utils.check import env_guard
# # 2D example
# [Ignore, Not supported]
# dim = 2
# image_size = [512, 512]
# object_size = [32, 64]
# object_width = 6
# num_images_tr = 100
# num_images_ts = 100
# 3D example
dim = 3  # spatial dimensionality of the generated volumes
image_size = [256, 256, 256]  # voxels per axis of each synthetic image
object_size = [16, 32]  # [min, max) edge length of the random object
object_width = 4  # shell thickness of the hollow "SquareHole" class
def generate_image(image_dir, label_dir, idx):
    """Create one synthetic case and write image, mask and per-case metadata.

    Writes `case_{idx}_0000.nii.gz` into `image_dir`, plus `case_{idx}.nii.gz`
    and `case_{idx}.json` into `label_dir`. Class 0 is a solid bright cube,
    class 1 a hollow one.
    """
    # Seed per case so generation is reproducible and each multiprocessing
    # worker produces an independent, deterministic case.
    random.seed(idx)
    np.random.seed(idx)
    logger.info(f"Generating case_{idx}")
    selected_size = np.random.randint(object_size[0], object_size[1])
    selected_class = np.random.randint(0, 2)
    data = np.random.rand(*image_size)
    mask = np.zeros_like(data)
    # Random corner so that the object fits entirely inside the image.
    top_left = [np.random.randint(0, image_size[i] - selected_size) for i in range(dim)]
    if selected_class == 0:
        # Solid object: brighten a cube by 0.4 and mark it in the mask.
        slicing = tuple([slice(tp, tp + selected_size) for tp in top_left])
        data[slicing] = data[slicing] + 0.4
        data = data.clip(0, 1)
        mask[slicing] = 1
    elif selected_class == 1:
        # Hollow object: brighten only a shell of width `object_width`.
        slicing = tuple([slice(tp, tp + selected_size) for tp in top_left])
        inner_slicing = [slice(tp + object_width, tp + selected_size - object_width) for tp in top_left]
        if len(inner_slicing) == 3:
            # In 3D the first axis is left fully open so the hole goes all
            # the way through the object.
            inner_slicing[0] = slice(0, image_size[0])
        inner_slicing = tuple(inner_slicing)
        object_mask = np.zeros_like(mask).astype(bool)
        object_mask[slicing] = 1
        object_mask[inner_slicing] = 0
        data[object_mask] = data[object_mask] + 0.4
        data = data.clip(0, 1)
        mask[object_mask] = 1
    else:
        # Unreachable: selected_class is drawn from {0, 1} above.
        raise NotImplementedError
    if dim == 2:
        # Add a leading singleton axis for 2D data (marked unsupported above).
        data = data[None]
        mask = mask[None]
    data_itk = sitk.GetImageFromArray(data)
    mask_itk = sitk.GetImageFromArray(mask)
    # Instance "1" in the mask belongs to the drawn class.
    mask_meta = {
        "instances": {
            "1": selected_class
        },
    }
    sitk.WriteImage(data_itk, str(image_dir / f"case_{idx}_0000.nii.gz"))
    sitk.WriteImage(mask_itk, str(label_dir / f"case_{idx}.nii.gz"))
    save_json(mask_meta, label_dir / f"case_{idx}.json")
@env_guard
def main():
    """
    Generate an example dataset for nnDetection to test the installation or
    experiment with ideas.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--full',
        help="Increase size of dataset. "
        "Default sizes train/test 10/10 and full 1000/1000.",
        action='store_true',
    )
    parser.add_argument(
        '--num_processes',
        help="Use multiprocessing to create dataset.",
        type=int,
        default=0,
    )
    args = parser.parse_args()
    full = args.full
    num_processes = args.num_processes
    num_images_tr = 1000 if full else 10
    num_images_ts = 1000 if full else 10
    # Dataset-level metadata written next to the generated cases.
    meta = {
        "task": f"Task000D{dim}_Example",
        "name": "Example",
        "target_class": None,
        "test_labels": True,
        "labels": {"0": "Square", "1": "SquareHole"},
        "modalities": {"0": "MRI"},
        "dim": dim,
    }
    # setup paths -- the `det_data` environment variable points at the
    # nnDetection data root (presence checked by @env_guard).
    data_task_dir = Path(os.getenv("det_data")) / meta["task"]
    data_task_dir.mkdir(parents=True, exist_ok=True)
    save_json(meta, data_task_dir / "dataset.json")
    raw_splitted_dir = data_task_dir / "raw_splitted"
    images_tr_dir = raw_splitted_dir / "imagesTr"
    images_tr_dir.mkdir(parents=True, exist_ok=True)
    labels_tr_dir = raw_splitted_dir / "labelsTr"
    labels_tr_dir.mkdir(parents=True, exist_ok=True)
    images_ts_dir = raw_splitted_dir / "imagesTs"
    images_ts_dir.mkdir(parents=True, exist_ok=True)
    labels_ts_dir = raw_splitted_dir / "labelsTs"
    labels_ts_dir.mkdir(parents=True, exist_ok=True)
    # Case indices: [0, num_images_tr) are train, the rest are test.
    # num_processes == 0 generates sequentially; otherwise fan out to a Pool.
    if num_processes == 0:
        for idx in range(num_images_tr):
            generate_image(
                images_tr_dir,
                labels_tr_dir,
                idx,
            )
        for idx in range(num_images_tr, num_images_tr + num_images_ts):
            generate_image(
                images_ts_dir,
                labels_ts_dir,
                idx,
            )
    else:
        logger.info("Using multiprocessing to create example dataset.")
        with Pool(processes=num_processes) as p:
            p.starmap(
                generate_image,
                zip(
                    repeat(images_tr_dir),
                    repeat(labels_tr_dir),
                    range(num_images_tr),
                )
            )
        with Pool(processes=num_processes) as p:
            p.starmap(
                generate_image,
                zip(
                    repeat(images_ts_dir),
                    repeat(labels_ts_dir),
                    range(num_images_tr, num_images_tr + num_images_ts),
                )
            )
# Script entry point.
if __name__ == '__main__':
    main()
| [
"nndet.io.save_json",
"numpy.random.rand",
"loguru.logger.info",
"SimpleITK.GetImageFromArray",
"argparse.ArgumentParser",
"os.getenv",
"random.seed",
"numpy.random.randint",
"numpy.random.seed",
"multiprocessing.Pool",
"numpy.zeros_like",
"itertools.repeat"
] | [((1225, 1241), 'random.seed', 'random.seed', (['idx'], {}), '(idx)\n', (1236, 1241), False, 'import random\n'), ((1246, 1265), 'numpy.random.seed', 'np.random.seed', (['idx'], {}), '(idx)\n', (1260, 1265), True, 'import numpy as np\n'), ((1271, 1308), 'loguru.logger.info', 'logger.info', (['f"""Generating case_{idx}"""'], {}), "(f'Generating case_{idx}')\n", (1282, 1308), False, 'from loguru import logger\n'), ((1329, 1378), 'numpy.random.randint', 'np.random.randint', (['object_size[0]', 'object_size[1]'], {}), '(object_size[0], object_size[1])\n', (1346, 1378), True, 'import numpy as np\n'), ((1400, 1423), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1417, 1423), True, 'import numpy as np\n'), ((1436, 1463), 'numpy.random.rand', 'np.random.rand', (['*image_size'], {}), '(*image_size)\n', (1450, 1463), True, 'import numpy as np\n'), ((1475, 1494), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (1488, 1494), True, 'import numpy as np\n'), ((2511, 2539), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['data'], {}), '(data)\n', (2533, 2539), True, 'import SimpleITK as sitk\n'), ((2555, 2583), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['mask'], {}), '(mask)\n', (2577, 2583), True, 'import SimpleITK as sitk\n'), ((2821, 2873), 'nndet.io.save_json', 'save_json', (['mask_meta', "(label_dir / f'case_{idx}.json')"], {}), "(mask_meta, label_dir / f'case_{idx}.json')\n", (2830, 2873), False, 'from nndet.io import save_json\n'), ((3031, 3056), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3054, 3056), False, 'import argparse\n'), ((3969, 4016), 'nndet.io.save_json', 'save_json', (['meta', "(data_task_dir / 'dataset.json')"], {}), "(meta, data_task_dir / 'dataset.json')\n", (3978, 4016), False, 'from nndet.io import save_json\n'), ((1512, 1563), 'numpy.random.randint', 'np.random.randint', (['(0)', '(image_size[i] - selected_size)'], {}), '(0, image_size[i] - 
selected_size)\n', (1529, 1563), True, 'import numpy as np\n'), ((4902, 4965), 'loguru.logger.info', 'logger.info', (['"""Using multiprocessing to create example dataset."""'], {}), "('Using multiprocessing to create example dataset.')\n", (4913, 4965), False, 'from loguru import logger\n'), ((3874, 3895), 'os.getenv', 'os.getenv', (['"""det_data"""'], {}), "('det_data')\n", (3883, 3895), False, 'import os\n'), ((4979, 5008), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'num_processes'}), '(processes=num_processes)\n', (4983, 5008), False, 'from multiprocessing import Pool\n'), ((5264, 5293), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'num_processes'}), '(processes=num_processes)\n', (5268, 5293), False, 'from multiprocessing import Pool\n'), ((2162, 2181), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (2175, 2181), True, 'import numpy as np\n'), ((5111, 5132), 'itertools.repeat', 'repeat', (['images_tr_dir'], {}), '(images_tr_dir)\n', (5117, 5132), False, 'from itertools import repeat\n'), ((5154, 5175), 'itertools.repeat', 'repeat', (['labels_tr_dir'], {}), '(labels_tr_dir)\n', (5160, 5175), False, 'from itertools import repeat\n'), ((5396, 5417), 'itertools.repeat', 'repeat', (['images_ts_dir'], {}), '(images_ts_dir)\n', (5402, 5417), False, 'from itertools import repeat\n'), ((5439, 5460), 'itertools.repeat', 'repeat', (['labels_ts_dir'], {}), '(labels_ts_dir)\n', (5445, 5460), False, 'from itertools import repeat\n')] |
import numpy as np
import tensorflow as tf
def set_seed(x):
    """Seed both TensorFlow's and NumPy's global random generators with *x*."""
    tf.set_random_seed(x)
    np.random.seed(x)
def check_is_tf_vector(x):
    """Raise TypeError unless *x* is a TensorFlow tensor shaped like a vector.

    Accepted shapes are (N,) with N > 1, or a column (N, 1).
    """
    if not isinstance(x, tf.Tensor):
        raise TypeError("util::check_is_tf_vector: "
                        "input is not a TensorFlow object.")
    shape = x.get_shape()
    rank = len(shape)
    if rank == 0:
        raise TypeError("util::check_is_tf_vector: "
                        "input is a scalar.")
    if rank == 1:
        if shape[0].value <= 1:
            raise TypeError("util::check_is_tf_vector: "
                            "input has first dimension <= 1.")
        return
    if rank == 2:
        if shape[1] != 1:
            raise TypeError("util::check_is_tf_vector: "
                            "input has second dimension != 1.")
        return
    raise TypeError("util::check_is_tf_vector: "
                    "input has too many dimensions.")
def log_sum_exp(x):
    """
    Computes the log_sum_exp of the elements in x.
    Works for x with
        shape=TensorShape([Dimension(N)])
        shape=TensorShape([Dimension(N), Dimension(1)])
    Not tested for anything beyond that.
    """
    check_is_tf_vector(x)
    # Shift by the max for numerical stability before exponentiating.
    x_max = tf.reduce_max(x)
    shifted = tf.sub(x, x_max)
    sum_exp = tf.reduce_sum(tf.exp(shifted))
    return tf.add(x_max, tf.log(sum_exp))
def logit(x):
    # NOTE(review): despite its name this computes the logistic sigmoid
    # 1 / (1 + e^-x), not the logit (inverse sigmoid). It is also shadowed
    # by a second `logit` definition later in this module, so this version
    # is effectively dead code after import.
    return tf.truediv(1.0, (1.0 + tf.exp(-x)))
def probit(x):
    """Standard normal CDF Phi(x), expressed through the error function."""
    sqrt_two = tf.sqrt(2.0)
    return 0.5 * (1.0 + tf.erf(x / sqrt_two))
def sigmoid(x):
    "Numerically-stable sigmoid function."
    # Branch on the sign of x so exp() is only ever evaluated on a
    # non-positive argument, which avoids overflow for large |x|.
    # NOTE(review): the Python-level `if x >= 0.0` only works for scalar
    # inputs; a tf.Tensor argument cannot be used as a boolean here.
    if x >= 0.0:
        z = tf.exp(-x)
        return 1.0 / (1.0 + z)
    else:
        z = tf.exp(x)
        return z / (1.0 + z)
def dot(x, y):
    """
    Matrix-vector product that accepts the vector on either side.

    Either x is an M x N matrix and y an N-vector (result N x 1 column of
    mat @ vec), or x is an M-vector and y an M x N matrix (result 1 x N row).
    """
    x_is_vector = len(x.get_shape()) == 1
    if x_is_vector:
        d = x.get_shape()[0].value
        return tf.matmul(tf.reshape(x, [1, d]), y)
    d = y.get_shape()[0].value
    return tf.matmul(x, tf.reshape(y, [d, 1]))
def trace(X):
    """Trace of a square matrix X, computed by masking out the off-diagonal."""
    # assumes square
    n = X.get_shape()[0].value
    diag_mask = tf.diag(tf.ones([n], dtype=X.dtype))
    return tf.reduce_sum(tf.mul(diag_mask, X))
def get_dims(x):
    """
    Return the size of each dimension of *x* as a list of ints.

    A scalar (rank-0) tensor is reported as [1].
    """
    shape = x.get_shape()
    if len(shape) == 0:  # scalar
        return [1]
    return [dim.value for dim in shape]
def kl_multivariate_normal(loc, scale):
    """
    KL( N(z; loc, scale) || N(z; 0, 1) ) for vector inputs, or
    sum_{m=1}^M KL( N(z_{m,:}; loc, scale) || N(z_{m,:}; 0, 1) ) for matrix inputs

    Parameters
    ----------
    loc : tf.Tensor
        n-dimensional vector, or M x n matrix of Gaussian means (one row each)
    scale : tf.Tensor
        n-dimensional vector, or M x n matrix of Gaussian standard deviations

    Returns
    -------
    tf.Tensor
        scalar
    """
    # 1e-8 keeps the log finite when a standard deviation underflows to 0.
    log_scale = tf.log(scale + 1e-8)
    return -0.5 * tf.reduce_sum(1.0 + 2.0 * log_scale - tf.square(loc) - tf.square(scale))
def log_gamma(x):
    """
    Approximate log Gamma(x).

    TensorFlow doesn't have special functions, so use a
    log/exp/polynomial approximation.
    http://www.machinedlearnings.com/2011/06/faster-lda.html
    """
    shifted = 3.0 + x
    logterm = tf.log(x * (1.0 + x) * (2.0 + x))
    return -2.081061466 - x + 0.0833333 / shifted - logterm + (2.5 + x) * tf.log(shifted)
def log_beta(x, y):
    """
    Approximate log B(x, y) = log Gamma(x) + log Gamma(y) - log Gamma(x + y),
    built on the polynomial approximation in log_gamma.
    """
    numerator = log_gamma(x) + log_gamma(y)
    return numerator - log_gamma(x + y)
def logit(x, clip_finite=True):
    """
    Logistic sigmoid transform together with its summed log-Jacobian.

    Works on both tf.Tensor and NumPy inputs; returns the pair
    (transformed, log_jacobian).

    NOTE(review): despite the name this is the sigmoid 1/(1+e^-x), and it
    redefines the earlier one-argument `logit` in this module, so only this
    version is visible after import.
    """
    if isinstance(x, tf.Tensor):
        if clip_finite:
            # Clip to roughly the range where exp() stays finite in float32.
            x = tf.clip_by_value(x, -88, 88, name="clipped_logit_input")
        transformed = 1.0 / (1 + tf.exp(-x))
        jacobian = transformed * (1-transformed)
        if clip_finite:
            # Keep the Jacobian strictly positive and finite before taking log().
            jacobian = tf.clip_by_value(jacobian, 1e-45, 1e38, name="clipped_jacobian")
        log_jacobian = tf.reduce_sum(tf.log(jacobian))
    else:
        transformed = 1.0 / (1 + np.exp(-x))
        jacobian = transformed * (1-transformed)
        log_jacobian = np.sum(np.log(jacobian))
    return transformed, log_jacobian
def multivariate_log_beta(x):
    """Approximate log of the multivariate Beta function of the vector x."""
    denominator = log_gamma(tf.reduce_sum(x))
    return tf.reduce_sum(log_gamma(x)) - denominator
def rbf(x):
    """RBF kernel element-wise: exp(-x^2 / 2)."""
    return tf.exp(-0.5 * tf.square(x))
# This is taken from PrettyTensor.
# https://github.com/google/prettytensor/blob/c9b69fade055d0eb35474fd23d07c43c892627bc/prettytensor/pretty_tensor_class.py#L1497
class VarStoreMethod(object):
    """Convenience base class for registered methods that create variables.

    Tracks the variables created so far by name; subclasses must provide a
    __call__ method.
    """

    def __init__(self):
        # name -> TensorFlow variable/tensor created by variable().
        self.vars = {}

    def variable(self, var_name, shape, init=tf.random_normal_initializer(), dt=tf.float32, train=True):
        """Adds a named variable to this bookkeeper or returns an existing one.

        Variables marked train are returned by the training_variables method.
        If the requested name already exists and is compatible (same shape),
        the cached variable is returned; an incompatible shape raises.

        Args:
          var_name: Unique name of this variable; an existing variable with
            the same name is returned instead of creating a new one.
          shape: The shape of the variable.
          init: Init function to use, or a Tensor to copy. Note the default
            initializer object is created once at definition time and shared.
          dt: The datatype; the base dtype is extracted automatically.
          train: Whether or not the variable should be trained.
        Returns:
          A TensorFlow tensor.
        Raises:
          ValueError: if a cached variable with the same name has a
            different shape.
        """
        # Normalize to a base TF dtype (e.g. strip reference types).
        dt = tf.as_dtype(dt).base_dtype
        if var_name in self.vars:
            cached = self.vars[var_name]
            if cached.get_shape() != shape:
                raise ValueError(
                    'Shape mismatch: %s vs %s. Perhaps a UnboundVariable had '
                    'incompatible values within a graph.' % (cached.get_shape(), shape))
            return cached
        if callable(init):
            created = tf.get_variable(var_name,
                                      shape=shape,
                                      dtype=dt,
                                      initializer=init,
                                      trainable=train)
        else:
            # A concrete value was passed: copy it into a constant tensor.
            created = tf.convert_to_tensor(init, name=var_name, dtype=dt)
            created.get_shape().assert_is_compatible_with(shape)
        self.vars[var_name] = created
        return created
class VARIABLE(VarStoreMethod):
    """
    A simple callable wrapper around VarStoreMethod's variable cache.

    The first call with a given name creates the TensorFlow variable; later
    calls with the same name return the cached variable instead of creating
    it again. This lets variables live outside of classes that depend on
    parameters — useful for parametric distributions whose parameters may or
    may not be random, and for inverse mappings (e.g. auto-encoders) whose
    parameters are stored outside the distribution class.
    """

    def __call__(self, name, shape):
        # Remember the most recently requested name, then delegate to the cache.
        self.name = name
        return self.variable(name, shape)


Variable = VARIABLE()
| [
"tensorflow.get_variable",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.set_random_seed",
"tensorflow.log",
"tensorflow.random_normal_initializer",
"numpy.exp",
"numpy.random.seed",
"tensorflow.clip_by_value",
"tensorflow.square",
"tensorflow.convert_to_tensor",
"tensorflow.reduce_max",
... | [((125, 142), 'numpy.random.seed', 'np.random.seed', (['x'], {}), '(x)\n', (139, 142), True, 'import numpy as np\n'), ((147, 168), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['x'], {}), '(x)\n', (165, 168), True, 'import tensorflow as tf\n'), ((1395, 1411), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {}), '(x)\n', (1408, 1411), True, 'import tensorflow as tf\n'), ((2335, 2350), 'tensorflow.mul', 'tf.mul', (['mask', 'X'], {}), '(mask, X)\n', (2341, 2350), True, 'import tensorflow as tf\n'), ((2362, 2378), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['X'], {}), '(X)\n', (2375, 2378), True, 'import tensorflow as tf\n'), ((3586, 3619), 'tensorflow.log', 'tf.log', (['(x * (1.0 + x) * (2.0 + x))'], {}), '(x * (1.0 + x) * (2.0 + x))\n', (3592, 3619), True, 'import tensorflow as tf\n'), ((4681, 4701), 'tensorflow.exp', 'tf.exp', (['(-0.5 * x * x)'], {}), '(-0.5 * x * x)\n', (4687, 4701), True, 'import tensorflow as tf\n'), ((1703, 1713), 'tensorflow.exp', 'tf.exp', (['(-x)'], {}), '(-x)\n', (1709, 1713), True, 'import tensorflow as tf\n'), ((1767, 1776), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (1773, 1776), True, 'import tensorflow as tf\n'), ((2298, 2325), 'tensorflow.ones', 'tf.ones', (['[n]'], {'dtype': 'X.dtype'}), '([n], dtype=X.dtype)\n', (2305, 2325), True, 'import tensorflow as tf\n'), ((5143, 5173), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (5171, 5173), True, 'import tensorflow as tf\n'), ((1535, 1545), 'tensorflow.exp', 'tf.exp', (['(-x)'], {}), '(-x)\n', (1541, 1545), True, 'import tensorflow as tf\n'), ((2048, 2071), 'tensorflow.reshape', 'tf.reshape', (['vec', '[1, d]'], {}), '(vec, [1, d])\n', (2058, 2071), True, 'import tensorflow as tf\n'), ((2187, 2210), 'tensorflow.reshape', 'tf.reshape', (['vec', '[d, 1]'], {}), '(vec, [d, 1])\n', (2197, 2210), True, 'import tensorflow as tf\n'), ((3708, 3719), 'tensorflow.log', 'tf.log', (['xp3'], {}), '(xp3)\n', (3714, 3719), True, 'import 
tensorflow as tf\n'), ((4013, 4069), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(-88)', '(88)'], {'name': '"""clipped_logit_input"""'}), "(x, -88, 88, name='clipped_logit_input')\n", (4029, 4069), True, 'import tensorflow as tf\n'), ((4211, 4276), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['jacobian', '(1e-45)', '(1e+38)'], {'name': '"""clipped_jacobian"""'}), "(jacobian, 1e-45, 1e+38, name='clipped_jacobian')\n", (4227, 4276), True, 'import tensorflow as tf\n'), ((4313, 4329), 'tensorflow.log', 'tf.log', (['jacobian'], {}), '(jacobian)\n', (4319, 4329), True, 'import tensorflow as tf\n'), ((4466, 4482), 'numpy.log', 'np.log', (['jacobian'], {}), '(jacobian)\n', (4472, 4482), True, 'import numpy as np\n'), ((4604, 4620), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {}), '(x)\n', (4617, 4620), True, 'import tensorflow as tf\n'), ((6253, 6268), 'tensorflow.as_dtype', 'tf.as_dtype', (['dt'], {}), '(dt)\n', (6264, 6268), True, 'import tensorflow as tf\n'), ((3364, 3380), 'tensorflow.square', 'tf.square', (['scale'], {}), '(scale)\n', (3373, 3380), True, 'import tensorflow as tf\n'), ((4103, 4113), 'tensorflow.exp', 'tf.exp', (['(-x)'], {}), '(-x)\n', (4109, 4113), True, 'import tensorflow as tf\n'), ((4375, 4385), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (4381, 4385), True, 'import numpy as np\n'), ((6597, 6684), 'tensorflow.get_variable', 'tf.get_variable', (['var_name'], {'shape': 'shape', 'dtype': 'dt', 'initializer': 'init', 'trainable': 'train'}), '(var_name, shape=shape, dtype=dt, initializer=init,\n trainable=train)\n', (6612, 6684), True, 'import tensorflow as tf\n'), ((6850, 6901), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['init'], {'name': 'var_name', 'dtype': 'dt'}), '(init, name=var_name, dtype=dt)\n', (6870, 6901), True, 'import tensorflow as tf\n'), ((1465, 1481), 'tensorflow.sub', 'tf.sub', (['x', 'x_max'], {}), '(x, x_max)\n', (1471, 1481), True, 'import tensorflow as tf\n'), ((1599, 1611), 
'tensorflow.sqrt', 'tf.sqrt', (['(2.0)'], {}), '(2.0)\n', (1606, 1611), True, 'import tensorflow as tf\n'), ((3347, 3361), 'tensorflow.square', 'tf.square', (['loc'], {}), '(loc)\n', (3356, 3361), True, 'import tensorflow as tf\n'), ((3290, 3311), 'tensorflow.log', 'tf.log', (['(scale + 1e-08)'], {}), '(scale + 1e-08)\n', (3296, 3311), True, 'import tensorflow as tf\n')] |
import otsu
import cv2
import numpy as np
if __name__ == "__main__":
    # Load the digit image as 8-bit grayscale.
    # NOTE(review): cv2.imread returns None if the file is missing; there is
    # no check here, so a missing '7.jpg' fails later with a cryptic error.
    image = cv2.imread('7.jpg', cv2.IMREAD_GRAYSCALE)
    arr = np.asarray(image)
    # Downscale to a 28x28 (MNIST-style) array.
    arr2 = cv2.resize(arr, (28, 28))
    # Dump the resized pixel values for inspection.
    np.savetxt('./7_2.txt', arr2, fmt='%f')
    # Run Otsu thresholding on the resized array.
    otsu.otsu(arr2)
| [
"otsu.otsu",
"numpy.asarray",
"numpy.savetxt",
"cv2.resize",
"cv2.imread"
] | [((82, 123), 'cv2.imread', 'cv2.imread', (['"""7.jpg"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('7.jpg', cv2.IMREAD_GRAYSCALE)\n", (92, 123), False, 'import cv2\n'), ((134, 151), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (144, 151), True, 'import numpy as np\n'), ((163, 188), 'cv2.resize', 'cv2.resize', (['arr', '(28, 28)'], {}), '(arr, (28, 28))\n', (173, 188), False, 'import cv2\n'), ((193, 232), 'numpy.savetxt', 'np.savetxt', (['"""./7_2.txt"""', 'arr2'], {'fmt': '"""%f"""'}), "('./7_2.txt', arr2, fmt='%f')\n", (203, 232), True, 'import numpy as np\n'), ((237, 252), 'otsu.otsu', 'otsu.otsu', (['arr2'], {}), '(arr2)\n', (246, 252), False, 'import otsu\n')] |
import os
import glob
import argparse
import numpy as np
from scipy.stats import gaussian_kde
from matplotlib import pyplot as plt
from matplotlib import gridspec
from cryoio import star
def calc_rmse(a, b):
    """
    Row-wise RMSE between two arrays of shape (n, 3), e.g. Euler-angle
    triplets; returns an array of n RMSE values.
    """
    squared_error = (a - b) ** 2
    return np.sqrt(squared_error.mean(axis=1))
def plot_rmse(working_directory):
    """Plot RMSE distributions of the Euler angles for every iteration.

    For each ``it*`` subdirectory of *working_directory*, compares the
    orientations against the ground truth (``exp_projections.star``) and
    against the previous iteration, saving one figure per iteration into
    ``<working_directory>/Figures``.
    """
    figure_dir = os.path.join(working_directory, 'Figures')
    # exist_ok avoids the racy exists()-then-makedirs() pattern.
    os.makedirs(figure_dir, exist_ok=True)
    x_grid = np.arange(0, 360, 10)
    correct = star.get_EAs_from_star(os.path.join(
        working_directory, 'exp_projections.star'))
    plt.figure(0)
    first = star.get_EAs_from_star(os.path.join(
        working_directory, 'it000', 'orientations.star'))
    correct_rmse = calc_rmse(correct, first)
    correct_kde = gaussian_kde(correct_rmse)
    plt.plot(x_grid, correct_kde.evaluate(x_grid))
    plt.xlabel('RMSE for 3 Euler angles')
    plt.ylabel('Count')
    plt.title('Compare with correct angle distribution')
    plt.savefig(os.path.join(figure_dir, 'it000'), dpi=150)
    # glob.glob returns paths in arbitrary order; sort so that iteration
    # folders (it000, it001, ...) are processed and labelled in order and
    # exp_folder[0] really is it000.
    exp_folder = sorted(glob.glob(os.path.join(working_directory, 'it*')))
    last = star.get_EAs_from_star(os.path.join(exp_folder[0], 'orientations.star'))
    exp_folder.pop(0)  # drop it000, already plotted above
    for i, folder in enumerate(exp_folder, start=1):
        now = star.get_EAs_from_star(os.path.join(
            folder, 'orientations.star'))
        correct_rmse = calc_rmse(correct, now)
        last_rmse = calc_rmse(last, now)
        last = now
        fig = plt.figure(num=i, figsize=(16, 6))
        gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
        plt.suptitle('Iteration: {0}'.format(i))
        # Left panel: distribution of errors w.r.t. the ground truth.
        plt.subplot(gs[0])
        correct_kde = gaussian_kde(correct_rmse)
        plt.plot(x_grid, correct_kde.evaluate(x_grid))
        plt.xlabel('RMSE for 3 Euler angles')
        plt.ylabel('Count')
        plt.title('Compare with correct angle distribution')
        # Right panel: distribution of changes since the previous iteration.
        plt.subplot(gs[1])
        last_kde = gaussian_kde(last_rmse)
        plt.plot(x_grid, last_kde.evaluate(x_grid))
        plt.xlabel('RMSE for 3 Euler angles')
        plt.ylabel('Count')
        plt.title('Compare with angle distribution of last iteration')
        plt.savefig(os.path.join(figure_dir, 'it' + str(i).zfill(3)),
                    dpi=150, bbox_inches='tight')
        plt.close(fig)
def main():
    """Parse the command line and plot RMSE curves for a run directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--working_directory', type=str)
    args = parser.parse_args()
    run_dir = os.path.abspath(args.working_directory)
    plot_rmse(run_dir)


if __name__ == '__main__':
    main()
| [
"numpy.mean",
"os.path.exists",
"scipy.stats.gaussian_kde",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"os.makedirs",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"os.path.abspath",
"matplotlib.... | [((397, 439), 'os.path.join', 'os.path.join', (['working_directory', '"""Figures"""'], {}), "(working_directory, 'Figures')\n", (409, 439), False, 'import os\n'), ((540, 561), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(10)'], {}), '(0, 360, 10)\n', (549, 561), True, 'import numpy as np\n'), ((670, 683), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (680, 683), True, 'from matplotlib import pyplot as plt\n'), ((884, 910), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['correct_rmse'], {}), '(correct_rmse)\n', (896, 910), False, 'from scipy.stats import gaussian_kde\n'), ((989, 1026), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""RMSE for 3 Euler angles"""'], {}), "('RMSE for 3 Euler angles')\n", (999, 1026), True, 'from matplotlib import pyplot as plt\n'), ((1031, 1050), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (1041, 1050), True, 'from matplotlib import pyplot as plt\n'), ((1055, 1107), 'matplotlib.pyplot.title', 'plt.title', (['"""Compare with correct angle distribution"""'], {}), "('Compare with correct angle distribution')\n", (1064, 1107), True, 'from matplotlib import pyplot as plt\n'), ((2586, 2611), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2609, 2611), False, 'import argparse\n'), ((2762, 2796), 'os.path.abspath', 'os.path.abspath', (['working_directory'], {}), '(working_directory)\n', (2777, 2796), False, 'import os\n'), ((314, 343), 'numpy.mean', 'np.mean', (['((a - b) ** 2)'], {'axis': '(1)'}), '((a - b) ** 2, axis=1)\n', (321, 343), True, 'import numpy as np\n'), ((451, 477), 'os.path.exists', 'os.path.exists', (['figure_dir'], {}), '(figure_dir)\n', (465, 477), False, 'import os\n'), ((487, 525), 'os.makedirs', 'os.makedirs', (['figure_dir'], {'exist_ok': '(True)'}), '(figure_dir, exist_ok=True)\n', (498, 525), False, 'import os\n'), ((599, 654), 'os.path.join', 'os.path.join', (['working_directory', '"""exp_projections.star"""'], {}), 
"(working_directory, 'exp_projections.star')\n", (611, 654), False, 'import os\n'), ((719, 780), 'os.path.join', 'os.path.join', (['working_directory', '"""it000"""', '"""orientations.star"""'], {}), "(working_directory, 'it000', 'orientations.star')\n", (731, 780), False, 'import os\n'), ((1124, 1157), 'os.path.join', 'os.path.join', (['figure_dir', '"""it000"""'], {}), "(figure_dir, 'it000')\n", (1136, 1157), False, 'import os\n'), ((1196, 1234), 'os.path.join', 'os.path.join', (['working_directory', '"""it*"""'], {}), "(working_directory, 'it*')\n", (1208, 1234), False, 'import os\n'), ((1270, 1318), 'os.path.join', 'os.path.join', (['exp_folder[0]', '"""orientations.star"""'], {}), "(exp_folder[0], 'orientations.star')\n", (1282, 1318), False, 'import os\n'), ((1611, 1645), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'i', 'figsize': '(16, 6)'}), '(num=i, figsize=(16, 6))\n', (1621, 1645), True, 'from matplotlib import pyplot as plt\n'), ((1659, 1703), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {'width_ratios': '[1, 1]'}), '(1, 2, width_ratios=[1, 1])\n', (1676, 1703), False, 'from matplotlib import gridspec\n'), ((1761, 1779), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (1772, 1779), True, 'from matplotlib import pyplot as plt\n'), ((1835, 1861), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['correct_rmse'], {}), '(correct_rmse)\n', (1847, 1861), False, 'from scipy.stats import gaussian_kde\n'), ((1925, 1962), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""RMSE for 3 Euler angles"""'], {}), "('RMSE for 3 Euler angles')\n", (1935, 1962), True, 'from matplotlib import pyplot as plt\n'), ((1971, 1990), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (1981, 1990), True, 'from matplotlib import pyplot as plt\n'), ((1999, 2051), 'matplotlib.pyplot.title', 'plt.title', (['"""Compare with correct angle distribution"""'], {}), "('Compare with correct angle 
distribution')\n", (2008, 2051), True, 'from matplotlib import pyplot as plt\n'), ((2060, 2078), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (2071, 2078), True, 'from matplotlib import pyplot as plt\n'), ((2128, 2151), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['last_rmse'], {}), '(last_rmse)\n', (2140, 2151), False, 'from scipy.stats import gaussian_kde\n'), ((2212, 2249), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""RMSE for 3 Euler angles"""'], {}), "('RMSE for 3 Euler angles')\n", (2222, 2249), True, 'from matplotlib import pyplot as plt\n'), ((2258, 2277), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (2268, 2277), True, 'from matplotlib import pyplot as plt\n'), ((2286, 2348), 'matplotlib.pyplot.title', 'plt.title', (['"""Compare with angle distribution of last iteration"""'], {}), "('Compare with angle distribution of last iteration')\n", (2295, 2348), True, 'from matplotlib import pyplot as plt\n'), ((2477, 2491), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2486, 2491), True, 'from matplotlib import pyplot as plt\n'), ((1433, 1474), 'os.path.join', 'os.path.join', (['folder', '"""orientations.star"""'], {}), "(folder, 'orientations.star')\n", (1445, 1474), False, 'import os\n')] |
import logging
from functools import partial
import numpy as np
import torch
from torch import nn
from deepqmc import Molecule
from deepqmc.physics import pairwise_diffs, pairwise_distance
from deepqmc.torchext import sloglindet, triu_flat
from deepqmc.utils import NULL_DEBUG
from deepqmc.wf import WaveFunction
from .cusp import CuspCorrection, ElectronicAsymptotic
from .distbasis import DistanceBasis
from .gto import GTOBasis
from .molorb import MolecularOrbital
from .omni import OmniSchNet
__version__ = '0.1.0'
# Public API of this module.
__all__ = ['PauliNet']
log = logging.getLogger(__name__)  # module-level logger
def eval_slater(xs):
    """Batched determinant of the Slater matrices in *xs*.

    An empty (zero-orbital) matrix has determinant 1 by convention.
    """
    n_orbitals = xs.shape[-1]
    if n_orbitals == 0:
        return xs.new_ones(xs.shape[:-2])
    return torch.det(xs.contiguous())
def eval_log_slater(xs):
    """Batched (sign, log|det|) of the Slater matrices in *xs*.

    Empty (zero-orbital) matrices yield sign 1 and log-determinant 0.
    """
    if xs.shape[-1] > 0:
        return xs.contiguous().slogdet()
    return xs.new_ones(xs.shape[:-2]), xs.new_zeros(xs.shape[:-2])
def eval_vandermonde(xs):
    """Vandermonde-style product of pairwise differences.

    The last dimension of *xs* is first reduced by a product; the result is
    prod_{i<j} (p_j - p_i) over the remaining last dimension.
    """
    print('non-log vandermonde is evaluated')
    products = xs.prod(dim=-1)
    num_funcs = products.shape[-1]
    if num_funcs == 1:
        # A single basis function: no pairs, return the product itself.
        return products.squeeze(dim=-1)
    result = 1.0
    for i in range(num_funcs):
        for j in range(i + 1, num_funcs):
            result = result * (products[..., j] - products[..., i])
    return result
def eval_log_vandermonde(xs):
    """Sign and log-magnitude of the Vandermonde-style pairwise product.

    Computes the same quantity as eval_vandermonde but returns the pair
    (sign, log_abs) for numerical stability.
    """
    products = xs.prod(dim=-1)
    num_funcs = products.shape[-1]
    if num_funcs == 1:
        # A single basis function: no pairs, return the product itself.
        value = products.squeeze(dim=-1)
        return torch.sign(value), torch.log(torch.abs(value))
    log_total = 0.0
    sign = 1.0
    for i in range(num_funcs):
        for j in range(i + 1, num_funcs):
            diff = products[..., j] - products[..., i]
            log_total = log_total + torch.log(torch.abs(diff))
            sign = sign * torch.sign(diff)
    return sign, log_total
class PauliNet(WaveFunction):
r"""Implements the PauliNet ansatz from [Hermann19]_.
Derived from :class:`WaveFunction`. This constructor provides a fully
flexible low-level interface. See the alternative constructors for the
high-level interfaces.
The PauliNet ansatz combines a multireference Slater determinant expansion,
Gaussian-type cusp-corrected molecular orbitals (:class:`MolecularOrbital`),
electronic cusp Jastrow factor (:class:`ElectronicAsymptotic`) and many-body
Jastrow factor and backflow transformation represented by neural networks
that use featurized particle distances,
:math:`\mathbf e(|\mathbf r-\mathbf r'|)` (:class:`DistanceBasis`), as input,
.. math::
\psi_{\boldsymbol\theta}(\mathbf r)
=\mathrm e^{\gamma(\mathbf r)+J_{\boldsymbol\theta}(\mathbf r)}
\sum_{pq} c_p
\det[\tilde\varphi_{\boldsymbol\theta,q{\mu_p}i}^\uparrow(\mathbf r)]
\det[\tilde\varphi_{\boldsymbol\theta,q{\mu_p}i}^\downarrow(\mathbf r)] \\
\tilde\varphi_{\boldsymbol\theta,q\mu i}(\mathbf r)
:=\big(1+2\tanh(\kappa_{\boldsymbol\theta,q\mu i}(\mathbf r))\big)
\varphi_\mu(\mathbf r_i)
Here, :math:`c_p,\mu_p` define the multideterminant expansion,
:math:`\varphi_\mu(\mathbf r)` are the baseline
single-electron molecular orbitals, :math:`J_{\boldsymbol\theta}(\mathbf r)`
is the permutation-invariant deep Jastrow factor,
:math:`\kappa_{\boldsymbol\theta,q\mu i}(\mathbf r)` is the :math:`q`-th
channel of the permutation-equivariant deep backflow, and :math:`\gamma`
enforces correct electronic cusp conditions.
Args:
mol (:class:`~deepqmc.Molecule`): molecule whose wave function is represented
basis (:class:`~deepqmc.wf.paulinet.GTOBasis`): basis for the molecular orbitals
jastrow_factory (callable): constructor for a Jastrow factor,
:math:`(M,\dim(\mathbf e),N^\uparrow,N^\downarrow)`
:math:`\rightarrow(\mathbf e_{ij},\mathbf e_{iI})\rightarrow J`
backflow_factory (callable): constructor for a backflow,
:math:`(M,\dim(\mathbf e),N^\uparrow,N^\downarrow,N_\text{orb},C)`
:math:`\rightarrow(\mathbf e_{ij},\mathbf e_{iI})`
:math:`\rightarrow\kappa_{q\mu i}`
omni_factory (callable): constructor for a combined Jastrow factor and backflow,
with interface identical to :class:`~deepqmc.wf.paulinet.OmniSchNet`
n_configurations (int): number of electron configurations
n_orbitals (int): number of distinct molecular orbitals used across all
configurations if given, otherwise the larger of the number of spin-up
and spin-down electrons
mo_factory (callable): passed to :class:`~deepqmc.wf.paulinet.MolecularOrbital`
as ``net_factory``
cusp_correction (bool): whether nuclear cusp correction is used
cusp_electrons (bool): whether electronic cusp function is used
dist_feat_dim (int): :math:`\dim(\mathbf e)`, number of distance features
dist_feat_cutoff (float, a.u.): distance at which distance features
go to zero
backflow_channels (int): :math:`C`, number of backflow channels
Attributes:
jastrow: :class:`torch.nn.Module` representing the Jastrow factor
backflow: :class:`torch.nn.Module` representing the backflow transformation
conf_coeff: :class:`torch.nn.Linear` with no bias that represents
the multireference coefficients :math:`c_p` via its :attr:`weight`
variable of shape :math:`(1,N_\text{det})`
"""
    def __init__(
        self,
        mol,
        basis,
        jastrow_factory=None,
        backflow_factory=None,
        omni_factory=None,
        n_configurations=1,
        n_orbitals=None,
        mo_factory=None,
        return_log=True,
        use_sloglindet='training',
        use_vandermonde=False,
        *,
        cusp_correction=False,
        cusp_electrons=False,
        dist_feat_dim=32,
        dist_feat_cutoff=10.0,
        backflow_type='orbital',
        backflow_channels=1,
        backflow_transform='mult',
        rc_scaling=1.0,
        cusp_alpha=10.0,
        freeze_embed=False,
    ):
        # sloglindet works in the log domain, so it requires return_log=True.
        assert use_sloglindet in {'never', 'training', 'always'}
        assert return_log or use_sloglindet == 'never'
        super().__init__(mol)
        n_up, n_down = self.n_up, self.n_down
        # Distance featurization is only needed when some neural component
        # (MO net, Jastrow, backflow, or omni) consumes distance features.
        self.dist_basis = (
            DistanceBasis(dist_feat_dim, cutoff=dist_feat_cutoff, envelope='nocusp')
            if mo_factory or jastrow_factory or backflow_factory or omni_factory
            else None
        )
        n_orbitals = n_orbitals or max(n_up, n_down)
        # First configuration is the baseline occupation (lowest orbitals per
        # spin); any additional ones draw random orbital subsets per spin.
        confs = [list(range(n_up)) + list(range(n_down))] + [
            sum((torch.randperm(n_orbitals)[:n].tolist() for n in (n_up, n_down)), [])
            for _ in range(n_configurations - 1)
        ]
        self.register_buffer('confs', torch.tensor(confs))
        self.conf_coeff = (
            nn.Linear(n_configurations, 1, bias=False)
            if n_configurations > 1
            else nn.Identity()
        )
        self.mo = MolecularOrbital(
            mol,
            basis,
            n_orbitals,
            net_factory=mo_factory,
            dist_feat_dim=dist_feat_dim,
            cusp_correction=cusp_correction,
            rc_scaling=rc_scaling,
        )
        # Electron-electron cusp terms: 0.25 for same-spin, 0.5 for anti-spin.
        self.cusp_same, self.cusp_anti = (
            (ElectronicAsymptotic(cusp=cusp, alpha=cusp_alpha) for cusp in (0.25, 0.5))
            if cusp_electrons
            else (None, None)
        )
        self.jastrow = (
            jastrow_factory(len(mol), dist_feat_dim, n_up, n_down)
            if jastrow_factory
            else None
        )
        # Backflow output spec [feature count, channel count] per backflow type.
        backflow_spec = {
            'orbital': [n_orbitals, backflow_channels],
            'det': [max(n_up, n_down), len(self.confs) * backflow_channels],
        }[backflow_type]
        if backflow_transform == 'both':
            # Half of the channels act multiplicatively, half additively.
            backflow_spec[1] *= 2
        self.backflow_type = backflow_type
        self.backflow_transform = backflow_transform
        self.backflow = (
            backflow_factory(len(mol), dist_feat_dim, n_up, n_down, *backflow_spec)
            if backflow_factory
            else None
        )
        self.r_backflow = None
        if omni_factory:
            # The omni network supplies Jastrow and backflow jointly; they
            # must not also be given separately.
            assert not backflow_factory and not jastrow_factory
            self.omni = omni_factory(mol, dist_feat_dim, n_up, n_down, *backflow_spec)
            self.backflow = self.omni.forward_backflow
            self.r_backflow = self.omni.forward_r_backflow
            self.jastrow = self.omni.forward_jastrow
        else:
            self.omni = None
        self.return_log = return_log
        if freeze_embed:
            self.requires_grad_embeddings_(False)
        self.n_determinants = len(self.confs) * backflow_channels
        self.n_backflows = 0 if not self.backflow else backflow_spec[1]
        self.use_vandermonde = use_vandermonde
        if n_up <= 1 or n_down <= 1:
            self.use_sloglindet = 'never'
            log.warning(
                'Setting use_sloglindet to "never" as not implemented for n=0 and n=1.'
            )
        # NOTE(review): when n_up <= 1 (or n_down <= 1) and use_vandermonde is
        # False, the else branch below overwrites the 'never' just set above
        # with the user-supplied value — presumably unintended; verify.
        if self.use_vandermonde:
            self.use_sloglindet = 'never'
            log.warning(
                'Setting use_sloglindet to "never" as incompatible with Vandermonde determinant.'
            )
            # TODO implement sloglindet for special cases n=0 and n=1
        else:
            self.use_sloglindet = use_sloglindet
def requires_grad_classes_(self, classes, requires_grad):
for m in self.modules():
if isinstance(m, classes):
for p in m.parameters(recurse=False):
p.requires_grad_(requires_grad)
return self
    def requires_grad_cusps_(self, requires_grad):
        # Convenience wrapper: (un)freeze all CuspCorrection parameters.
        return self.requires_grad_classes_(CuspCorrection, requires_grad)
    def requires_grad_embeddings_(self, requires_grad):
        # Convenience wrapper: (un)freeze all nn.Embedding parameters.
        return self.requires_grad_classes_(nn.Embedding, requires_grad)
    def requires_grad_nets_(self, requires_grad):
        # Convenience wrapper: (un)freeze all nn.Linear parameters.
        return self.requires_grad_classes_(nn.Linear, requires_grad)
    @classmethod
    def from_pyscf(
        cls,
        mf,
        *,
        init_weights=True,
        freeze_mos=True,
        freeze_confs=False,
        conf_cutoff=1e-2,
        conf_limit=None,
        **kwargs,
    ):
        r"""Construct a :class:`PauliNet` instance from a finished PySCF_ calculation.
        Args:
            mf (:class:`pyscf.scf.hf.RHF` | :class:`pyscf.mcscf.mc1step.CASSCF`):
                restricted (multireference) HF calculation
            init_weights (bool): whether molecular orbital coefficients and
                configuration coefficients are initialized from the HF calculation
            freeze_mos (bool): whether the MO coefficients are frozen for
                gradient optimization
            freeze_confs (bool): whether the configuration coefficients are
                frozen for gradient optimization
            conf_cutoff (float): determinants with a linear coefficient above
                this threshold are included in the determinant expansion
            conf_limit (int): if given, at maximum the given number of configurations
                with the largest linear coefficients are used in the ansatz
            kwargs: all other arguments are passed to the :class:`PauliNet`
                constructor
        .. _PySCF: http://pyscf.org
        """
        assert not (set(kwargs) & {'n_configurations', 'n_orbitals'})
        n_up, n_down = mf.mol.nelec
        # Multireference (CASSCF) input: extract the CI expansion.
        if hasattr(mf, 'fcisolver'):
            if conf_limit:
                # Raise the cutoff just below the conf_limit-th largest CI
                # coefficient so at most conf_limit configurations survive.
                conf_cutoff = max(
                    np.sort(abs(mf.ci.flatten()))[-conf_limit:][0] - 1e-10, conf_cutoff,
                )
            # Retry with a slightly larger tolerance (+2e-10) in case
            # degenerate coefficients push the count over conf_limit.
            for tol in [conf_cutoff, conf_cutoff + 2e-10]:
                conf_coeff, *confs = zip(
                    *mf.fcisolver.large_ci(
                        mf.ci, mf.ncas, mf.nelecas, tol=tol, return_strs=False
                    )
                )
                if not conf_limit or len(conf_coeff) <= conf_limit:
                    break
            else:
                raise AssertionError()
            # discard the last ci wave function if degenerate
            # Number of doubly-occupied (core) orbitals per spin channel.
            ns_dbl = n_up - mf.nelecas[0], n_down - mf.nelecas[1]
            conf_coeff = torch.tensor(conf_coeff)
            # Prepend the frozen core orbitals, then shift the active-space
            # indices by the core size.
            confs = [
                [
                    torch.arange(n_dbl, dtype=torch.long).expand(len(conf_coeff), -1),
                    torch.tensor(cfs, dtype=torch.long) + n_dbl,
                ]
                for n_dbl, cfs in zip(ns_dbl, confs)
            ]
            confs = [torch.cat(cfs, dim=-1) for cfs in confs]
            confs = torch.cat(confs, dim=-1)
            kwargs['n_configurations'] = len(confs)
            kwargs['n_orbitals'] = confs.max().item() + 1
        else:
            confs = None
        mol = Molecule(
            mf.mol.atom_coords().astype('float32'),
            mf.mol.atom_charges(),
            mf.mol.charge,
            mf.mol.spin,
        )
        basis = GTOBasis.from_pyscf(mf.mol)
        wf = cls(mol, basis, **kwargs)
        if init_weights:
            wf.mo.init_from_pyscf(mf, freeze_mos=freeze_mos)
            if confs is not None:
                wf.confs.detach().copy_(confs)
                if len(confs) > 1:
                    wf.conf_coeff.weight.detach().copy_(conf_coeff)
                    if freeze_confs:
                        wf.conf_coeff.weight.requires_grad_(False)
        return wf
    @classmethod
    def from_hf(
        cls, mol, *, basis='6-311g', cas=None, pauli_kwargs=None, omni_kwargs=None
    ):
        r"""Construct a :class:`PauliNet` instance by running a HF calculation.
        This is the top-level interface.
        Args:
            mol (:class:`~deepqmc.Molecule`): molecule whose wave function
                is represented
            basis (str): basis of the internal HF calculation
            cas ((int, int)): tuple of the number of active orbitals and number of
                active electrons for a complete active space multireference
                HF calculation
            pauli_kwargs: arguments passed to :func:`PauliNet.from_pyscf`
            omni_kwargs: arguments passed to :class:`~deepqmc.wf.paulinet.OmniSchNet`
        """
        from pyscf import gto, lib, mcscf, scf
        # Build the PySCF molecule from our Molecule (coords are in bohr).
        mol = gto.M(
            atom=mol.as_pyscf(),
            unit='bohr',
            basis=basis,
            charge=mol.charge,
            spin=mol.spin,
            cart=True,
        )
        log.info('Running HF...')
        mf = scf.RHF(mol)
        mf.kernel()
        if cas:
            log.info('Running MCSCF...')
            mc = mcscf.CASSCF(mf, *cas)
            mc.kernel()
            # Persist the CI vector and active-electron counts to the chkfile.
            lib.chkfile.dump(mc.chkfile, 'ci', mc.ci)
            lib.chkfile.dump(mc.chkfile, 'nelecas', mc.nelecas)
        # Explicit kwargs below can be overridden through pauli_kwargs.
        wf = PauliNet.from_pyscf(
            mc if cas else mf,
            **{
                'omni_factory': partial(OmniSchNet, **(omni_kwargs or {})),
                'cusp_correction': True,
                'cusp_electrons': True,
                **(pauli_kwargs or {}),
            },
        )
        wf.mf = mf
        return wf
def _backflow_op(self, xs, fs):
if self.backflow_transform == 'mult':
fs_mult, fs_add = fs, None
elif self.backflow_transform == 'add':
fs_mult, fs_add = None, fs
elif self.backflow_transform == 'both':
fs_mult, fs_add = fs[:, : fs.shape[1] // 2], fs[:, fs.shape[1] // 2 :]
if fs_add is not None:
envel = (xs ** 2).mean(dim=-1, keepdim=True).sqrt()
if fs_mult is not None:
xs = xs * (1 + 2 * torch.tanh(fs_mult / 4))
if fs_add is not None:
xs = xs + 0.1 * envel * torch.tanh(fs_add / 4)
return xs
    def forward(self, rs, debug=NULL_DEBUG): # noqa: C901
        """Evaluate the wave function at electron positions ``rs``.

        rs has shape [batch, n_electrons, 3].  Returns ``(log|psi|, sign)``
        when ``self.return_log`` is true, otherwise ``psi`` itself.
        NOTE(review): the `single_orbital` premultiplication below only
        happens on the Vandermonde + cusp path -- confirm that is intended.
        """
        batch_dim, n_elec = rs.shape[:2]
        assert n_elec == self.confs.shape[1]
        n_atoms = len(self.mol)
        coords = self.mol.coords
        # Electron-nucleus difference vectors (nuclei are prepended to the batch)
        diffs_nuc = pairwise_diffs(torch.cat([coords, rs.flatten(end_dim=1)]), coords)
        edges_nuc = (
            self.dist_basis(diffs_nuc[:, :, 3].sqrt())
            if self.jastrow or self.mo.net
            else None
        )
        if self.r_backflow or self.backflow or self.cusp_same or self.jastrow:
            dists_elec = pairwise_distance(rs, rs)
        if self.r_backflow or self.backflow or self.jastrow:
            edges_nuc = edges_nuc[n_atoms:].view(batch_dim, n_elec, n_atoms, -1)
            edges = self.dist_basis(dists_elec), edges_nuc
        if self.r_backflow:
            # Real-space backflow: shift the electron coordinates themselves
            rs_flowed = self.r_backflow(rs, *edges, debug=debug)
            diffs_nuc = pairwise_diffs(
                torch.cat([coords, rs_flowed.flatten(end_dim=1)]), coords
            )
        with debug.cd('mos'):
            xs = self.mo(diffs_nuc, edges_nuc, debug=debug)
        # get orbitals as [bs, 1, i, mu]
        xs = debug['slaters'] = xs.view(batch_dim, 1, n_elec, -1)
        # this should be a product of one orbital evaluated on each electron.
        single_orbital = xs[..., 0].prod(dim=-1).squeeze(dim=-1)
        if self.backflow:
            with debug.cd('backflow'):
                fs = self.backflow(*edges, debug=debug) # [bs, q, i, mu/nu]
            if self.backflow_type == 'orbital':
                xs = self._backflow_op(xs, fs)
        # form dets as [bs, q, p, i, nu]
        conf_up, conf_down = self.confs[:, : self.n_up], self.confs[:, self.n_up :]
        det_up = xs[:, :, : self.n_up, conf_up].transpose(-3, -2)
        det_down = xs[:, :, self.n_up :, conf_down].transpose(-3, -2)
        if self.backflow and self.backflow_type == 'det':
            n_conf = len(self.confs)
            fs = fs.unflatten(1, ((None, fs.shape[1] // n_conf), (None, n_conf)))
            det_up = self._backflow_op(det_up, fs[..., : self.n_up, : self.n_up])
            det_down = self._backflow_op(det_down, fs[..., self.n_up :, : self.n_down])
            # with open-shell systems, part of the backflow output is not used
        if self.use_sloglindet == 'always' or (
            self.use_sloglindet == 'training' and not self.sampling
        ):
            # Fused sign/log linear combination of determinants (numerically stable)
            bf_dim = det_up.shape[-4]
            if isinstance(self.conf_coeff, nn.Linear):
                conf_coeff = self.conf_coeff.weight[0]
                conf_coeff = conf_coeff.expand(bf_dim, -1).flatten() / np.sqrt(bf_dim)
            else:
                conf_coeff = det_up.new_ones(1)
            det_up = det_up.flatten(start_dim=-4, end_dim=-3).contiguous()
            det_down = det_down.flatten(start_dim=-4, end_dim=-3).contiguous()
            sign, psi = sloglindet(conf_coeff, det_up, det_down)
            sign = sign.detach()
        else:
            if self.return_log:
                if self.use_vandermonde:
                    sign_up, det_up = eval_log_vandermonde(det_up)
                    sign_down, det_down = eval_log_vandermonde(det_down)
                    xs = det_up + det_down
                else:
                    sign_up, det_up = eval_log_slater(det_up)
                    sign_down, det_down = eval_log_slater(det_down)
                    xs = det_up + det_down
                # premultiply by diagonal to ensure cusps? make sure antisymmetry is maintained!
                xs_shift = xs.flatten(start_dim=1).max(dim=-1).values
                # the exp-normalize trick, to avoid over/underflow of the exponential
                xs = sign_up * sign_down * torch.exp(xs - xs_shift[:, None, None])
            else:
                if self.use_vandermonde:
                    det_up = debug['det_up'] = eval_vandermonde(det_up)
                    det_down = debug['det_down'] = eval_vandermonde(det_down)
                    xs = det_up * det_down
                else:
                    det_up = debug['det_up'] = eval_slater(det_up)
                    det_down = debug['det_down'] = eval_slater(det_down)
                    xs = det_up * det_down
            psi = self.conf_coeff(xs).squeeze(dim=-1).mean(dim=-1)
            if self.return_log:
                psi, sign = psi.abs().log() + xs_shift, psi.sign().detach()
        if self.cusp_same:
            # Electron-electron cusp corrections: same-spin pairs ...
            cusp_same = self.cusp_same(
                torch.cat(
                    [triu_flat(dists_elec[:, idxs, idxs]) for idxs in self.spin_slices],
                    dim=1,
                )
            )
            # ... and antiparallel (up-down) pairs
            cusp_anti = self.cusp_anti(
                dists_elec[:, : self.n_up, self.n_up :].flatten(start_dim=1)
            )
            if self.use_vandermonde:
                # here we multiply the final result by the single orbital
                psi = (
                    psi + cusp_same + cusp_anti + torch.log(torch.abs(single_orbital))
                    if self.return_log
                    else psi * torch.exp(cusp_same + cusp_anti) * single_orbital
                )
                if not self.return_log:
                    print('it gets called without log')
                else:
                    sign = sign * torch.sign(single_orbital)
            else:
                psi = (
                    psi + cusp_same + cusp_anti
                    if self.return_log
                    else psi * torch.exp(cusp_same + cusp_anti)
                )
        if self.jastrow:
            with debug.cd('jastrow'):
                J = self.jastrow(*edges, debug=debug)
            psi = psi + J if self.return_log else psi * torch.exp(J)
        if self.omni:
            self.omni.forward_close()
        return (psi, sign) if self.return_log else psi
| [
"logging.getLogger",
"numpy.sqrt",
"torch.randperm",
"torch.exp",
"pyscf.mcscf.CASSCF",
"deepqmc.torchext.triu_flat",
"torch.arange",
"torch.tanh",
"torch.nn.Identity",
"deepqmc.physics.pairwise_distance",
"torch.abs",
"pyscf.lib.chkfile.dump",
"torch.sign",
"deepqmc.torchext.sloglindet",
... | [((553, 580), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (570, 580), False, 'import logging\n'), ((15215, 15227), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (15222, 15227), False, 'from pyscf import gto, lib, mcscf, scf\n'), ((2044, 2064), 'torch.sign', 'torch.sign', (['squeezed'], {}), '(squeezed)\n', (2054, 2064), False, 'import torch\n'), ((7494, 7513), 'torch.tensor', 'torch.tensor', (['confs'], {}), '(confs)\n', (7506, 7513), False, 'import torch\n'), ((7555, 7597), 'torch.nn.Linear', 'nn.Linear', (['n_configurations', '(1)'], {'bias': '(False)'}), '(n_configurations, 1, bias=False)\n', (7564, 7597), False, 'from torch import nn\n'), ((7651, 7664), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (7662, 7664), False, 'from torch import nn\n'), ((12935, 12959), 'torch.tensor', 'torch.tensor', (['conf_coeff'], {}), '(conf_coeff)\n', (12947, 12959), False, 'import torch\n'), ((13319, 13343), 'torch.cat', 'torch.cat', (['confs'], {'dim': '(-1)'}), '(confs, dim=-1)\n', (13328, 13343), False, 'import torch\n'), ((15322, 15344), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', '*cas'], {}), '(mf, *cas)\n', (15334, 15344), False, 'from pyscf import gto, lib, mcscf, scf\n'), ((15381, 15422), 'pyscf.lib.chkfile.dump', 'lib.chkfile.dump', (['mc.chkfile', '"""ci"""', 'mc.ci'], {}), "(mc.chkfile, 'ci', mc.ci)\n", (15397, 15422), False, 'from pyscf import gto, lib, mcscf, scf\n'), ((15435, 15486), 'pyscf.lib.chkfile.dump', 'lib.chkfile.dump', (['mc.chkfile', '"""nelecas"""', 'mc.nelecas'], {}), "(mc.chkfile, 'nelecas', mc.nelecas)\n", (15451, 15486), False, 'from pyscf import gto, lib, mcscf, scf\n'), ((17011, 17036), 'deepqmc.physics.pairwise_distance', 'pairwise_distance', (['rs', 'rs'], {}), '(rs, rs)\n', (17028, 17036), False, 'from deepqmc.physics import pairwise_diffs, pairwise_distance\n'), ((19329, 19369), 'deepqmc.torchext.sloglindet', 'sloglindet', (['conf_coeff', 'det_up', 'det_down'], {}), '(conf_coeff, 
det_up, det_down)\n', (19339, 19369), False, 'from deepqmc.torchext import sloglindet, triu_flat\n'), ((2076, 2095), 'torch.abs', 'torch.abs', (['squeezed'], {}), '(squeezed)\n', (2085, 2095), False, 'import torch\n'), ((2446, 2462), 'torch.sign', 'torch.sign', (['diff'], {}), '(diff)\n', (2456, 2462), False, 'import torch\n'), ((13258, 13280), 'torch.cat', 'torch.cat', (['cfs'], {'dim': '(-1)'}), '(cfs, dim=-1)\n', (13267, 13280), False, 'import torch\n'), ((2403, 2418), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (2412, 2418), False, 'import torch\n'), ((15600, 15640), 'functools.partial', 'partial', (['OmniSchNet'], {}), '(OmniSchNet, **omni_kwargs or {})\n', (15607, 15640), False, 'from functools import partial\n'), ((16416, 16438), 'torch.tanh', 'torch.tanh', (['(fs_add / 4)'], {}), '(fs_add / 4)\n', (16426, 16438), False, 'import torch\n'), ((19069, 19084), 'numpy.sqrt', 'np.sqrt', (['bf_dim'], {}), '(bf_dim)\n', (19076, 19084), True, 'import numpy as np\n'), ((20166, 20205), 'torch.exp', 'torch.exp', (['(xs - xs_shift[:, None, None])'], {}), '(xs - xs_shift[:, None, None])\n', (20175, 20205), False, 'import torch\n'), ((13107, 13142), 'torch.tensor', 'torch.tensor', (['cfs'], {'dtype': 'torch.long'}), '(cfs, dtype=torch.long)\n', (13119, 13142), False, 'import torch\n'), ((16324, 16347), 'torch.tanh', 'torch.tanh', (['(fs_mult / 4)'], {}), '(fs_mult / 4)\n', (16334, 16347), False, 'import torch\n'), ((20953, 20989), 'deepqmc.torchext.triu_flat', 'triu_flat', (['dists_elec[:, idxs, idxs]'], {}), '(dists_elec[:, idxs, idxs])\n', (20962, 20989), False, 'from deepqmc.torchext import sloglindet, triu_flat\n'), ((21723, 21749), 'torch.sign', 'torch.sign', (['single_orbital'], {}), '(single_orbital)\n', (21733, 21749), False, 'import torch\n'), ((21910, 21942), 'torch.exp', 'torch.exp', (['(cusp_same + cusp_anti)'], {}), '(cusp_same + cusp_anti)\n', (21919, 21942), False, 'import torch\n'), ((22138, 22150), 'torch.exp', 'torch.exp', (['J'], {}), '(J)\n', 
(22147, 22150), False, 'import torch\n'), ((13020, 13057), 'torch.arange', 'torch.arange', (['n_dbl'], {'dtype': 'torch.long'}), '(n_dbl, dtype=torch.long)\n', (13032, 13057), False, 'import torch\n'), ((21406, 21431), 'torch.abs', 'torch.abs', (['single_orbital'], {}), '(single_orbital)\n', (21415, 21431), False, 'import torch\n'), ((21503, 21535), 'torch.exp', 'torch.exp', (['(cusp_same + cusp_anti)'], {}), '(cusp_same + cusp_anti)\n', (21512, 21535), False, 'import torch\n'), ((7327, 7353), 'torch.randperm', 'torch.randperm', (['n_orbitals'], {}), '(n_orbitals)\n', (7341, 7353), False, 'import torch\n')] |
import numpy
from gensim.summarization.bm25 import BM25
class WrappedBM25(BM25):
    """BM25 document ranker with a pluggable tokenizer.

    tokenizer='spacy' uses the spaCy English tokenizer (requires the 'en'
    model); any other value falls back to whitespace splitting.
    """

    def __init__(self, docs, tokenizer='spacy'):
        self.docs = docs
        if tokenizer == 'spacy':
            try:
                import spacy
            except ImportError:
                raise ImportError('Please install spacy and spacy "en" model: '
                                  '`pip install -U spacy && '
                                  'python -m spacy download en` '
                                  'or find alternative installation options '
                                  'at spacy.io')
            self._spacy = spacy.load('en')
            self.tokenizer = self.spacy_tokenize
        else:
            self.tokenizer = self.split_tokenize
        corpus = [self.tokenizer(doc) for doc in self.docs]
        super().__init__(corpus)
        # Mean inverse document frequency over the whole vocabulary
        idf_values = [float(v) for v in self.idf.values()]
        self.average_idf = sum(idf_values) / len(idf_values)

    def find_topk_doc(self, doc, topk=10, rm_first=True):
        """Return the topk highest-scoring documents for `doc`, best first.

        With rm_first=True the top hit (normally the query document itself)
        is dropped, so topk-1 documents are returned.
        """
        scores = self.get_scores(self.tokenizer(doc))
        ranked = numpy.argsort(scores)[::-1]  # descending by score
        result = [self.docs[ranked[rank]] for rank in range(topk)]
        if rm_first:
            del result[0] # remove self
        return result

    def find_tailk_doc(self, doc, tailk=10):
        """Return the `tailk` lowest-scoring documents for `doc`, worst first."""
        scores = self.get_scores(self.tokenizer(doc))
        order = numpy.argsort(scores)  # ascending by score
        return [self.docs[order[rank]] for rank in range(tailk)]

    def spacy_tokenize(self, text):
        """Tokenize `text` with spaCy; returns a list of token strings."""
        return [token.text for token in self._spacy.tokenizer(text)]

    def split_tokenize(self, text):
        """Naive tokenizer: strip surrounding whitespace, split on single spaces."""
        return text.strip().split(' ')
| [
"numpy.argsort",
"spacy.load"
] | [((1126, 1147), 'numpy.argsort', 'numpy.argsort', (['scores'], {}), '(scores)\n', (1139, 1147), False, 'import numpy\n'), ((1481, 1502), 'numpy.argsort', 'numpy.argsort', (['scores'], {}), '(scores)\n', (1494, 1502), False, 'import numpy\n'), ((629, 645), 'spacy.load', 'spacy.load', (['"""en"""'], {}), "('en')\n", (639, 645), False, 'import spacy\n')] |
'''
Extracts embeddings from ESM models.
'''
import argparse
from collections import defaultdict
import os
import pathlib
import numpy as np
import pandas as pd
import torch
from esm import Alphabet, FastaBatchedDataset, ProteinBertModel, pretrained, BatchConverter
from utils import read_fasta, save
# Token-level cross-entropy (reduction='none' keeps one loss value per position).
# NOTE(review): not referenced anywhere in the code visible here -- confirm it
# is actually used, otherwise it can be removed.
criterion = torch.nn.CrossEntropyLoss(reduction='none')
def _str2bool(value):
    """Parse a command-line boolean: 'true'/'1'/'yes'/'y' (any case) -> True.

    argparse's bare ``type=bool`` treats ANY non-empty string as True, so
    ``--save_hidden False`` used to parse as True; this helper fixes that.
    """
    return str(value).strip().lower() in ('true', '1', 'yes', 'y')


def create_parser():
    """Build the CLI parser for the embedding-extraction script.

    Positional arguments: fasta_file, wt_fasta_file, output_dir.
    Options: --model_location, --save_hidden, --toks_per_batch, --nogpu.
    """
    parser = argparse.ArgumentParser(
        description="Extract per-token representations and model outputs for sequences in a FASTA file" # noqa
    )
    parser.add_argument(
        "fasta_file",
        type=pathlib.Path,
        help="FASTA file on which to extract representations",
    )
    parser.add_argument(
        "wt_fasta_file",
        type=pathlib.Path,
        help="FASTA file for WT",
    )
    parser.add_argument(
        "output_dir",
        type=pathlib.Path,
        help="output dir",
    )
    parser.add_argument(
        "--model_location",
        type=str,
        help="model location",
        default="/mnt/esm_weights/esm1b/esm1b_t33_650M_UR50S.pt"
    )
    parser.add_argument(
        # bugfix: was type=bool, which made "--save_hidden False" evaluate to True
        "--save_hidden", type=_str2bool, default=False, help="whether to save rep"
    )
    parser.add_argument(
        "--toks_per_batch", type=int, default=4096, help="maximum batch size"
    )
    parser.add_argument("--nogpu", action="store_true", help="Do not use GPU even if available")
    return parser
def main(args):
    """Run the ESM model over every sequence in args.fasta_file and write
    one averaged embedding per sequence ('rep.npy') plus the sequence
    labels ('labels.npy') into args.output_dir.
    """
    model, alphabet = pretrained.load_model_and_alphabet(args.model_location)
    batch_converter = alphabet.get_batch_converter()
    padding_idx = torch.tensor(alphabet.padding_idx)
    model.eval()
    if torch.cuda.is_available() and not args.nogpu:
        model = model.cuda()
        print("Transferred model to GPU")
    dataset = FastaBatchedDataset.from_file(args.fasta_file)
    # Group sequences so each batch holds at most toks_per_batch tokens
    batches = dataset.get_batch_indices(args.toks_per_batch, extra_toks_per_seq=1)
    data_loader = torch.utils.data.DataLoader(
        dataset, collate_fn=batch_converter, batch_sampler=batches
    )
    print(f"Read {args.fasta_file} with {len(dataset)} sequences")
    repr_layers = [model.num_layers] # extract last layer
    label_vals = []
    avg_rep_vals = []
    with torch.no_grad():
        for batch_idx, (labels, strs, toks) in enumerate(data_loader):
            print(
                f"Processing {batch_idx + 1} of {len(batches)} batches ({toks.size(0)} sequences)"
            )
            if torch.cuda.is_available() and not args.nogpu:
                toks = toks.to(device="cuda", non_blocking=True)
            out = model(toks, repr_layers=repr_layers,
                        return_contacts=False)
            # [B, T, E]
            final_layer = out["representations"][model.num_layers]
            # Mask marking real (non-padding) token positions, shape [B, T, 1]
            notpad = torch.unsqueeze(toks != padding_idx, 2)
            # NOTE(review): this zeroes padded positions but still divides by
            # the full padded length T; if a mean over real tokens is intended,
            # the denominator should be notpad.sum(dim=1) -- confirm.
            avg_rep = (final_layer * notpad).mean(dim=1).to(
                device="cpu").numpy()
            avg_rep_vals.append(avg_rep)
            label_vals.append(labels)
    args.output_dir.mkdir(parents=True, exist_ok=True)
    avg_rep_vals = np.concatenate(avg_rep_vals, axis=0)
    label_vals = np.concatenate(label_vals)
    np.savetxt(os.path.join(args.output_dir, 'labels.npy'),
               label_vals, fmt="%s")
    save(os.path.join(args.output_dir, 'rep.npy'), avg_rep_vals)
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run the extraction.
    main(create_parser().parse_args())
| [
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"esm.FastaBatchedDataset.from_file",
"torch.unsqueeze",
"os.path.join",
"esm.pretrained.load_model_and_alphabet",
"torch.tensor",
"torch.cuda.is_available",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"torch.no_grad"
] | [((317, 360), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (342, 360), False, 'import torch\n'), ((397, 527), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract per-token representations and model outputs for sequences in a FASTA file"""'}), "(description=\n 'Extract per-token representations and model outputs for sequences in a FASTA file'\n )\n", (420, 527), False, 'import argparse\n'), ((1453, 1508), 'esm.pretrained.load_model_and_alphabet', 'pretrained.load_model_and_alphabet', (['args.model_location'], {}), '(args.model_location)\n', (1487, 1508), False, 'from esm import Alphabet, FastaBatchedDataset, ProteinBertModel, pretrained, BatchConverter\n'), ((1580, 1614), 'torch.tensor', 'torch.tensor', (['alphabet.padding_idx'], {}), '(alphabet.padding_idx)\n', (1592, 1614), False, 'import torch\n'), ((1772, 1818), 'esm.FastaBatchedDataset.from_file', 'FastaBatchedDataset.from_file', (['args.fasta_file'], {}), '(args.fasta_file)\n', (1801, 1818), False, 'from esm import Alphabet, FastaBatchedDataset, ProteinBertModel, pretrained, BatchConverter\n'), ((1920, 2011), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'collate_fn': 'batch_converter', 'batch_sampler': 'batches'}), '(dataset, collate_fn=batch_converter,\n batch_sampler=batches)\n', (1947, 2011), False, 'import torch\n'), ((3057, 3093), 'numpy.concatenate', 'np.concatenate', (['avg_rep_vals'], {'axis': '(0)'}), '(avg_rep_vals, axis=0)\n', (3071, 3093), True, 'import numpy as np\n'), ((3111, 3137), 'numpy.concatenate', 'np.concatenate', (['label_vals'], {}), '(label_vals)\n', (3125, 3137), True, 'import numpy as np\n'), ((1640, 1665), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1663, 1665), False, 'import torch\n'), ((2203, 2218), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2216, 2218), False, 'import torch\n'), ((3153, 3196), 
'os.path.join', 'os.path.join', (['args.output_dir', '"""labels.npy"""'], {}), "(args.output_dir, 'labels.npy')\n", (3165, 3196), False, 'import os\n'), ((3241, 3281), 'os.path.join', 'os.path.join', (['args.output_dir', '"""rep.npy"""'], {}), "(args.output_dir, 'rep.npy')\n", (3253, 3281), False, 'import os\n'), ((2760, 2799), 'torch.unsqueeze', 'torch.unsqueeze', (['(toks != padding_idx)', '(2)'], {}), '(toks != padding_idx, 2)\n', (2775, 2799), False, 'import torch\n'), ((2438, 2463), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2461, 2463), False, 'import torch\n')] |
import numpy as np
def Gini_index(Y_data):
    """Return the Gini impurity 1 - sum(p_k^2) of the label array Y_data.

    Robustness fix: an empty split now returns 0.0 (perfectly pure).  The
    previous one-liner evaluated to 1.0 for empty input, because the list
    comprehension was empty and np.sum([]) is 0.
    """
    n = len(Y_data)
    if n == 0:
        return 0.0
    _, counts = np.unique(Y_data, return_counts=True)
    probs = counts / n
    gini = 1 - np.sum(probs ** 2)
    return gini
def Entropy(Y_data):
    """Return the Shannon entropy (base 2) of the label array Y_data."""
    # class frequencies -> probabilities -> -sum(p * log2(p))
    _, counts = np.unique(Y_data, return_counts=True)
    probs = counts / len(Y_data)
    entropy = -(probs * np.log2(probs)).sum()
    return entropy
def impurity_func(Y_data, criterion):
    """Evaluate the impurity of labels Y_data with the chosen criterion.

    criterion is 'gini' (Gini index) or 'entropy' (Shannon entropy); any
    other value yields None, matching the original contract.
    """
    if criterion == 'gini':
        return Gini_index(Y_data)
    if criterion == 'entropy':
        return Entropy(Y_data)
    return None
def Finding_split_point(df, feature, criterion):
    """Return the value of `feature` whose '<' split minimizes weighted impurity.

    df: pandas DataFrame whose LAST column holds the class labels.
    feature: column name to split on.
    criterion: 'gini' or 'entropy', dispatched through impurity_func().

    Bugfix: the best-impurity tracker now starts at +inf instead of 1.
    Weighted entropy can exceed 1 when there are more than two classes, in
    which case the old `min_purity = 1` initialization never updated and
    the function always returned the smallest distinct value.
    """
    col_data = df[feature]
    Y = df.values[:, -1]
    distinct_data = np.unique(col_data)
    split_point = distinct_data[0]
    min_purity = float('inf')
    for val in distinct_data:
        less_idx = (col_data < val)
        y0 = Y[less_idx]   # rows strictly below the candidate threshold
        y1 = Y[~less_idx]  # rows at or above it
        p0 = len(y0) / len(Y)
        p1 = len(y1) / len(Y)
        # Impurity of the split, weighted by the two partition sizes
        purity = p0 * impurity_func(y0, criterion) + p1 * impurity_func(y1, criterion)
        if purity < min_purity:
            min_purity = purity
            split_point = val
    return split_point
def Gaussian_prob(x, mean, std):
    '''
    Gaussian (normal) probability density N(mean, std^2) evaluated at x.

    :param x: input value: X
    :param mean: the mean of X
    :param std: the standard deviation of X
    :return: probability density of X under N(mean, std^2)
    '''
    # Standardize, then apply the normal density formula
    standardized = (x - mean) / std
    ret = np.exp(standardized ** 2 / -2) / (std * np.sqrt(2 * np.pi))
    return ret
return ret | [
"numpy.exp",
"numpy.log2",
"numpy.sqrt",
"numpy.unique"
] | [((836, 855), 'numpy.unique', 'np.unique', (['col_data'], {}), '(col_data)\n', (845, 855), True, 'import numpy as np\n'), ((1612, 1631), 'numpy.exp', 'np.exp', (['(z ** 2 / -2)'], {}), '(z ** 2 / -2)\n', (1618, 1631), True, 'import numpy as np\n'), ((1641, 1659), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1648, 1659), True, 'import numpy as np\n'), ((470, 482), 'numpy.log2', 'np.log2', (['arr'], {}), '(arr)\n', (477, 482), True, 'import numpy as np\n'), ((407, 444), 'numpy.unique', 'np.unique', (['Y_data'], {'return_counts': '(True)'}), '(Y_data, return_counts=True)\n', (416, 444), True, 'import numpy as np\n'), ((170, 207), 'numpy.unique', 'np.unique', (['Y_data'], {'return_counts': '(True)'}), '(Y_data, return_counts=True)\n', (179, 207), True, 'import numpy as np\n')] |
#%matplotlib inline
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
import time
from datetime import timedelta
import math
import os
# --- Run configuration (TF1-style graph setup; executed at import time) ---
tf.logging.set_verbosity(tf.logging.INFO)
log_dir = 'tmp/loopy-nn/board/loop004' # Tensorboard log dir
save_dir = 'tmp/loopy-nn/checkpoints/loop004' # Checkpoint saver dir
plt_dir = 'tmp/loopy-nn/plots/loop004' # Plot output dir
# Create every output directory up front
for d in log_dir, save_dir, plt_dir:
    if not os.path.exists(d):
        os.makedirs(d)
# MNIST data set
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('data/MNIST/', one_hot=True)
require_improvement = 1000 # Stop optimization if no improvement found in n iterations
train_batch_size = 64
learning_rate = 1e-4
# Convolutional Layer 1 Filter size (number of pixels) 5 equals 5 x 5 square
filter_size1 = 5
# Convolutional Loop Layer 1
loop_filter_shape = [16, 8, 1] # number of filters per layer
unrolls = 1 # Loops in network
# Convolutional Layer 1 for baseline (disabled when running loop)
run_baseline = True # Set to True when running baseline algorithm without loops
num_filters1 = 16
use_pooling_1 = True
# Convolutional Layer 2
filter_size2 = 5
num_filters2 = 36
use_pooling_2 = True
# Fully-connected layer size
fc_size = 128
# Image parameters
img_size = 28 # MNIST images are 28 x 28 pixels
img_size_flat = img_size * img_size # flatten images into array
img_shape = (img_size, img_size) # Tuple for reshaping images from flattened arrays
num_channels = 1 # color channels (1 = grayscale, 3 = rgb)
num_classes = 10 # Number of classes (MNIST has 10 digits to classify)
# Placeholders (graph inputs): x is the flattened image batch, x_image its
# 4-D [batch, height, width, channels] view, y_true the one-hot labels.
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
y_true = tf.placeholder(tf.float32, shape=[None, 10], name='y_true')
# NOTE(review): tf.argmax's `dimension` kwarg is deprecated in favor of `axis`.
y_true_cls = tf.argmax(y_true, dimension=1)
# Many of the functions here are modified or taken directly from <NAME>'s excellent series on Tensorflow tutorials. Please see his repositories at: https://githhub.com/Hvass-Labs/TensorFlow-Tutorials
def new_weights(i, lp, shape):
    """Create a trainable weight tensor initialized from N(0, 0.05).

    Loop layers get a name encoding their unroll (lp) and layer (i)
    indices; non-loop callers pass lp == -1 and get the plain name.
    """
    name = 'weights_' if lp == -1 else 'weights_{}_{}'.format(lp, i)
    return tf.Variable(tf.truncated_normal(shape, stddev=0.05), name=name)
def new_biases(length):
    """Create a trainable bias vector of the given length, initialized to 0.05."""
    initial = tf.constant(0.05, shape=[length])
    return tf.Variable(initial)
def get_weights_variable(layer_name, tensor_name):
    """Fetch an existing variable `tensor_name` from the scope `layer_name`.

    reuse=True means the variable must already exist in that scope.
    """
    with tf.variable_scope(layer_name, reuse=True):
        return tf.get_variable(tensor_name)
# Standard convolutional layer constructor
def new_conv_layer(input,               # Layer input
                   num_input_channels,  # Number of channels in previous layer
                   filter_size,         # Width and height of each filter
                   num_filters,         # Number of filters
                   use_pooling=True):   # Use max pooling boolean
    """Build a standard SAME-padded, stride-1 2-D conv layer.

    Applies conv + bias, optional 2x2/stride-2 max-pooling, then ReLU.
    Returns (activations, weights).
    """
    filter_shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = new_weights(i=-1, lp=-1, shape=filter_shape)
    biases = new_biases(length=num_filters)
    layer = tf.nn.conv2d(input=input,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME',
                         name='layer_conv2') + biases
    if use_pooling:
        # 2x2 max-pool with stride 2 halves the spatial resolution
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
    return tf.nn.relu(layer), weights
# Function to build layered looping network
def loop_layered_network(input, # Layer input for loop
                        num_input_channels, # Number of channels in previous layer
                        filter_size, # Width and height of each filter
                        num_filters, # Number of filters
                        loop_filter_shape, # Number of filters per layer default = [16, 8, 1]
                        unrolls, # Num loops (i.e. how many times the conv network will get unrolled)
                        use_pooling=True): # Use max pooling boolean
    """Unroll the looping conv stack `unrolls` times and return (layer, weights).

    Each unroll applies len(loop_filter_shape) convolutions; the last layer
    of one unroll feeds the first layer of the next.  Bias + ReLU are
    applied once, after the final unroll.
    """
    loops = unrolls
    begin = True # If first unroll then begin = True
    lp = 0
    biases = new_biases(length=num_filters)
    while loops:
        for i in range(len(loop_filter_shape)):
            if begin:
                # Very first conv of the first unroll consumes the raw input
                begin = False
                layer = input
                layer, weights = loop_first_layer(layer, num_input_channels, filter_size, num_filters, loop_filter_shape, i, lp)
            elif i == 0:
                # First conv of a later unroll: channels come from the loop's last layer
                layer, weights = loop_begin_loop(layer, num_input_channels, filter_size, num_filters, loop_filter_shape, i, lp)
            elif i < (len(loop_filter_shape)-1):
                layer, weights = loop_mid_layer(layer, num_input_channels, filter_size, num_filters, loop_filter_shape, i, lp)
            elif loops == 1:
                # Last conv of the final unroll: may max-pool on the way out
                layer, weights = loop_exit(layer, num_input_channels, filter_size, num_filters, loop_filter_shape, use_pooling, i, lp)
            else:
                layer, weights = loop_last_layer(layer, num_input_channels, filter_size, num_filters, loop_filter_shape, i, lp)
        loops -= 1
        lp += 1
    # NOTE(review): `layer` has loop_filter_shape[-1] channels here while
    # `biases` has length num_filters; the addition relies on broadcasting
    # across the channel axis -- confirm this is intentional.
    layer += biases
    layer = tf.nn.relu(layer)
    return layer, weights
# Function for constructing convolutional loop layers
def const_conv_layer(layer, weights, use_pooling):
    """Apply one SAME-padded, stride-1 convolution with the given weights.

    Optionally follows it with 2x2/stride-2 max-pooling.  Returns the
    (activations, weights) pair expected by the loop-layer constructors.
    """
    layer = tf.nn.conv2d(input=layer,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME',
                         name='layer_conv1')
    if use_pooling:
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='layer_conv1')
    return layer, weights
# Loop layer constructors
def loop_first_layer(input, num_input_channels, filter_size, num_filters, loop_filter_shape, i, lp):
    """First conv of the very first unroll: raw input channels -> loop_filter_shape[0] filters."""
    shape = [filter_size, filter_size, num_input_channels, loop_filter_shape[0]]
    return const_conv_layer(input, new_weights(i, lp, shape=shape), use_pooling=False)
def loop_begin_loop(input, num_input_channels, filter_size, num_filters, loop_filter_shape, i, lp):
    """First conv of a later unroll: loop_filter_shape[-1] channels back to loop_filter_shape[0]."""
    shape = [filter_size, filter_size, loop_filter_shape[-1], loop_filter_shape[0]]
    return const_conv_layer(input, new_weights(i, lp, shape=shape), use_pooling=False)
def loop_mid_layer(input, num_input_channels, filter_size, num_filters, loop_filter_shape, i, lp):
    """Interior conv of an unroll: loop_filter_shape[i-1] -> loop_filter_shape[i] channels."""
    shape = [filter_size, filter_size, loop_filter_shape[i-1], loop_filter_shape[i]]
    return const_conv_layer(input, new_weights(i, lp, shape=shape), use_pooling=False)
def loop_last_layer(input, num_input_channels, filter_size, num_filters, loop_filter_shape, i, lp):
    """Last conv of a non-final unroll: loop_filter_shape[-2] -> loop_filter_shape[-1] channels."""
    shape = [filter_size, filter_size, loop_filter_shape[-2], loop_filter_shape[-1]]
    return const_conv_layer(input, new_weights(i, lp, shape=shape), use_pooling=False)
def loop_exit(input, num_input_channels, filter_size, num_filters, loop_filter_shape, use_pooling, i, lp):
    """Last conv of the FINAL unroll; honors `use_pooling` on the way out of the loop."""
    shape = [filter_size, filter_size, loop_filter_shape[-2], loop_filter_shape[-1]]
    return const_conv_layer(input, new_weights(i, lp, shape=shape), use_pooling)
# Flatten layers before entering fully connected layers
def flatten_layer(layer):
    """Reshape a 4-D conv output [n, h, w, c] to 2-D [n, h*w*c].

    Returns the flattened tensor and the per-image feature count.
    """
    num_features = layer.get_shape()[1:4].num_elements()  # h * w * c
    flat = tf.reshape(layer, [-1, num_features])
    return flat, num_features
# Fully-connected layer constructor
def new_fc_layer(input,          # The previous layer.
                 num_inputs,     # Num. inputs from prev. layer.
                 num_outputs,    # Num. outputs.
                 use_relu=True): # Use Rectified Linear Unit (ReLU)?
    """Fully-connected layer: input @ W + b, optionally ReLU-activated."""
    weights = new_weights(i=-1, lp=-1, shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)
    layer = tf.matmul(input, weights) + biases
    return tf.nn.relu(layer) if use_relu else layer
# When running comparative tests, swap standard convolutional network for looping network
# run_baseline toggles between a plain conv layer (baseline) and the
# unrolled looping network for the first stage.
if run_baseline:
    layer_conv1, weights_conv1 = \
        new_conv_layer(input=x_image,
                       num_input_channels=num_channels,
                       filter_size=filter_size1,
                       num_filters=num_filters1,
                       use_pooling=use_pooling_1)
else:
    layer_conv1, weights_conv1 = \
        loop_layered_network(input=x_image,
                             num_input_channels=num_channels,
                             filter_size=filter_size1,
                             num_filters=num_filters1,
                             loop_filter_shape=loop_filter_shape,
                             unrolls=unrolls,
                             use_pooling=use_pooling_1)
# Build network after looping layer
layer_conv2, weights_conv2 = \
    new_conv_layer(input=layer_conv1,
                   num_input_channels=num_filters1,
                   filter_size=filter_size2,
                   num_filters=num_filters2,
                   use_pooling=use_pooling_2)
# Flatten the conv output and finish with two fully-connected layers;
# layer_fc2 yields the `num_classes` logits.
layer_flat, num_features = flatten_layer(layer_conv2)
layer_fc1 = new_fc_layer(input=layer_flat,
                         num_inputs=num_features,
                         num_outputs=fc_size,
                         use_relu=True)
layer_fc2 = new_fc_layer(input=layer_fc1,
                         num_inputs=fc_size,
                         num_outputs=num_classes,
                         use_relu=False)
# Early-stopping bookkeeping, mutated by optimize_loopy() via `global`
best_validation_accuracy = 0.0
last_improvement = 0
total_iterations = 0
def optimize_loopy(num_iterations):
    """Run `num_iterations` training steps with early stopping.

    Every 100 iterations (and on the last one) reports train/validation
    accuracy and checkpoints the model whenever validation accuracy
    improves; stops early after `require_improvement` iterations without
    improvement.
    NOTE(review): relies on `session`, `optimizer`, `summary_op`,
    `summary_writer_train`, `accuracy`, `saver`, `save_path` and
    `validation_accuracy` being defined elsewhere in this file -- confirm.
    """
    global total_iterations
    global best_validation_accuracy
    global last_improvement
    start_time = time.time()
    for i in range(num_iterations):
        total_iterations += 1
        x_batch, y_true_batch = data.train.next_batch(train_batch_size)
        feed_dict_train = {x: x_batch,
                           y_true: y_true_batch}
        # One optimization step; also collect the TensorBoard summary
        summary, optimize = session.run([summary_op, optimizer], feed_dict=feed_dict_train)
        summary_writer_train.add_summary(summary, i)
        # Print status every 100 iterations and after last iteration.
        if (total_iterations % 100 == 0) or (i == (num_iterations - 1)):
            acc_train = session.run(accuracy, feed_dict=feed_dict_train)
            acc_validation, _ = validation_accuracy()
            # If validation accuracy is an improvement over best-known.
            if acc_validation > best_validation_accuracy:
                best_validation_accuracy = acc_validation
                last_improvement = total_iterations
                saver.save(sess=session, save_path=save_path)
                improved_str = '*'
            else:
                improved_str = ''
            msg = "Iter: {0:>6}, Train-Batch Accuracy: {1:>6.1%}, Validation Acc: {2:>6.1%} {3}"
            print(msg.format(i + 1, acc_train, acc_validation, improved_str))
        # If no improvement found in the required number of iterations.
        if total_iterations - last_improvement > require_improvement:
            print("No improvement found in a while, stopping optimization.")
            break
    end_time = time.time()
    time_dif = end_time - start_time
    print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
######################################################################
# Plotting Functions
######################################################################
def plot_images(images, cls_true, cls_pred=None):
    """Show exactly 9 MNIST images in a 3x3 grid.

    Each subplot is labeled with the true class, and with the predicted
    class too when cls_pred is given.
    """
    assert len(images) == len(cls_true) == 9
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)
    for idx, ax in enumerate(axes.flat):
        # Images arrive flattened; reshape back to 28x28 for display
        ax.imshow(images[idx].reshape(img_shape), cmap='binary')
        if cls_pred is None:
            label = "True: {0}".format(cls_true[idx])
        else:
            label = "True: {0}, Pred: {1}".format(cls_true[idx], cls_pred[idx])
        ax.set_xlabel(label)
        # Drop the axis ticks -- they carry no information here
        ax.set_xticks([])
        ax.set_yticks([])
    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()
def plot_example_errors(cls_pred, correct):
    """Plot the first nine misclassified test-set images.

    Called from print_test_accuracy(); `cls_pred` holds the predicted class
    for every test image and `correct` is the per-image boolean mask of
    correct predictions.
    """
    incorrect = (correct == False)
    # Select the misclassified images and their predicted/true classes
    wrong_images = data.test.images[incorrect]
    wrong_pred = cls_pred[incorrect]
    wrong_true = data.test.cls[incorrect]
    plot_images(images=wrong_images[0:9],
                cls_true=wrong_true[0:9],
                cls_pred=wrong_pred[0:9])
def plot_confusion_matrix(cls_pred, iter_num):
    """Print and plot the confusion matrix for the test set.

    Args:
        cls_pred: array of predicted class numbers for all test images.
        iter_num: training iteration, shown in the plot and filename.
    """
    cls_true = data.test.cls
    # Confusion matrix via sklearn: rows are true classes, columns predictions.
    cm = confusion_matrix(y_true=cls_true,
                          y_pred=cls_pred)
    print(cm)
    plt.matshow(cm)
    plt.colorbar()
    tick_marks = np.arange(num_classes)
    plt.xticks(tick_marks, range(num_classes))
    plt.yticks(tick_marks, range(num_classes))
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.text(0.5, 0, 'Iter: ' + str(iter_num), verticalalignment='bottom')
    file_name = plt_dir + '/Confusion_Matrix_iter_' + str(iter_num) + '.png'
    # BUG FIX: save before plt.show() -- in interactive backends show() hands the
    # figure to the GUI and the subsequent savefig wrote an empty image.
    plt.savefig(file_name, format='png', bbox_inches='tight')
    plt.show()
# Split the data-set in batches of this size to limit RAM usage
# when running predictions through the network (see predict_cls below).
batch_size = 256
def predict_cls(images, labels, cls_true):
    """Run the network over `images` in batches and compare with `cls_true`.

    Args:
        images:   2-d array, one flattened image per row.
        labels:   one-hot label matrix matching `images`.
        cls_true: integer class number per image.

    Returns:
        correct:  boolean array, True where the prediction matches cls_true.
        cls_pred: integer array of predicted class numbers.
    """
    num_images = len(images)
    # Predictions are computed batch-wise and filled into this array.
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    cls_pred = np.zeros(shape=num_images, dtype=int)
    # i is the start index of the next batch.
    i = 0
    while i < num_images:
        # The last batch may be smaller than batch_size.
        j = min(i + batch_size, num_images)
        feed_dict = {x: images[i:j, :],
                     y_true: labels[i:j, :]}
        cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)
        i = j
    # True where the prediction matches the true class.
    correct = (cls_true == cls_pred)
    return correct, cls_pred
def predict_cls_test():
    """Classify the whole test set; see predict_cls for the return values."""
    split = data.test
    return predict_cls(images=split.images, labels=split.labels, cls_true=split.cls)
def predict_cls_validation():
    """Classify the whole validation set; see predict_cls for the return values."""
    split = data.validation
    return predict_cls(images=split.images, labels=split.labels, cls_true=split.cls)
def cls_accuracy(correct):
    """Return (accuracy, number_correct) for a boolean correctness array.

    Summing booleans counts the True entries, i.e. the correct predictions.
    """
    num_correct = correct.sum()
    acc = float(num_correct) / len(correct)
    return acc, num_correct
def validation_accuracy():
    """Classification accuracy on the validation set.

    Returns the (accuracy, number_correct) pair from cls_accuracy.
    """
    # Only the boolean correctness array is needed; predictions are discarded.
    is_correct = predict_cls_validation()[0]
    return cls_accuracy(is_correct)
def validation_accuracy_summary():
    """Return the per-example correctness flags for the validation set.

    Note: despite the name, this returns the raw boolean array, not an
    accuracy value; the predicted classes are discarded.
    """
    correct, _ = predict_cls_validation()
    return correct
def print_test_accuracy(show_example_errors=False,
                        show_confusion_matrix=False):
    """Evaluate on the test set and print the accuracy.

    Args:
        show_example_errors:   also plot nine mis-classified images.
        show_confusion_matrix: also print/plot the confusion matrix.
    """
    correct, cls_pred = predict_cls_test()
    acc, num_correct = cls_accuracy(correct)
    msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
    print(msg.format(acc, num_correct, len(correct)))
    if show_example_errors:
        print("Example errors:")
        plot_example_errors(cls_pred=cls_pred, correct=correct)
    if show_confusion_matrix:
        print("Confusion Matrix:")
        plot_confusion_matrix(cls_pred=cls_pred, iter_num=total_iterations)
def plot_conv_weights(weights, input_channel=0, iter_num=0, layer_name='not defined'):
    """Plot the filter weights of a conv layer and save the figure.

    Args:
        weights:       TensorFlow op for the 4-dim weight variable
                       (e.g. weights_conv1 or weights_conv2).
        input_channel: which input channel of each filter to display.
        iter_num:      training iteration, used in the caption and filename.
        layer_name:    layer label, used in the caption and filename.
    """
    # Retrieve the weight values; no feed-dict is needed since
    # nothing is computed from the inputs.
    w = session.run(weights)
    print("Mean: {0:.5f}, Stdev: {1:.5f}".format(w.mean(), w.std()))
    # Shared colour scale so the filter images are comparable.
    w_min = np.min(w)
    w_max = np.max(w)
    num_filters = w.shape[3]
    if num_filters > 1:
        # Grid of sub-plots: rounded-up square root of the filter count.
        num_grids = math.ceil(math.sqrt(num_filters))
        fig, axes = plt.subplots(num_grids, num_grids)
        for i, ax in enumerate(axes.flat):
            # The grid may contain more cells than there are filters.
            if i < num_filters:
                # The 4-dim tensor layout is fixed by the TensorFlow API:
                # [height, width, input_channel, filter].
                img = w[:, :, input_channel, i]
                ax.imshow(img, vmin=w_min, vmax=w_max,
                          interpolation='nearest', cmap='seismic')
                ax.set_xticks([])
                ax.set_yticks([])
    else:
        img = w[:, :, input_channel, 0]
        plt.imshow(img, vmin=w_min, vmax=w_max,
                   interpolation='nearest', cmap='seismic')
    plt.text(0.5, 0, 'Weights for ' + layer_name + ' at iter: ' + str(iter_num), verticalalignment='bottom')
    file_name = plt_dir + '/Weights_iter_' + str(iter_num) + '_layer_' + str(layer_name) + '.png'
    print(file_name)
    # BUG FIX: save before plt.show() -- show() hands the figure to the GUI,
    # so saving afterwards could write an empty image in interactive use.
    plt.savefig(file_name, format='png', bbox_inches='tight')
    plt.show()
# Split the test-set into smaller batches of this size.
#test_batch_size = 256
def plot_conv_layer(layer, image, iter_num, layer_name):
    """Plot a conv layer's output for one input image and save the figure.

    Args:
        layer:      TensorFlow op producing a 4-dim activation tensor
                    (e.g. layer_conv1 or layer_conv2).
        image:      one flattened input image.
        iter_num:   training iteration, used in the caption and filename.
        layer_name: layer label, used in the caption and filename.
    """
    # Feed a single image; y_true is not needed for this computation.
    feed_dict = {x: [image]}
    values = session.run(layer, feed_dict=feed_dict)
    num_filters = values.shape[3]
    if num_filters > 1:
        # Grid of sub-plots: rounded-up square root of the filter count.
        num_grids = math.ceil(math.sqrt(num_filters))
        fig, axes = plt.subplots(num_grids, num_grids)
        for i, ax in enumerate(axes.flat):
            # The grid may contain more cells than there are filters.
            if i < num_filters:
                # Output image of the i'th filter (batch index 0).
                img = values[0, :, :, i]
                ax.imshow(img, interpolation='nearest', cmap='binary')
                ax.set_xticks([])
                ax.set_yticks([])
    else:
        img = values[0, :, :, 0]
        plt.imshow(img, interpolation='nearest', cmap='binary')
    plt.text(0.5, 0, 'Layer ' + layer_name + ' at iter: ' + str(iter_num), verticalalignment='bottom')
    file_name = plt_dir + '/Layer_iter_' + str(iter_num) + '_layer_' + str(layer_name) + '.png'
    # BUG FIX: save before plt.show(), otherwise the saved file can be blank
    # in interactive use.
    plt.savefig(file_name, format='png', bbox_inches='tight')
    plt.show()
def plot_image(image):
    """Display one flattened image at full size."""
    pixels = image.reshape(img_shape)
    plt.imshow(pixels, interpolation='nearest', cmap='binary')
    plt.show()
######################################################################
# Loss and Optimization
######################################################################
# NOTE(review): tf.initialize_all_variables / scalar_summary /
# merge_all_summaries / SummaryWriter are pre-1.0 TensorFlow APIs --
# this script presumably targets an old TF release; confirm before upgrading.
# Softmax probabilities and predicted class from the final FC layer.
y_pred = tf.nn.softmax(layer_fc2)
y_pred_cls = tf.argmax(y_pred, dimension=1)
# Cross-entropy is computed from the raw logits, not from y_pred.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2, labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Accuracy = fraction of predictions matching the true classes.
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Saver used to checkpoint the best-validation model.
saver = tf.train.Saver()
save_path = save_dir + 'best_validation'
######################################################################
# Session
######################################################################
session = tf.Session()
session.run(tf.initialize_all_variables())
# TensorBoard Summary Writer
accuracy_summary = tf.scalar_summary('Training Accuracy', accuracy)
loss_summary = tf.scalar_summary('Training Loss', cost)
summary_op = tf.merge_all_summaries()
summary_writer_train = tf.train.SummaryWriter(log_dir + '/train', session.graph)
# Integer class labels derived from the one-hot label matrices.
data.test.cls = np.argmax(data.test.labels, axis=1)
data.validation.cls = np.argmax(data.validation.labels, axis=1)
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))
# Sanity-check the data by plotting a few samples.
images = data.test.images[11:20] # View sample images from the test-set.
cls_true = data.test.cls[11:20] # True classes for sample images
plot_images(images=images, cls_true=cls_true) # Plot samples
image1 = data.test.images[0]
plot_image(image1)
image2 = data.test.images[13]
plot_image(image2)
# Baseline accuracy and filter plots before any training.
print_test_accuracy()
plot_conv_weights(weights=weights_conv1, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_weights(weights=weights_conv2, iter_num=total_iterations, layer_name='Conv_2')
# Train in stages (1 -> 100 -> 1000 -> 10000 total iterations), inspecting
# accuracy, filter weights and layer activations after each stage.
optimize_loopy(num_iterations=1)
print_test_accuracy()
print_test_accuracy(show_example_errors=True,
                    show_confusion_matrix=True)
plot_conv_weights(weights=weights_conv1, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv1, image=image1, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv1, image=image2, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv2, image=image1, iter_num=total_iterations, layer_name='Conv_2')
plot_conv_layer(layer=layer_conv2, image=image2, iter_num=total_iterations, layer_name='Conv_2')
optimize_loopy(num_iterations=99)
print_test_accuracy()
print_test_accuracy(show_example_errors=True,
                    show_confusion_matrix=True)
plot_conv_weights(weights=weights_conv1, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv1, image=image1, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv1, image=image2, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv2, image=image1, iter_num=total_iterations, layer_name='Conv_2')
plot_conv_layer(layer=layer_conv2, image=image2, iter_num=total_iterations, layer_name='Conv_2')
optimize_loopy(num_iterations=900)
print_test_accuracy()
print_test_accuracy(show_example_errors=True,
                    show_confusion_matrix=True)
plot_conv_weights(weights=weights_conv1, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv1, image=image1, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv1, image=image2, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv2, image=image1, iter_num=total_iterations, layer_name='Conv_2')
plot_conv_layer(layer=layer_conv2, image=image2, iter_num=total_iterations, layer_name='Conv_2')
optimize_loopy(num_iterations=9000)
print_test_accuracy()
print_test_accuracy(show_example_errors=True,
                    show_confusion_matrix=True)
plot_conv_weights(weights=weights_conv1, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv1, image=image1, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv1, image=image2, iter_num=total_iterations, layer_name='Conv_1')
plot_conv_layer(layer=layer_conv2, image=image1, iter_num=total_iterations, layer_name='Conv_2')
plot_conv_layer(layer=layer_conv2, image=image2, iter_num=total_iterations, layer_name='Conv_2')
session.close()
| [
"tensorflow.equal",
"tensorflow.get_variable",
"matplotlib.pyplot.ylabel",
"tensorflow.logging.set_verbosity",
"math.sqrt",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.cast",
"numpy.arange",
"matplotlib.pyplot.im... | [((206, 247), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (230, 247), True, 'import tensorflow as tf\n'), ((616, 670), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""data/MNIST/"""'], {'one_hot': '(True)'}), "('data/MNIST/', one_hot=True)\n", (641, 670), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((1698, 1763), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, img_size_flat]', 'name': '"""x"""'}), "(tf.float32, shape=[None, img_size_flat], name='x')\n", (1712, 1763), True, 'import tensorflow as tf\n'), ((1774, 1827), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, img_size, img_size, num_channels]'], {}), '(x, [-1, img_size, img_size, num_channels])\n', (1784, 1827), True, 'import tensorflow as tf\n'), ((1837, 1896), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 10]', 'name': '"""y_true"""'}), "(tf.float32, shape=[None, 10], name='y_true')\n", (1851, 1896), True, 'import tensorflow as tf\n'), ((1910, 1940), 'tensorflow.argmax', 'tf.argmax', (['y_true'], {'dimension': '(1)'}), '(y_true, dimension=1)\n', (1919, 1940), True, 'import tensorflow as tf\n'), ((24192, 24216), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['layer_fc2'], {}), '(layer_fc2)\n', (24205, 24216), True, 'import tensorflow as tf\n'), ((24230, 24260), 'tensorflow.argmax', 'tf.argmax', (['y_pred'], {'dimension': '(1)'}), '(y_pred, dimension=1)\n', (24239, 24260), True, 'import tensorflow as tf\n'), ((24278, 24350), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'layer_fc2', 'labels': 'y_true'}), '(logits=layer_fc2, labels=y_true)\n', (24317, 24350), True, 'import tensorflow as tf\n'), ((24358, 24387), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (24372, 
24387), True, 'import tensorflow as tf\n'), ((24488, 24520), 'tensorflow.equal', 'tf.equal', (['y_pred_cls', 'y_true_cls'], {}), '(y_pred_cls, y_true_cls)\n', (24496, 24520), True, 'import tensorflow as tf\n'), ((24597, 24613), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (24611, 24613), True, 'import tensorflow as tf\n'), ((24840, 24852), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (24850, 24852), True, 'import tensorflow as tf\n'), ((24945, 24993), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""Training Accuracy"""', 'accuracy'], {}), "('Training Accuracy', accuracy)\n", (24962, 24993), True, 'import tensorflow as tf\n'), ((25009, 25049), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""Training Loss"""', 'cost'], {}), "('Training Loss', cost)\n", (25026, 25049), True, 'import tensorflow as tf\n'), ((25063, 25087), 'tensorflow.merge_all_summaries', 'tf.merge_all_summaries', ([], {}), '()\n', (25085, 25087), True, 'import tensorflow as tf\n'), ((25111, 25168), 'tensorflow.train.SummaryWriter', 'tf.train.SummaryWriter', (["(log_dir + '/train')", 'session.graph'], {}), "(log_dir + '/train', session.graph)\n", (25133, 25168), True, 'import tensorflow as tf\n'), ((25186, 25221), 'numpy.argmax', 'np.argmax', (['data.test.labels'], {'axis': '(1)'}), '(data.test.labels, axis=1)\n', (25195, 25221), True, 'import numpy as np\n'), ((25244, 25285), 'numpy.argmax', 'np.argmax', (['data.validation.labels'], {'axis': '(1)'}), '(data.validation.labels, axis=1)\n', (25253, 25285), True, 'import numpy as np\n'), ((3171, 3275), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'input', 'filter': 'weights', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""layer_conv2"""'}), "(input=input, filter=weights, strides=[1, 1, 1, 1], padding=\n 'SAME', name='layer_conv2')\n", (3183, 3275), True, 'import tensorflow as tf\n'), ((3751, 3768), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (3761, 3768), True, 
'import tensorflow as tf\n'), ((5511, 5528), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (5521, 5528), True, 'import tensorflow as tf\n'), ((5683, 5787), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'layer', 'filter': 'weights', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""layer_conv1"""'}), "(input=layer, filter=weights, strides=[1, 1, 1, 1], padding=\n 'SAME', name='layer_conv1')\n", (5695, 5787), True, 'import tensorflow as tf\n'), ((8338, 8375), 'tensorflow.reshape', 'tf.reshape', (['layer', '[-1, num_features]'], {}), '(layer, [-1, num_features])\n', (8348, 8375), True, 'import tensorflow as tf\n'), ((10676, 10687), 'time.time', 'time.time', ([], {}), '()\n', (10685, 10687), False, 'import time\n'), ((12167, 12178), 'time.time', 'time.time', ([], {}), '()\n', (12176, 12178), False, 'import time\n'), ((12636, 12654), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {}), '(3, 3)\n', (12648, 12654), True, 'import matplotlib.pyplot as plt\n'), ((13333, 13343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13341, 13343), True, 'import matplotlib.pyplot as plt\n'), ((14560, 14610), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', ([], {'y_true': 'cls_true', 'y_pred': 'cls_pred'}), '(y_true=cls_true, y_pred=cls_pred)\n', (14576, 14610), False, 'from sklearn.metrics import confusion_matrix\n'), ((14744, 14759), 'matplotlib.pyplot.matshow', 'plt.matshow', (['cm'], {}), '(cm)\n', (14755, 14759), True, 'import matplotlib.pyplot as plt\n'), ((14809, 14823), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (14821, 14823), True, 'import matplotlib.pyplot as plt\n'), ((14841, 14863), 'numpy.arange', 'np.arange', (['num_classes'], {}), '(num_classes)\n', (14850, 14863), True, 'import numpy as np\n'), ((14962, 14985), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted"""'], {}), "('Predicted')\n", (14972, 14985), True, 'import matplotlib.pyplot as plt\n'), ((14990, 15008), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True"""'], {}), "('True')\n", (15000, 15008), True, 'import matplotlib.pyplot as plt\n'), ((15321, 15331), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15329, 15331), True, 'import matplotlib.pyplot as plt\n'), ((15336, 15393), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(file_name, format='png', bbox_inches='tight')\n", (15347, 15393), True, 'import matplotlib.pyplot as plt\n'), ((15719, 15759), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_images', 'dtype': 'np.int'}), '(shape=num_images, dtype=np.int)\n', (15727, 15759), True, 'import numpy as np\n'), ((19822, 19831), 'numpy.min', 'np.min', (['w'], {}), '(w)\n', (19828, 19831), True, 'import numpy as np\n'), ((19844, 19853), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (19850, 19853), True, 'import numpy as np\n'), ((21524, 21534), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21532, 21534), True, 'import matplotlib.pyplot as plt\n'), ((21539, 21596), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(file_name, format='png', bbox_inches='tight')\n", (21550, 21596), True, 'import matplotlib.pyplot as plt\n'), ((23762, 23772), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23770, 23772), True, 'import matplotlib.pyplot as plt\n'), ((23777, 23834), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(file_name, format='png', bbox_inches='tight')\n", (23788, 23834), True, 'import matplotlib.pyplot as plt\n'), ((23975, 23985), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23983, 23985), True, 'import matplotlib.pyplot as plt\n'), ((24547, 24586), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (24554, 24586), True, 'import tensorflow as tf\n'), ((24865, 24894), 
'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (24892, 24894), True, 'import tensorflow as tf\n'), ((490, 507), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (504, 507), False, 'import os\n'), ((517, 531), 'os.makedirs', 'os.makedirs', (['d'], {}), '(d)\n', (528, 531), False, 'import os\n'), ((2300, 2339), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.05)'}), '(shape, stddev=0.05)\n', (2319, 2339), True, 'import tensorflow as tf\n'), ((2404, 2437), 'tensorflow.constant', 'tf.constant', (['(0.05)'], {'shape': '[length]'}), '(0.05, shape=[length])\n', (2415, 2437), True, 'import tensorflow as tf\n'), ((2505, 2546), 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {'reuse': '(True)'}), '(layer_name, reuse=True)\n', (2522, 2546), True, 'import tensorflow as tf\n'), ((2567, 2595), 'tensorflow.get_variable', 'tf.get_variable', (['tensor_name'], {}), '(tensor_name)\n', (2582, 2595), True, 'import tensorflow as tf\n'), ((3429, 3518), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', ([], {'value': 'layer', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(value=layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='SAME')\n", (3443, 3518), True, 'import tensorflow as tf\n'), ((5920, 6029), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', ([], {'value': 'layer', 'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""layer_conv1"""'}), "(value=layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='SAME', name='layer_conv1')\n", (5934, 6029), True, 'import tensorflow as tf\n'), ((8846, 8871), 'tensorflow.matmul', 'tf.matmul', (['input', 'weights'], {}), '(input, weights)\n', (8855, 8871), True, 'import tensorflow as tf\n'), ((8915, 8932), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (8925, 8932), True, 'import tensorflow as tf\n'), ((20195, 20229), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['num_grids', 'num_grids'], {}), '(num_grids, num_grids)\n', (20207, 20229), True, 'import matplotlib.pyplot as plt\n'), ((21018, 21103), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'vmin': 'w_min', 'vmax': 'w_max', 'interpolation': '"""nearest"""', 'cmap': '"""seismic"""'}), "(img, vmin=w_min, vmax=w_max, interpolation='nearest', cmap='seismic'\n )\n", (21028, 21103), True, 'import matplotlib.pyplot as plt\n'), ((22557, 22591), 'matplotlib.pyplot.subplots', 'plt.subplots', (['num_grids', 'num_grids'], {}), '(num_grids, num_grids)\n', (22569, 22591), True, 'import matplotlib.pyplot as plt\n'), ((23291, 23346), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'interpolation': '"""nearest"""', 'cmap': '"""binary"""'}), "(img, interpolation='nearest', cmap='binary')\n", (23301, 23346), True, 'import matplotlib.pyplot as plt\n'), ((24400, 24451), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (24422, 24451), True, 'import tensorflow as tf\n'), ((20092, 20114), 'math.sqrt', 'math.sqrt', (['num_filters'], {}), '(num_filters)\n', (20101, 20114), False, 'import math\n'), ((22454, 22476), 'math.sqrt', 'math.sqrt', (['num_filters'], {}), '(num_filters)\n', (22463, 22476), False, 'import math\n')] |
from autogluon.core.utils.feature_selection import *
from autogluon.core.utils.utils import unevaluated_fi_df_template
import numpy as np
from numpy.core.fromnumeric import sort
import pandas as pd
import pytest
def evaluated_fi_df_template(features, importance=None, n=None):
    """Build a feature-importance DataFrame indexed by the feature names.

    Missing importance scores are drawn from a fixed-seed RNG; `n` (the
    evaluation count) defaults to 5 for every feature.
    """
    rng = np.random.default_rng(0)
    df = pd.DataFrame({'name': features})
    # Draw random importances only when none are supplied, so that the stddev
    # column consumes the *first* RNG draw in that case (preserves draw order).
    if importance is None:
        importance = rng.standard_normal(len(features))
    df['importance'] = importance
    df['stddev'] = rng.standard_normal(len(features))
    df['p_value'] = None
    df['n'] = n if n is not None else 5
    df = df.set_index('name')
    df.index.name = None
    return df
@pytest.fixture
def sample_features():
    """Five dummy feature names shared by the tests below."""
    return list('abcde')
@pytest.fixture
def sample_importance_df_1(sample_features):
    """Importance df where 'c' and 'e' are unevaluated (importance=None, n=0)."""
    scores = [0.2, 0.2, None, 1., None]
    counts = [10, 5, 0, 5, 0]
    return evaluated_fi_df_template(sample_features, importance=scores, n=counts)
@pytest.fixture
def sample_importance_df_2(sample_features):
    """Importance df where 'd' and 'e' are unevaluated (importance=None, n=0)."""
    scores = [-0.1, -0.1, 0.1, None, None]
    counts = [5, 10, 10, 0, 0]
    return evaluated_fi_df_template(sample_features, importance=scores, n=counts)
def test_add_noise_column_df():
    # Noise columns must be appended after the original dataframe columns,
    # and reported in the returned column-name list.
    X = pd.DataFrame({'a': [1, 2]})
    X_noised, noise_columns = add_noise_column(X, rng=np.random.default_rng(0), count=2)
    assert X_noised.columns.tolist() == X.columns.tolist() + noise_columns
def test_merge_importance_dfs_base(sample_features):
    # With no previous importance df the merge is a no-op: the current df
    # must be returned unchanged (same object).
    curr_df = unevaluated_fi_df_template(sample_features)
    merged = merge_importance_dfs(None, curr_df, using_prev_fit_fi=set())
    assert merged is curr_df
def test_merge_importance_dfs_same_model(sample_features, sample_importance_df_1, sample_importance_df_2):
    # When the previous estimates come from the same fitted model, importance
    # scores are averaged and the evaluation counts are summed.
    merged = merge_importance_dfs(sample_importance_df_1, sample_importance_df_2, using_prev_fit_fi=set())
    # NaN scores are mapped to None for comparison (NaN != NaN).
    scores = [s if s == s else None for s in merged['importance'].tolist()]
    assert scores == [0., 0.1, 0.1, 1., None]
    assert merged['n'].tolist() == [15, 15, 10, 5, 0]
def test_merge_importance_dfs_different_model(sample_features, sample_importance_df_1, sample_importance_df_2):
    # When all previous estimates come from a *different* fitted model, the
    # newer estimates replace them instead of being averaged in.
    using_prev_fit_fi = set(sample_features)
    merged = merge_importance_dfs(sample_importance_df_1, sample_importance_df_2, using_prev_fit_fi=using_prev_fit_fi).sort_index()
    # Two features remain marked as carrying previous-fit estimates.
    assert len(using_prev_fit_fi) == 2
    scores = [s if s == s else None for s in merged['importance'].tolist()]
    assert scores == [-0.1, -0.1, 0.1, 1., None]
    assert merged['n'].tolist() == [5, 10, 10, 5, 0]
def test_merge_importance_dfs_all(sample_features, sample_importance_df_1, sample_importance_df_2):
    # Mixed case: exactly one feature's previous estimate comes from an older
    # fit (replaced); the rest come from the same fit (averaged).
    using_prev_fit_fi = {sample_features[0]}
    merged = merge_importance_dfs(sample_importance_df_1, sample_importance_df_2, using_prev_fit_fi=using_prev_fit_fi).sort_index()
    scores = [s if s == s else None for s in merged['importance'].tolist()]
    assert scores == [-0.1, 0., 0.1, 1., None]
    assert merged['n'].tolist() == [5, 15, 10, 5, 0]
    # The old-fit marker must have been consumed by the merge.
    assert using_prev_fit_fi == set()
def test_sort_features_by_priority_base(sample_features):
    # With no prior importance computation, the input ordering is preserved.
    ordered = sort_features_by_priority(features=sample_features, prev_importance_df=None, using_prev_fit_fi=set())
    assert ordered == sample_features
def test_sort_features_by_priority_same_model(sample_features):
    # Previously evaluated features (same fitted model) are recomputed in
    # ascending order of their previous importance.
    prev_df = evaluated_fi_df_template(sample_features)
    ordered = sort_features_by_priority(features=sample_features, prev_importance_df=prev_df, using_prev_fit_fi=set())
    assert ordered == prev_df.sort_values('importance').index.tolist()
def test_sort_features_by_priority_different_model(sample_features):
    # Features whose estimates came from an older fit are recomputed before
    # the current-fit ones; within each group, ascending importance first.
    prev_df = evaluated_fi_df_template(sample_features)
    using_prev_fit_fi = sample_features[-2:]
    ordered = sort_features_by_priority(features=sample_features, prev_importance_df=prev_df, using_prev_fit_fi=using_prev_fit_fi)
    from_prev_fit = prev_df.index.isin(using_prev_fit_fi)
    prev_fit_first = prev_df[from_prev_fit].sort_values('importance').index.tolist()
    curr_fit_after = prev_df[~from_prev_fit].sort_values('importance').index.tolist()
    assert ordered == prev_fit_first + curr_fit_after
def test_sort_features_by_priority_all(sample_features):
    # Test the ordering of feature importance computation when the estimates
    # come from a mix of current-fit and previous-fit models and some features
    # are still unevaluated. Expected priority: unevaluated features first,
    # then previous-fit features, then current-fit features (each evaluated
    # group ordered by ascending previous importance).
    length = len(sample_features)
    using_prev_fit_fi = set(sample_features[:length//3])
    # First half of the features has evaluated rows; second half is unevaluated.
    evaluated_rows, unevaluated_rows = evaluated_fi_df_template(sample_features[:length//2]), unevaluated_fi_df_template(sample_features[length//2:])
    prev_importance_df = pd.concat([evaluated_rows, unevaluated_rows])
    sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)
    unevaluated_features = unevaluated_rows.index.tolist()
    # Evaluated features whose estimate came from the previous fit, ascending importance.
    sorted_prev_fit_features = evaluated_rows[(~evaluated_rows.index.isin(sample_features[length//2:]))
                                              & (evaluated_rows.index.isin(using_prev_fit_fi))].sort_values('importance').index.tolist()
    # Evaluated features whose estimate came from the current fit, ascending importance.
    sorted_curr_fit_features = evaluated_rows[(~evaluated_rows.index.isin(sample_features[length//2:]))
                                              & (~evaluated_rows.index.isin(using_prev_fit_fi))].sort_values('importance').index.tolist()
    expected_features = unevaluated_features + sorted_prev_fit_features + sorted_curr_fit_features
    assert sorted_features == expected_features
| [
"pandas.DataFrame",
"pandas.concat",
"numpy.random.default_rng",
"autogluon.core.utils.utils.unevaluated_fi_df_template"
] | [((289, 313), 'numpy.random.default_rng', 'np.random.default_rng', (['(0)'], {}), '(0)\n', (310, 313), True, 'import numpy as np\n'), ((334, 366), 'pandas.DataFrame', 'pd.DataFrame', (["{'name': features}"], {}), "({'name': features})\n", (346, 366), True, 'import pandas as pd\n'), ((1285, 1312), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 2]}"], {}), "({'a': [1, 2]})\n", (1297, 1312), True, 'import pandas as pd\n'), ((5992, 6037), 'pandas.concat', 'pd.concat', (['[evaluated_rows, unevaluated_rows]'], {}), '([evaluated_rows, unevaluated_rows])\n', (6001, 6037), True, 'import pandas as pd\n'), ((1332, 1356), 'numpy.random.default_rng', 'np.random.default_rng', (['(0)'], {}), '(0)\n', (1353, 1356), True, 'import numpy as np\n'), ((1697, 1740), 'autogluon.core.utils.utils.unevaluated_fi_df_template', 'unevaluated_fi_df_template', (['sample_features'], {}), '(sample_features)\n', (1723, 1740), False, 'from autogluon.core.utils.utils import unevaluated_fi_df_template\n'), ((5911, 5968), 'autogluon.core.utils.utils.unevaluated_fi_df_template', 'unevaluated_fi_df_template', (['sample_features[length // 2:]'], {}), '(sample_features[length // 2:])\n', (5937, 5968), False, 'from autogluon.core.utils.utils import unevaluated_fi_df_template\n')] |
import cv2
import random
import numpy as np
IMG_WIDTH = 1200
IMG_HEIGHT = 800
WATERMARK_WIDTH = 256
WATERMARK_HEIGHT = 256
IMG_SIZE = IMG_HEIGHT * IMG_WIDTH
WATERMARK_SIZE = WATERMARK_HEIGHT * WATERMARK_WIDTH
KEY = 1001   # PRNG seed shared with the embedding side
THRESH = 75  # intensity threshold that decides a watermark bit


def mean_neighbour(img, x, y):
    """Return the mean intensity of the 3x3 neighbourhood centred on (x, y).

    Neighbours falling outside the IMG_HEIGHT x IMG_WIDTH frame are skipped,
    so border/corner pixels average over fewer than nine samples.

    Parameters
    ----------
    img : 2-D array indexable as ``img[row, col]`` (e.g. a grayscale image)
    x, y : int
        Row and column of the centre pixel.

    Returns
    -------
    float
        Mean of the in-bounds neighbours, centre pixel included.
    """
    # Replaces nine copy-pasted bounds checks with a loop over the offsets.
    total = 0
    count = 0
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            i = x + di
            j = y + dj
            if 0 <= i < IMG_HEIGHT and 0 <= j < IMG_WIDTH:
                total += img[i, j]
                count += 1
    return total / float(count)
# Re-derive the embedding pixel order from the shared key so extraction
# visits the same image pixels the embedder used.
random.seed(a=KEY)
random_points = random.sample(range(IMG_SIZE), WATERMARK_SIZE)

for cnt in range(0, 7):
    og_img = cv2.imread('images\stolen_images\stolen_image_' + str(cnt) + '.jpg', 0)
    master_img = np.zeros((WATERMARK_WIDTH, WATERMARK_HEIGHT, 1), np.uint8)
    # The flat list of sampled pixels fills the watermark row by row.
    for idx, k in enumerate(random_points):
        i, j = divmod(idx, 256)       # destination pixel in the watermark
        x, y = divmod(k, IMG_WIDTH)   # source pixel in the stolen image
        if mean_neighbour(og_img, x, y) > THRESH:
            master_img[i, j] = 255
    cv2.imwrite('images\master_images\master_img_' + str(cnt) + '.jpg', master_img)
    print(cnt)
print("Done")
"numpy.zeros",
"random.seed"
] | [((1496, 1514), 'random.seed', 'random.seed', ([], {'a': 'KEY'}), '(a=KEY)\n', (1507, 1514), False, 'import random\n'), ((1701, 1759), 'numpy.zeros', 'np.zeros', (['(WATERMARK_WIDTH, WATERMARK_HEIGHT, 1)', 'np.uint8'], {}), '((WATERMARK_WIDTH, WATERMARK_HEIGHT, 1), np.uint8)\n', (1709, 1759), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Gauge repeatability study: three researchers (A, B, C) each measure the
# same five samples three times.  Keys are the repeated measurements.
x1 = {'Measure_1': [28.4,28.9,29.0,28.4,28.6],
      'Measure_2': [28.4,29.0,29.1,28.5,28.6],
      'Measure_3': [28.4,29.0,29.1,28.5,28.6]}  # researcher A
x2 = {'Measure_1': [28.5,28.8,29.0,28.5,28.6],
      'Measure_2': [28.4,28.9,29.0,28.5,28.6],
      'Measure_3': [28.4,28.8,29.0,28.5,28.6]}  # researcher B
x3 = {'Measure_1': [28.4,28.9,28.9,28.4,28.6],
      'Measure_2': [28.5,28.9,28.9,28.5,28.7],
      'Measure_3': [28.5,28.9,29.0,28.4,28.7]}  # researcher C

MEASURE_COLS = ['Measure_1', 'Measure_2', 'Measure_3']


def _prepare(raw, researcher):
    """Return (frame, grand mean, mean range) for one researcher's data.

    The grand mean and the mean per-sample range are computed before the
    string label columns are appended, so only the numeric measurements
    contribute to them.
    """
    frame = pd.DataFrame(raw)
    grand_mean = frame.mean().mean()
    mean_range = (frame.max(axis=1) - frame.min(axis=1)).sum() / 5
    frame['sample_no'] = ['#' + str(i) for i in range(1, 6)]
    frame['researcher'] = researcher
    return frame, grand_mean, mean_range


# Build the three per-researcher frames in one loop instead of three
# copy-pasted blocks.
frames = {}
xbars = {}
rbars = {}
for researcher, raw in (('A', x1), ('B', x2), ('C', x3)):
    frames[researcher], xbars[researcher], rbars[researcher] = _prepare(raw, researcher)

# All three researchers' data in a single long-format table.
df = pd.concat([frames['A'], frames['B'], frames['C']], ignore_index=True)

# Control limits for the X-bar chart; A2 = 1.023 is the standard constant
# for subgroups of size n = 3.
A2 = 1.023
xbarbar = (xbars['A'] + xbars['B'] + xbars['C']) / 3
rbar = (rbars['A'] + rbars['B'] + rbars['C']) / 3
ucl = xbarbar + A2 * rbar
lcl = xbarbar - A2 * rbar
# Restrict to the numeric columns: DataFrame.mean() on a frame that also
# holds the string label columns raises TypeError on pandas >= 2.0.
print(frames['A'][MEASURE_COLS].mean())
print(rbar, ucl, lcl)

grouped = df.groupby('researcher')  # one chart panel per researcher
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 4), sharey=True)
fig.suptitle('Xbar Chart', fontsize=18, y=0)
for key, ax in zip(grouped.groups.keys(), axes.flatten()):
    # Per-sample mean of the three repeated measurements for this researcher.
    sample_means = grouped.get_group(key)[MEASURE_COLS].mean(axis=1)
    sample_means.index = range(1, 6)  # x axis: sample numbers 1..5
    sample_means.plot(ax=ax, xticks=sample_means.index, title=key,
                      label='sample', style='go-', linewidth=2)
    ax.plot(range(1, 6), xbarbar * np.ones(5), 'k', label=r'$\bar\bar{x}$')
    ax.plot(range(1, 6), ucl * np.ones(5), label='UCL')
    ax.plot(range(1, 6), lcl * np.ones(5), label='LCL')
    ax.legend()
    ax.grid(False)  # hide the grid
plt.show()
"numpy.ones",
"pandas.DataFrame",
"pandas.concat",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((597, 613), 'pandas.DataFrame', 'pd.DataFrame', (['x1'], {}), '(x1)\n', (609, 613), True, 'import pandas as pd\n'), ((779, 795), 'pandas.DataFrame', 'pd.DataFrame', (['x2'], {}), '(x2)\n', (791, 795), True, 'import pandas as pd\n'), ((961, 977), 'pandas.DataFrame', 'pd.DataFrame', (['x3'], {}), '(x3)\n', (973, 977), True, 'import pandas as pd\n'), ((1142, 1187), 'pandas.concat', 'pd.concat', (['[df1, df2, df3]'], {'ignore_index': '(True)'}), '([df1, df2, df3], ignore_index=True)\n', (1151, 1187), True, 'import pandas as pd\n'), ((1765, 1825), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(16, 4)', 'sharey': '(True)'}), '(nrows=1, ncols=3, figsize=(16, 4), sharey=True)\n', (1777, 1825), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2531), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2529, 2531), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2305), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2302, 2305), True, 'import numpy as np\n'), ((2372, 2382), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2379, 2382), True, 'import numpy as np\n'), ((2436, 2446), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (2443, 2446), True, 'import numpy as np\n')] |
# zerosOnesAndLike.py -- demo of NumPy's *_like constructors, which copy
# the shape (and inferred dtype) of an existing array-like object.
import numpy as np

list_of_lists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
array_of_arrays = np.array(list_of_lists)
zeros_like_array = np.zeros_like(list_of_lists)  # same shape, all zeros
ones_like_array = np.ones_like(list_of_lists)    # same shape, all ones
empty_like_array = np.empty_like(list_of_lists)  # same shape, uninitialised

# Print each demo array under its label.
for label, value in (
    ("list_of_lists", list_of_lists),
    ("array_of_arrays", array_of_arrays),
    ("zeros_like_array", zeros_like_array),
    ("ones_like_array", ones_like_array),
    ("empty_like_array", empty_like_array),
):
    print(label + ":", value, sep="\n")
"numpy.empty_like",
"numpy.array",
"numpy.zeros_like",
"numpy.ones_like"
] | [((102, 125), 'numpy.array', 'np.array', (['list_of_lists'], {}), '(list_of_lists)\n', (110, 125), True, 'import numpy as np\n'), ((145, 173), 'numpy.zeros_like', 'np.zeros_like', (['list_of_lists'], {}), '(list_of_lists)\n', (158, 173), True, 'import numpy as np\n'), ((192, 219), 'numpy.ones_like', 'np.ones_like', (['list_of_lists'], {}), '(list_of_lists)\n', (204, 219), True, 'import numpy as np\n'), ((239, 267), 'numpy.empty_like', 'np.empty_like', (['list_of_lists'], {}), '(list_of_lists)\n', (252, 267), True, 'import numpy as np\n')] |
"""
.. module:: sparse_rep
.. moduleauthor:: <NAME>
.. moduleauthor:: <NAME>
The original SparsePZ code to be found at https://github.com/mgckind/SparsePz
This module reorganizes it for usage by DESC within qp, and is python3 compliant.
"""
__author__ = '<NAME>'
import numpy as np
from scipy.special import voigt_profile
from scipy import linalg as sla
from scipy import integrate as sciint
def shapes2pdf(wa, ma, sa, ga, meta, cut=1.e-5):
    """Evaluate a normalized PDF on the ``meta['xvals']`` grid from Voigt
    shape parameters of a single object.

    Parameters
    ----------
    wa, ma, sa, ga : sequences of float
        Weights, means, Gaussian sigmas and Lorentzian gammas of the Voigt
        components.
    meta : dict
        Metadata dictionary; only ``meta['xvals']`` (evaluation grid) is used.
    cut : float
        Values below this threshold are zeroed, per component and on the sum.

    Returns
    -------
    np.ndarray
        The PDF on ``meta['xvals']``, normalized to unit trapezoidal integral.
    """
    x = meta['xvals']
    pdf = np.zeros_like(x)
    for w, m, s, g in zip(wa, ma, sa, ga):
        pdft = voigt_profile(x - m, s, g)
        pdft = np.where(pdft >= cut, pdft, 0.)
        # Each component is l2-normalized before weighting, matching the
        # normalization of the basis columns built in create_voigt_basis.
        pdft = w * pdft / sla.norm(pdft)
        pdf += pdft
    pdf = np.where(pdf >= cut, pdf, 0.)
    # scipy.integrate.trapz was removed in SciPy 1.14; compute the
    # trapezoidal integral directly with numpy instead.
    area = np.sum(0.5 * (pdf[1:] + pdf[:-1]) * np.diff(x))
    return pdf / area
def create_basis(metadata, cut=1.e-5):
    """Construct the Voigt basis matrix described by a metadata dictionary.

    Unpacks the evaluation grid, the mean/sigma ranges and the dimension
    counts from *metadata* and delegates to :func:`create_voigt_basis`.
    """
    grid = metadata['xvals']
    mu_range = metadata['mu']
    sig_range = metadata['sig']
    n_mu, n_sig, n_voigt = metadata['dims'][:3]
    return create_voigt_basis(grid, mu_range, n_mu, sig_range, n_sig, n_voigt, cut=cut)
def create_voigt_basis(xvals, mu, Nmu, sigma, Nsigma, Nv, cut=1.e-5):
    """Build a dictionary of l2-normalized Voigt profiles sampled on *xvals*.

    The basis spans a grid of ``Nmu`` means in ``[mu[0], mu[1]]``, ``Nsigma``
    sigmas in ``[sigma[0], sigma[1]]`` and ``Nv`` gamma values in ``[0, 0.5]``,
    with gamma varying fastest along the columns.

    :param float xvals: the x-axis points at which each profile is evaluated
    :param float mu: [min_mu, max_mu] range of the Gaussian mean
    :param int Nmu: number of mean values
    :param float sigma: [min_sigma, max_sigma] range of the Gaussian sigma
    :param int Nsigma: number of sigma values
    :param int Nv: number of Voigt gammas per (mu, sigma) pair
    :param float cut: values below this threshold are zeroed before normalizing
    :return: array of shape (len(xvals), Nmu * Nsigma * Nv) with unit-norm columns
    :rtype: float
    """
    mean_grid = np.linspace(mu[0], mu[1], Nmu)
    sigma_grid = np.linspace(sigma[0], sigma[1], Nsigma)
    gamma_grid = np.linspace(0, 0.5, Nv)
    n_basis = Nmu * Nsigma * Nv
    basis = np.zeros((len(xvals), n_basis))
    col = 0
    for m in mean_grid:
        for s in sigma_grid:
            for g in gamma_grid:
                profile = voigt_profile(xvals - m, s, g)
                profile = np.where(profile >= cut, profile, 0.)
                basis[:, col] = profile / sla.norm(profile)
                col += 1
    return basis
def sparse_basis(dictionary, query_vec, n_basis, tolerance=None):
    """
    Compute sparse representation of a vector given Dictionary (basis)
    for a given tolerance or number of basis. It uses Cholesky decomposition to speed the process and to
    solve the linear operations adapted from <NAME>., <NAME>. and <NAME>., Technical Report - CS
    Technion, April 2008
    :param float dictionary: Array with all basis on each column, must has shape (len(vector), total basis) and each column must have euclidean l-2 norm equal to 1
    :param float query_vec: vector of which a sparse representation is desired
    :param int n_basis: number of desired basis
    :param float tolerance: tolerance desired if n_basis is not needed to be fixed, must input a large number for n_basis to assure achieving tolerance
    :return: indices, values (2 arrays one with the position and the second with the coefficients)
    """
    # NOTE(review): this routine swaps columns of *dictionary* IN PLACE; the
    # caller (build_sparse_representation) swaps them back after each call.
    a_n = np.zeros(dictionary.shape[1])
    machine_eps = np.finfo(dictionary.dtype).eps
    # Initial correlation of every basis column with the target vector.
    alpha = np.dot(dictionary.T, query_vec)
    res = query_vec
    idxs = np.arange(dictionary.shape[1])  # keeping track of swapping
    # L holds the Cholesky factor of the Gram matrix of the active columns.
    L = np.zeros((n_basis, n_basis), dtype=dictionary.dtype)
    L[0, 0] = 1.
    for n_active in range(n_basis):
        # OMP selection step: pick the column most correlated with the
        # current residual.
        lam = np.argmax(abs(np.dot(dictionary.T, res)))
        if lam < n_active or alpha[lam] ** 2 < machine_eps: #pragma: no cover
            # Column already in the active set, or numerically negligible:
            # stop early with the atoms selected so far.
            n_active -= 1
            break
        if n_active > 0: #pragma: no cover
            # Updates the Cholesky decomposition of dictionary
            L[n_active, :n_active] = np.dot(dictionary[:, :n_active].T, dictionary[:, lam])
            sla.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], lower=True, overwrite_b=True)
            v = sla.norm(L[n_active, :n_active]) ** 2
            if 1 - v <= machine_eps:
                print("Selected basis are dependent or normed are not unity")
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Swap the selected column (and its bookkeeping) into the leading
        # "active" block so the Cholesky factor stays contiguous.
        dictionary[:, [n_active, lam]] = dictionary[:, [lam, n_active]]
        alpha[[n_active, lam]] = alpha[[lam, n_active]]
        idxs[[n_active, lam]] = idxs[[lam, n_active]]
        # solves LL'x = query_vec as a composition of two triangular systems
        gamma = sla.cho_solve((L[:n_active + 1, :n_active + 1], True), alpha[:n_active + 1], overwrite_b=False)
        res = query_vec - np.dot(dictionary[:, :n_active + 1], gamma)
        if tolerance is not None and sla.norm(res) ** 2 <= tolerance:
            break
    # NOTE(review): assumes n_basis >= 1 -- with n_basis == 0 the loop never
    # runs and 'gamma'/'n_active' would be unbound here.
    a_n[idxs[:n_active + 1]] = gamma
    # Drop only the local reference; the (mutated) array lives on in the caller.
    del dictionary
    #return a_n
    return idxs[:n_active + 1], gamma
def combine_int(Ncoef, Nbase):
    """Pack a coefficient and a basis index into a single 32-bit word.

    The 16-bit coefficient *Ncoef* occupies the high half word and the
    16-bit basis index *Nbase* the low half word; ``get_N`` inverts this.

    :param int Ncoef: quantized coefficient value (16-bit)
    :param int Nbase: basis column index (16-bit)
    :return: the packed 32-bit integer
    """
    high = Ncoef << 16
    return high | Nbase
def get_N(longN):
    """Unpack a 32-bit word produced by ``combine_int``.

    :param int longN: packed 32-bit integer
    :return: ``(Ncoef, Nbase)`` -- the coefficient stored in the high 16
        bits and the basis index stored in the low 16 bits
    """
    coef = longN >> 16
    base = longN & 0xFFFF
    return coef, base
def decode_sparse_indices(indices):
    """Decode packed sparse indices into basis indices and a weight array.

    Each 32-bit entry holds a quantized coefficient in its high 16 bits and
    a basis-column index in its low 16 bits.  The leading coefficient of
    every row is stored as exactly 1 by construction, so it is restored here.
    """
    Ncoef = 32001  # quantization levels used by build_sparse_representation
    packed = np.asarray(indices)
    coefs = packed >> 16              # high half word: quantized weights
    bases = packed & (2 ** 16 - 1)    # low half word: basis column indices
    weights = coefs * (1. / (Ncoef - 1))
    weights[:, 0] = 1.
    return bases, weights
def indices2shapes(sparse_indices, meta):
    """Recover the Voigt shape parameters encoded in the sparse indices.

    Parameters
    ----------
    sparse_indices: `np.array`
        2D array of packed (coefficient, basis) words, one row per object.
    meta: `dict`
        Metadata describing the basis grids (``mu``, ``sig``, ``dims``)
        needed to decode the indices.

    Returns
    -------
    tuple of `np.array`
        ``(weights, means, sigmas, gammas)``, one row per object.
    """
    Nmu, Nsigma, Nv, Ncoef = meta['dims'][:4]
    mean_grid = np.linspace(meta['mu'][0], meta['mu'][1], Nmu)
    sigma_grid = np.linspace(meta['sig'][0], meta['sig'][1], Nsigma)
    gamma_grid = np.linspace(0, 0.5, Nv)

    # Unpack each 32-bit word: quantized weight in the high half word,
    # flat basis index in the low half word.
    packed = np.asarray(sparse_indices)
    coefs = packed >> 16
    flat_idx = packed & (2 ** 16 - 1)

    weights = coefs * (1. / (Ncoef - 1))
    weights[:, 0] = 1.  # leading coefficient is stored normalized to 1

    # The flat basis index enumerates (mu, sigma, gamma) with gamma fastest,
    # matching the column order of create_voigt_basis.
    per_mu = Nsigma * Nv
    means = mean_grid[flat_idx // per_mu]
    sigmas = sigma_grid[(flat_idx % per_mu) // Nv]
    gammas = gamma_grid[(flat_idx % per_mu) % Nv]
    return weights, means, sigmas, gammas
def build_sparse_representation(x, P, mu=None, Nmu=None, sig=None, Nsig=None, Nv=3, Nsparse=20, tol=1.e-10, verbose=True):
    """compute the sparse representation of a set of pdfs evaluated on a common x array

    Builds a Voigt dictionary spanning the x grid, then runs sparse_basis
    (orthogonal matching pursuit) on each PDF in P and packs every
    (coefficient, basis-index) pair into a 32-bit integer via combine_int.

    :param x: common evaluation grid shared by all PDFs
    :param P: sequence of PDFs, one per object, each evaluated on x
    :param mu: [min, max] range for the basis means (default: full x range)
    :param Nmu: number of mean values (default: len(x))
    :param sig: [min, max] range for the basis sigmas (default derived from x)
    :param Nsig: number of sigma values (default derived from the sig range)
    :param Nv: number of Voigt gammas per (mu, sigma) pair
    :param Nsparse: maximum number of basis functions kept per object
    :param tol: residual tolerance passed to sparse_basis
    :param verbose: print progress information
    :return: (Sparse_Array, bigD, A) -- packed indices per object, the
        metadata dictionary, and the basis matrix
    """
    #Note : the range for gamma is fixed to [0, 0.5] in create_voigt_basis
    Ntot = len(P)
    if verbose:
        print("Total Galaxies = ", Ntot)
    dx = x[1] - x[0]
    # Fill in defaults derived from the x grid.
    if mu is None:
        mu = [min(x), max(x)]
    if Nmu is None:
        Nmu = len(x)
    if sig is None:
        max_sig = (max(x) - min(x)) / 12.
        min_sig = dx / 6.
        sig = [min_sig, max_sig]
    # NOTE(review): if sig is supplied but Nsig is None, max_sig/min_sig are
    # unbound here and this raises NameError -- pass both or neither.
    if Nsig is None:
        Nsig = int(np.ceil(2. * (max_sig - min_sig) / dx))
    if verbose:
        print('dx = ', dx)
        print('Nmu, Nsig, Nv = ', '[', Nmu, ',', Nsig, ',', Nv, ']')
        print('Total bases in dictionary', Nmu * Nsig * Nv)
        print('Nsparse (number of bases) = ', Nsparse)
    #Create dictionary
    print('Creating Dictionary...')
    A = create_voigt_basis(x, mu, Nmu, sig, Nsig, Nv)
    bigD = {}
    # Coefficients are quantized onto Ncoef levels in [0, 1]; Da is the step.
    Ncoef = 32001
    AA = np.linspace(0, 1, Ncoef)
    Da = AA[1] - AA[0]
    bigD['xvals'] = x
    bigD['mu'] = mu
    bigD['sig'] = sig
    bigD['dims'] = [Nmu, Nsig, Nv, Ncoef, Nsparse]
    bigD['Ntot'] = Ntot
    if verbose:
        print('Creating Sparse representation...')
    Sparse_Array = np.zeros((Ntot, Nsparse), dtype='int')
    for k in range(Ntot):
        pdf0 = P[k]
        # sparse_basis swaps the selected columns of A into its leading
        # block IN PLACE; they are swapped back at the end of this loop body.
        Dind, Dval = sparse_basis(A, pdf0, Nsparse, tolerance=tol)
        if len(Dind) < 1:#pragma: no cover
            continue
        #bigD[k]['sparse'] = [Dind, Dval]
        if max(Dval) > 0:
            # Store coefficients relative to the largest one; the leading
            # coefficient's absolute value is kept in index[0] so the scale
            # can be reconstructed on decode.
            dval0 = Dval[0]
            Dvalm = Dval / np.max(Dval)
            index = np.array(list(map(round, (Dvalm / Da))), dtype='int')
            index0 = int(round(dval0/Da))
            index[0] = index0
        else:
            index = np.zeros(len(Dind), dtype='int') #pragma: no cover
        # Pack each (coefficient, basis-index) pair into one 32-bit word.
        sparse_ind = np.array(list(map(combine_int, index, Dind)))
        Sparse_Array[k, 0:len(sparse_ind)] = sparse_ind
        #swap back columns
        A[:, [Dind]] = A[:, [np.arange(len(Dind))]]
    if verbose:
        print('done')
    return Sparse_Array, bigD, A
def pdf_from_sparse(sparse_indices, A, xvals, cut=1.e-5):
    """Reconstruct PDF evaluations on *xvals* from packed sparse indices.

    Parameters
    ----------
    sparse_indices : 2D array of packed (coefficient, basis) words, one row
        per object, as produced by build_sparse_representation.
    A : basis matrix of shape (len(xvals), n_basis) from create_voigt_basis.
    xvals : evaluation grid the basis was built on.
    cut : values below this threshold are zeroed before normalization.

    Returns
    -------
    np.ndarray of shape (len(xvals), n_objects), each column a PDF
    normalized to unit trapezoidal integral.
    """
    indices, vals = decode_sparse_indices(sparse_indices)
    # Weighted sum of the selected basis columns for every object.
    pdf_y = (A[:, indices] * vals).sum(axis=-1)
    pdf_y = np.where(pdf_y >= cut, pdf_y, 0.)
    pdf_x = xvals
    # scipy.integrate.trapz was removed in SciPy 1.14; compute the
    # trapezoidal integrals directly with numpy instead.
    norms = np.sum(0.5 * (pdf_y.T[..., 1:] + pdf_y.T[..., :-1]) * np.diff(pdf_x), axis=-1)
    pdf_y /= norms
    return pdf_y
| [
"scipy.linalg.cho_solve",
"numpy.ceil",
"scipy.special.voigt_profile",
"scipy.integrate.trapz",
"numpy.sqrt",
"numpy.where",
"numpy.max",
"numpy.array",
"numpy.dot",
"numpy.linspace",
"numpy.zeros",
"scipy.linalg.solve_triangular",
"scipy.linalg.norm",
"numpy.finfo",
"numpy.zeros_like",
... | [((637, 653), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (650, 653), True, 'import numpy as np\n'), ((857, 887), 'numpy.where', 'np.where', (['(pdf >= cut)', 'pdf', '(0.0)'], {}), '(pdf >= cut, pdf, 0.0)\n', (865, 887), True, 'import numpy as np\n'), ((2034, 2064), 'numpy.linspace', 'np.linspace', (['mu[0]', 'mu[1]', 'Nmu'], {}), '(mu[0], mu[1], Nmu)\n', (2045, 2064), True, 'import numpy as np\n'), ((2075, 2114), 'numpy.linspace', 'np.linspace', (['sigma[0]', 'sigma[1]', 'Nsigma'], {}), '(sigma[0], sigma[1], Nsigma)\n', (2086, 2114), True, 'import numpy as np\n'), ((2127, 2150), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5)', 'Nv'], {}), '(0, 0.5, Nv)\n', (2138, 2150), True, 'import numpy as np\n'), ((2208, 2228), 'numpy.zeros', 'np.zeros', (['(Npdf, NA)'], {}), '((Npdf, NA))\n', (2216, 2228), True, 'import numpy as np\n'), ((3480, 3509), 'numpy.zeros', 'np.zeros', (['dictionary.shape[1]'], {}), '(dictionary.shape[1])\n', (3488, 3509), True, 'import numpy as np\n'), ((3571, 3602), 'numpy.dot', 'np.dot', (['dictionary.T', 'query_vec'], {}), '(dictionary.T, query_vec)\n', (3577, 3602), True, 'import numpy as np\n'), ((3634, 3664), 'numpy.arange', 'np.arange', (['dictionary.shape[1]'], {}), '(dictionary.shape[1])\n', (3643, 3664), True, 'import numpy as np\n'), ((3702, 3754), 'numpy.zeros', 'np.zeros', (['(n_basis, n_basis)'], {'dtype': 'dictionary.dtype'}), '((n_basis, n_basis), dtype=dictionary.dtype)\n', (3710, 3754), True, 'import numpy as np\n'), ((6765, 6795), 'numpy.linspace', 'np.linspace', (['mu[0]', 'mu[1]', 'Nmu'], {}), '(mu[0], mu[1], Nmu)\n', (6776, 6795), True, 'import numpy as np\n'), ((6812, 6851), 'numpy.linspace', 'np.linspace', (['sigma[0]', 'sigma[1]', 'Nsigma'], {}), '(sigma[0], sigma[1], Nsigma)\n', (6823, 6851), True, 'import numpy as np\n'), ((6868, 6891), 'numpy.linspace', 'np.linspace', (['(0)', '(0.5)', 'Nv'], {}), '(0, 0.5, Nv)\n', (6879, 6891), True, 'import numpy as np\n'), ((8521, 8545), 'numpy.linspace', 
'np.linspace', (['(0)', '(1)', 'Ncoef'], {}), '(0, 1, Ncoef)\n', (8532, 8545), True, 'import numpy as np\n'), ((8796, 8834), 'numpy.zeros', 'np.zeros', (['(Ntot, Nsparse)'], {'dtype': '"""int"""'}), "((Ntot, Nsparse), dtype='int')\n", (8804, 8834), True, 'import numpy as np\n'), ((9912, 9946), 'numpy.where', 'np.where', (['(pdf_y >= cut)', 'pdf_y', '(0.0)'], {}), '(pdf_y >= cut, pdf_y, 0.0)\n', (9920, 9946), True, 'import numpy as np\n'), ((9976, 10004), 'scipy.integrate.trapz', 'sciint.trapz', (['pdf_y.T', 'pdf_x'], {}), '(pdf_y.T, pdf_x)\n', (9988, 10004), True, 'from scipy import integrate as sciint\n'), ((712, 738), 'scipy.special.voigt_profile', 'voigt_profile', (['(x - m)', 's', 'g'], {}), '(x - m, s, g)\n', (725, 738), False, 'from scipy.special import voigt_profile\n'), ((754, 786), 'numpy.where', 'np.where', (['(pdft >= cut)', 'pdft', '(0.0)'], {}), '(pdft >= cut, pdft, 0.0)\n', (762, 786), True, 'import numpy as np\n'), ((904, 924), 'scipy.integrate.trapz', 'sciint.trapz', (['pdf', 'x'], {}), '(pdf, x)\n', (916, 924), True, 'from scipy import integrate as sciint\n'), ((3528, 3554), 'numpy.finfo', 'np.finfo', (['dictionary.dtype'], {}), '(dictionary.dtype)\n', (3536, 3554), True, 'import numpy as np\n'), ((4813, 4912), 'scipy.linalg.cho_solve', 'sla.cho_solve', (['(L[:n_active + 1, :n_active + 1], True)', 'alpha[:n_active + 1]'], {'overwrite_b': '(False)'}), '((L[:n_active + 1, :n_active + 1], True), alpha[:n_active + 1],\n overwrite_b=False)\n', (4826, 4912), True, 'from scipy import linalg as sla\n'), ((7232, 7268), 'numpy.array', 'np.array', (['(Dind2 / (Nsigma * Nv))', 'int'], {}), '(Dind2 / (Nsigma * Nv), int)\n', (7240, 7268), True, 'import numpy as np\n'), ((7293, 7334), 'numpy.array', 'np.array', (['(Dind2 % (Nsigma * Nv) / Nv)', 'int'], {}), '(Dind2 % (Nsigma * Nv) / Nv, int)\n', (7301, 7334), True, 'import numpy as np\n'), ((7361, 7402), 'numpy.array', 'np.array', (['(Dind2 % (Nsigma * Nv) % Nv)', 'int'], {}), '(Dind2 % (Nsigma * Nv) % Nv, 
int)\n', (7369, 7402), True, 'import numpy as np\n'), ((812, 826), 'scipy.linalg.norm', 'sla.norm', (['pdft'], {}), '(pdft)\n', (820, 826), True, 'from scipy import linalg as sla\n'), ((4129, 4183), 'numpy.dot', 'np.dot', (['dictionary[:, :n_active].T', 'dictionary[:, lam]'], {}), '(dictionary[:, :n_active].T, dictionary[:, lam])\n', (4135, 4183), True, 'import numpy as np\n'), ((4196, 4300), 'scipy.linalg.solve_triangular', 'sla.solve_triangular', (['L[:n_active, :n_active]', 'L[n_active, :n_active]'], {'lower': '(True)', 'overwrite_b': '(True)'}), '(L[:n_active, :n_active], L[n_active, :n_active], lower\n =True, overwrite_b=True)\n', (4216, 4300), True, 'from scipy import linalg as sla\n'), ((4523, 4537), 'numpy.sqrt', 'np.sqrt', (['(1 - v)'], {}), '(1 - v)\n', (4530, 4537), True, 'import numpy as np\n'), ((4935, 4978), 'numpy.dot', 'np.dot', (['dictionary[:, :n_active + 1]', 'gamma'], {}), '(dictionary[:, :n_active + 1], gamma)\n', (4941, 4978), True, 'import numpy as np\n'), ((8089, 8128), 'numpy.ceil', 'np.ceil', (['(2.0 * (max_sig - min_sig) / dx)'], {}), '(2.0 * (max_sig - min_sig) / dx)\n', (8096, 8128), True, 'import numpy as np\n'), ((2352, 2401), 'scipy.special.voigt_profile', 'voigt_profile', (['(xvals - means[i])', 'sig[j]', 'gamma[k]'], {}), '(xvals - means[i], sig[j], gamma[k])\n', (2365, 2401), False, 'from scipy.special import voigt_profile\n'), ((2425, 2457), 'numpy.where', 'np.where', (['(pdft >= cut)', 'pdft', '(0.0)'], {}), '(pdft >= cut, pdft, 0.0)\n', (2433, 2457), True, 'import numpy as np\n'), ((3836, 3861), 'numpy.dot', 'np.dot', (['dictionary.T', 'res'], {}), '(dictionary.T, res)\n', (3842, 3861), True, 'import numpy as np\n'), ((4312, 4344), 'scipy.linalg.norm', 'sla.norm', (['L[n_active, :n_active]'], {}), '(L[n_active, :n_active])\n', (4320, 4344), True, 'from scipy import linalg as sla\n'), ((9137, 9149), 'numpy.max', 'np.max', (['Dval'], {}), '(Dval)\n', (9143, 9149), True, 'import numpy as np\n'), ((2491, 2505), 'scipy.linalg.norm', 
'sla.norm', (['pdft'], {}), '(pdft)\n', (2499, 2505), True, 'from scipy import linalg as sla\n'), ((5016, 5029), 'scipy.linalg.norm', 'sla.norm', (['res'], {}), '(res)\n', (5024, 5029), True, 'from scipy import linalg as sla\n')] |
import cv2
import numpy as np
import os
import glob
import pickle
#from sklearn.preprocessing import normalize
# Absolute directory of this module; the images/ and calib_result/ folders
# are resolved relative to it by the functions below.
current_file_path = os.path.dirname(os.path.abspath(__file__))
def save_obj(obj, name):
    """Pickle *obj* to calib_result/<name>.pkl next to this module."""
    target = os.path.join(current_file_path, "calib_result", name + '.pkl')
    with open(target, 'wb') as handle:
        pickle.dump(obj, handle)
def load_obj(name):
    """Load and return the pickled object calib_result/<name>.pkl."""
    source = os.path.join(current_file_path, "calib_result", name + '.pkl')
    with open(source, 'rb') as handle:
        return pickle.load(handle)
def intrinsic_calib(images,
                    out_name,
                    CHECKERBOARD = (6, 8),
                    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)):
    """Run intrinsic camera calibration over a set of chessboard images.

    Parameters
    ----------
    images : iterable of str
        Paths to the calibration images.
    out_name : str
        Base name for the pickled result written to calib_result/.
    CHECKERBOARD : tuple of int
        Inner-corner counts of the chessboard pattern.
    criteria : tuple
        Termination criteria for the sub-pixel corner refinement.

    Returns
    -------
    tuple
        (mtx, dist, rvecs, tvecs, Omtx, roi): camera matrix, distortion
        coefficients, per-image rotation/translation vectors, the optimal
        new camera matrix and its valid-pixel ROI.
    """
    # 3D corner coordinates in the board's own frame (z = 0 plane).
    objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
    objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
    objpoints = []  # 3D board points, one entry per accepted image
    imgpoints = []  # matching 2D detections in image coordinates
    for fname in images:
        name = os.path.basename(os.path.normpath(fname))
        # fname is already a full path; the redundant os.path.join was removed.
        gray = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
        print(name, gray.shape)
        # Detect the inner chessboard corners; ret is False when the full
        # pattern is not visible in the image.
        ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, None)
        print(ret)
        if ret:
            objpoints.append(objp)
            # Refine the detected corners to sub-pixel accuracy.
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)
            # Save an annotated copy for visual inspection.
            img = cv2.drawChessboardCorners(gray, CHECKERBOARD, corners2, ret)
            cv2.imwrite(os.path.join(current_file_path, "calib_result", name), img)
    # Calibrate from all accepted views ('gray' holds the last image read,
    # so all images are assumed to share the same resolution).
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    h, w = gray.shape[:2]
    Omtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    res = {
        "mtx": mtx,
        "dist": dist,
        "rvecs": rvecs,
        "tvecs": tvecs,
        "shape": gray.shape,
        "omtx": Omtx,
        "roi": roi,
        "objpoints": objpoints,
        "imgpoints": imgpoints,
    }
    save_obj(res, out_name)
    print("Camera matrix : \n")
    print(mtx)
    print("dist : \n")
    print(dist)
    print("rvecs : \n")
    print(rvecs)
    print("tvecs : \n")
    print(tvecs)
    return mtx, dist, rvecs, tvecs, Omtx, roi
def stereo_calib(criteria_stereo= (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)):
    """Calibrate the stereo rig and build the rectification lookup maps.

    Reuses (or computes and caches) the per-camera intrinsic calibrations,
    then runs stereo calibration, stereo rectification, and finally builds
    the undistort/rectify maps for the left and right cameras.

    :param criteria_stereo: termination criteria intended for
        cv2.stereoCalibrate
    :return: (Left_Stereo_Map, Right_Stereo_Map) as produced by
        cv2.initUndistortRectifyMap
    """
    # Ensure the cache directory exists and both intrinsic calibrations are
    # available (computed once from the images/ folder and pickled).
    if not os.path.exists(os.path.join(current_file_path, "calib_result")):
        os.mkdir(os.path.join(current_file_path, "calib_result"))
    if not os.path.exists((os.path.join(current_file_path, "calib_result", "left_intrinsic_calib.pkl"))):
        left_images = glob.glob(os.path.join(current_file_path, "images","left_*"))
        intrinsic_calib(left_images, "left_intrinsic_calib")
    if not os.path.exists((os.path.join(current_file_path, "calib_result", "right_intrinsic_calib.pkl"))):
        right_images = glob.glob(os.path.join(current_file_path, "images", "right_*"))
        intrinsic_calib(right_images, "right_intrinsic_calib")
    right_calib = load_obj( "right_intrinsic_calib")
    left_calib = load_obj( "left_intrinsic_calib")
    # OpenCV expects (width, height); the pickled shape is (rows, cols).
    img_shape = left_calib["shape"][::-1]
    objptsL = left_calib["objpoints"]
    objptsR = right_calib["objpoints"]
    objpoints = objptsL
    imgpointsL = left_calib["imgpoints"]
    imgpointsR = right_calib["imgpoints"]
    mtxL, mtxR = left_calib["mtx"] , right_calib["mtx"]
    distL, distR = left_calib["dist"] , right_calib["dist"]
    #print(criteria_stereo)
    #print(img_shape)
    #print( left_calib["shape"])
    #print(objptsL)
    #print(objptsR)
    #print(objptsL== objptsR)
    # Keep the per-camera intrinsics fixed; only R, T, E, F are estimated.
    flags = 0
    flags |= cv2.CALIB_FIX_INTRINSIC
    #flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
    #flags |= cv2.CALIB_USE_INTRINSIC_GUESS
    #flags |= cv2.CALIB_FIX_FOCAL_LENGTH
    #flags |= cv2.CALIB_FIX_ASPECT_RATIO
    #flags |= cv2.CALIB_ZERO_TANGENT_DIST
    #flags |= cv2.CALIB_RATIONAL_MODEL
    #flags |= cv2.CALIB_SAME_FOCAL_LENGTH
    #flags |= cv2.CALIB_FIX_K3
    #flags |= cv2.CALIB_FIX_K4
    #flags |= cv2.CALIB_FIX_K5
    # NOTE(review): criteria_stereo is passed positionally after img_shape;
    # depending on the cv2 binding version this slot may be R rather than
    # criteria -- verify, or pass it as criteria=criteria_stereo.
    retS, MLS, dLS, MRS, dRS, R, T, E, F= cv2.stereoCalibrate(objpoints,
                                                              imgpointsL,
                                                              imgpointsR,
                                                              mtxL,
                                                              distL,
                                                              mtxR,
                                                              distR,
                                                              img_shape,
                                                              criteria_stereo,
                                                              flags= flags)
    # StereoRectify function
    rectify_scale= 0 # if 0 image croped, if 1 image nor croped
    RL, RR, PL, PR, Q, roiL, roiR= cv2.stereoRectify(MLS, dLS, MRS, dRS,
                                                     img_shape, R, T,
                                                     rectify_scale,(0,0)) # last paramater is alpha, if 0= croped, if 1= not croped
    # initUndistortRectifyMap function
    Left_Stereo_Map= cv2.initUndistortRectifyMap(MLS, dLS, RL, PL,
                                                 img_shape, cv2.CV_16SC2) # cv2.CV_16SC2 this format enables us the programme to work faster
    Right_Stereo_Map= cv2.initUndistortRectifyMap(MRS, dRS, RR, PR,
                                                  img_shape, cv2.CV_16SC2)
    #*******************************************
    return Left_Stereo_Map, Right_Stereo_Map
if __name__ == "__main__":
    # Entry point: stereo_calib() runs the per-camera intrinsic calibration
    # on demand (results are cached as pickles) before the stereo stage.
    stereo_calib()
"cv2.initUndistortRectifyMap",
"cv2.findChessboardCorners",
"pickle.dump",
"cv2.drawChessboardCorners",
"cv2.stereoRectify",
"cv2.stereoCalibrate",
"pickle.load",
"os.path.join",
"os.path.normpath",
"cv2.getOptimalNewCameraMatrix",
"numpy.zeros",
"cv2.calibrateCamera",
"os.path.abspath",
"... | [((148, 173), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (163, 173), False, 'import os\n'), ((923, 986), 'numpy.zeros', 'np.zeros', (['(1, CHECKERBOARD[0] * CHECKERBOARD[1], 3)', 'np.float32'], {}), '((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)\n', (931, 986), True, 'import numpy as np\n'), ((2289, 2360), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, gray.shape[::-1], None, None)\n', (2308, 2360), False, 'import cv2\n'), ((2400, 2459), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['mtx', 'dist', '(w, h)', '(1)', '(w, h)'], {}), '(mtx, dist, (w, h), 1, (w, h))\n', (2429, 2459), False, 'import cv2\n'), ((4885, 5010), 'cv2.stereoCalibrate', 'cv2.stereoCalibrate', (['objpoints', 'imgpointsL', 'imgpointsR', 'mtxL', 'distL', 'mtxR', 'distR', 'img_shape', 'criteria_stereo'], {'flags': 'flags'}), '(objpoints, imgpointsL, imgpointsR, mtxL, distL, mtxR,\n distR, img_shape, criteria_stereo, flags=flags)\n', (4904, 5010), False, 'import cv2\n'), ((5677, 5754), 'cv2.stereoRectify', 'cv2.stereoRectify', (['MLS', 'dLS', 'MRS', 'dRS', 'img_shape', 'R', 'T', 'rectify_scale', '(0, 0)'], {}), '(MLS, dLS, MRS, dRS, img_shape, R, T, rectify_scale, (0, 0))\n', (5694, 5754), False, 'import cv2\n'), ((5976, 6046), 'cv2.initUndistortRectifyMap', 'cv2.initUndistortRectifyMap', (['MLS', 'dLS', 'RL', 'PL', 'img_shape', 'cv2.CV_16SC2'], {}), '(MLS, dLS, RL, PL, img_shape, cv2.CV_16SC2)\n', (6003, 6046), False, 'import cv2\n'), ((6187, 6257), 'cv2.initUndistortRectifyMap', 'cv2.initUndistortRectifyMap', (['MRS', 'dRS', 'RR', 'PR', 'img_shape', 'cv2.CV_16SC2'], {}), '(MRS, dRS, RR, PR, img_shape, cv2.CV_16SC2)\n', (6214, 6257), False, 'import cv2\n'), ((300, 319), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (311, 319), False, 'import pickle\n'), ((446, 460), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', 
(457, 460), False, 'import pickle\n'), ((1422, 1473), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', 'CHECKERBOARD', 'None'], {}), '(gray, CHECKERBOARD, None)\n', (1447, 1473), False, 'import cv2\n'), ((217, 279), 'os.path.join', 'os.path.join', (['current_file_path', '"""calib_result"""', "(name + '.pkl')"], {}), "(current_file_path, 'calib_result', name + '.pkl')\n", (229, 279), False, 'import os\n'), ((355, 417), 'os.path.join', 'os.path.join', (['current_file_path', '"""calib_result"""', "(name + '.pkl')"], {}), "(current_file_path, 'calib_result', name + '.pkl')\n", (367, 417), False, 'import os\n'), ((1157, 1180), 'os.path.normpath', 'os.path.normpath', (['fname'], {}), '(fname)\n', (1173, 1180), False, 'import os\n'), ((1208, 1227), 'os.path.join', 'os.path.join', (['fname'], {}), '(fname)\n', (1220, 1227), False, 'import os\n'), ((1944, 2005), 'cv2.cornerSubPix', 'cv2.cornerSubPix', (['gray', 'corners', '(11, 11)', '(-1, -1)', 'criteria'], {}), '(gray, corners, (11, 11), (-1, -1), criteria)\n', (1960, 2005), False, 'import cv2\n'), ((2108, 2168), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['gray', 'CHECKERBOARD', 'corners2', 'ret'], {}), '(gray, CHECKERBOARD, corners2, ret)\n', (2133, 2168), False, 'import cv2\n'), ((3144, 3191), 'os.path.join', 'os.path.join', (['current_file_path', '"""calib_result"""'], {}), "(current_file_path, 'calib_result')\n", (3156, 3191), False, 'import os\n'), ((3211, 3258), 'os.path.join', 'os.path.join', (['current_file_path', '"""calib_result"""'], {}), "(current_file_path, 'calib_result')\n", (3223, 3258), False, 'import os\n'), ((3288, 3363), 'os.path.join', 'os.path.join', (['current_file_path', '"""calib_result"""', '"""left_intrinsic_calib.pkl"""'], {}), "(current_file_path, 'calib_result', 'left_intrinsic_calib.pkl')\n", (3300, 3363), False, 'import os\n'), ((3400, 3451), 'os.path.join', 'os.path.join', (['current_file_path', '"""images"""', '"""left_*"""'], {}), "(current_file_path, 
'images', 'left_*')\n", (3412, 3451), False, 'import os\n'), ((3541, 3617), 'os.path.join', 'os.path.join', (['current_file_path', '"""calib_result"""', '"""right_intrinsic_calib.pkl"""'], {}), "(current_file_path, 'calib_result', 'right_intrinsic_calib.pkl')\n", (3553, 3617), False, 'import os\n'), ((3654, 3706), 'os.path.join', 'os.path.join', (['current_file_path', '"""images"""', '"""right_*"""'], {}), "(current_file_path, 'images', 'right_*')\n", (3666, 3706), False, 'import os\n'), ((2193, 2246), 'os.path.join', 'os.path.join', (['current_file_path', '"""calib_result"""', 'name'], {}), "(current_file_path, 'calib_result', name)\n", (2205, 2246), False, 'import os\n')] |
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Script to export a PyTorch-based Pyrado policy to C++
"""
import numpy as np
import torch as to
from rcsenv import ControlPolicy
from pyrado.policies.feed_forward.linear import LinearPolicy
from pyrado.policies.recurrent.rnn import RNNPolicy
from pyrado.spaces.box import BoxSpace
from pyrado.utils.data_types import EnvSpec
from pyrado.policies.features import FeatureStack, squared_feat, identity_feat, const_feat
def create_nonrecurrent_policy():
    """Construct a ``LinearPolicy`` over the two box spaces used in this script.

    The policy maps features (constant, identity, squared) of a 4-dim box
    space onto a 3-dim box space.
    """
    spec = EnvSpec(
        BoxSpace(-1, 1, 4),
        BoxSpace(-1, 1, 3),
    )
    feats = FeatureStack([const_feat, identity_feat, squared_feat])
    return LinearPolicy(spec, feats)
def create_recurrent_policy():
    """Construct an ``RNNPolicy`` over the two box spaces used in this script.

    Uses a single recurrent layer of hidden size 32 with tanh nonlinearity.
    """
    spec = EnvSpec(
        BoxSpace(-1, 1, 4),
        BoxSpace(-1, 1, 3),
    )
    return RNNPolicy(
        spec,
        hidden_size=32,
        num_recurrent_layers=1,
        hidden_nonlin="tanh",
    )
if __name__ == "__main__":
tmpfile = "/tmp/torchscriptsaved.pt"
to.set_default_dtype(to.float32) # former double
# Create a Pyrado policy
model = create_nonrecurrent_policy()
# model = create_recurrent_policy()
# Trace the Pyrado policy (inherits from PyTorch module)
traced_script_module = model.script()
print(traced_script_module.graph)
# Save the scripted module
traced_script_module.save(tmpfile)
# Load in C++
cp = ControlPolicy("torch", tmpfile)
# Print more digits
to.set_printoptions(precision=8, linewidth=200)
np.set_printoptions(precision=8, linewidth=200)
print(f"manual: {model(to.tensor([1, 2, 3, 4], dtype=to.get_default_dtype()))}")
print(f"script: {traced_script_module(to.tensor([1, 2, 3, 4], dtype=to.get_default_dtype()))}")
print(f"cpp: {cp(np.array([1, 2, 3, 4]), 3)}")
| [
"torch.get_default_dtype",
"torch.set_printoptions",
"rcsenv.ControlPolicy",
"torch.set_default_dtype",
"numpy.array",
"pyrado.policies.features.FeatureStack",
"pyrado.spaces.box.BoxSpace",
"numpy.set_printoptions"
] | [((2690, 2722), 'torch.set_default_dtype', 'to.set_default_dtype', (['to.float32'], {}), '(to.float32)\n', (2710, 2722), True, 'import torch as to\n'), ((3092, 3123), 'rcsenv.ControlPolicy', 'ControlPolicy', (['"""torch"""', 'tmpfile'], {}), "('torch', tmpfile)\n", (3105, 3123), False, 'from rcsenv import ControlPolicy\n'), ((3153, 3200), 'torch.set_printoptions', 'to.set_printoptions', ([], {'precision': '(8)', 'linewidth': '(200)'}), '(precision=8, linewidth=200)\n', (3172, 3200), True, 'import torch as to\n'), ((3205, 3252), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(8)', 'linewidth': '(200)'}), '(precision=8, linewidth=200)\n', (3224, 3252), True, 'import numpy as np\n'), ((2314, 2369), 'pyrado.policies.features.FeatureStack', 'FeatureStack', (['[const_feat, identity_feat, squared_feat]'], {}), '([const_feat, identity_feat, squared_feat])\n', (2326, 2369), False, 'from pyrado.policies.features import FeatureStack, squared_feat, identity_feat, const_feat\n'), ((2243, 2261), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)', '(4)'], {}), '(-1, 1, 4)\n', (2251, 2261), False, 'from pyrado.spaces.box import BoxSpace\n'), ((2275, 2293), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (2283, 2293), False, 'from pyrado.spaces.box import BoxSpace\n'), ((2461, 2479), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)', '(4)'], {}), '(-1, 1, 4)\n', (2469, 2479), False, 'from pyrado.spaces.box import BoxSpace\n'), ((2493, 2511), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (2501, 2511), False, 'from pyrado.spaces.box import BoxSpace\n'), ((3463, 3485), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (3471, 3485), True, 'import numpy as np\n'), ((3311, 3333), 'torch.get_default_dtype', 'to.get_default_dtype', ([], {}), '()\n', (3331, 3333), True, 'import torch as to\n'), ((3411, 3433), 'torch.get_default_dtype', 
'to.get_default_dtype', ([], {}), '()\n', (3431, 3433), True, 'import torch as to\n')] |
import numpy as np
import pandas as pd
from typing import List
from .phantom_class import Phantom
class Beam:
    """A class used to create an X-ray beam and detector.
    Attributes
    ----------
    r : np.array
        5*3 array, locates the xyz coordinates of the apex and verticies of a
        pyramid shaped X-ray beam, where the apex represents the X-ray focus
        (row 1) and the vertices where the beam intercepts the X-ray detector
        (row 2-5)
    ijk : np.array
        A matrix containing vertex indices. This is required in order to
        plot the beam using plotly Mesh3D. For more info, see "i", "j", and "k"
        at https://plot.ly/python/reference/#mesh3d
    det_r: np.array
        8*3 array, where each row locates the xyz coordinate of one of the 8
        corners of the cuboid shaped X-ray detector
    det_ijk : np.array
        same as ijk, but for plotting the X-ray detector
    N : np.array
        4*3 array, where each row contains a normal vector to one of the four
        faces of the beam.
    Methods
    -------
    check_hit(patient)
        Calculates which of the patient phantom's entrance skin cells are hit
        by the X-ray beam. For 3D phantoms, skin cells on the beams exit path
        are neglected.
    """
    def __init__(self, data_norm: pd.DataFrame, event: int = 0,
                 plot_setup: bool = False) -> None:
        """Initialize the beam and detector for a specific irradiation event.
        Parameters
        ----------
        data_norm : pd.DataFrame
            Dicom RDSR information from each irradiation event. See
            rdsr_normalizer.py for more information.
        event : int, optional
            Specifies the index of the irradiation event in the procedure
            (the default is 0, which is the first event).
        plot_setup : bool, optional
            If True, the beam angulation info from data_norm is neglected,
            and a beam of zero angulation is created insted. This is a
            debugging feature used when positioning new phantoms or
            implementing currently unsupported venor RDSR files (the default is
            False).
        """
        # Override beam angulation if plot_setup
        if plot_setup:
            ap1 = ap2 = ap3 = 0
        else:
            # Fetch rotation angles of the X-ray tube
            # Positioner isocenter primary angle (Ap1)
            ap1 = np.deg2rad(data_norm.Ap1[event])
            # Positioner isocenter secondary angle (Ap2)
            ap2 = np.deg2rad(data_norm.Ap2[event])
            # Positioner isocenter detector rotation angle (Ap3)
            ap3 = np.deg2rad(data_norm.Ap3[event])
        # Elementary rotation matrices: R1 rotates about the z-axis (ap1),
        # R2 about the x-axis (ap2), and R3 about the y-axis (ap3).
        R1 = np.array([[+np.cos(ap1), np.sin(ap1), +0],
                       [-np.sin(ap1), +np.cos(ap1), +0],
                       [+0, +0, +1]])
        R2 = np.array([[+1, +0, +0],
                       [+0, +np.cos(ap2), +np.sin(ap2)],
                       [+0, -np.sin(ap2), +np.cos(ap2)]])
        R3 = np.array([[+np.cos(ap3), +0, -np.sin(ap3)],
                       [+0, +1, +0],
                       [+np.sin(ap3), +0, +np.cos(ap3)]])
        # Locate X-ray source at the source-to-isocenter distance (DSI)
        # along the y-axis.
        source = np.array([0, data_norm.DSI[event], 0])
        # Create beam-detector interception point for a beam of side length 1
        r = np.array([[+0.5, -1.0, +0.5],
                      [+0.5, -1.0, -0.5],
                      [-0.5, -1.0, -0.5],
                      [-0.5, -1.0, +0.5]])
        # Scale the unit-sized beam to the actual field size and distance.
        r[:, 0] *= data_norm.FS_long[event]  # Longitudinal collimation
        r[:, 1] *= data_norm.DID[event]  # Set source-detector distance
        r[:, 2] *= data_norm.FS_lat[event]  # Lateral collimation
        # Stack apex (source) on top of the four detector-plane vertices.
        r = np.vstack([source, r])
        # Rotate beam about ap1, ap2 and ap3
        r = np.matmul(np.matmul(R2, R1).T, np.matmul(R3.T, r.T)).T
        self.r = r
        # Manually create vertex index vector for the X-ray beam
        self.ijk = np.column_stack((
            [0, 0, 0, 0, 1, 1],
            [1, 1, 3, 3, 2, 3],
            [2, 4, 2, 4, 3, 4]))
        # Create unit vectors from X-ray source to beam verticies
        v = ((self.r[1:] - self.r[0, :]).T /
             np.linalg.norm(self.r[1:] - self.r[0, :], axis=1)).T
        # Create the four normal vectors to the faces of the beam.
        # Each normal is the cross product of two adjacent edge unit vectors.
        self.N = np.vstack([np.cross(v[0, :], v[1, :]),
                            np.cross(v[1, :], v[2, :]),
                            np.cross(v[2, :], v[3, :]),
                            np.cross(v[3, :], v[0, :])])
        # Create detector corners for with side length 1
        # The first four rows represent the X-ray detector surface, the last
        # four are there to give the detector some depth for 3D visualization.
        det_r = np.array([[+0.5, -1.0, +0.5],
                          [+0.5, -1.0, -0.5],
                          [-0.5, -1.0, -0.5],
                          [-0.5, -1.0, +0.5],
                          [+0.5, -1.2, +0.5],
                          [+0.5, -1.2, -0.5],
                          [-0.5, -1.2, -0.5],
                          [-0.5, -1.2, +0.5]])
        # Add detector dimensions
        # NOTE(review): detector side length is read from the first event
        # (DSL[0]), not the current `event` — presumably the detector size is
        # constant over the procedure; confirm against the RDSR normalizer.
        detector_width = data_norm.DSL[0]
        det_r[:, 0] *= detector_width
        det_r[:, 2] *= detector_width
        # Place detector at actual distance
        det_r[:, 1] *= data_norm.DID[event]
        # Rotate detector about ap1, ap2 and ap3
        det_r = np.matmul(np.matmul(R2, R1).T, det_r.T).T
        self.det_r = det_r
        # Manually construct vertex index vector for the X-ray detector
        self.det_ijk = np.column_stack((
            [0, 0, 4, 4, 0, 1, 0, 3, 3, 7, 1, 1],
            [1, 2, 5, 6, 1, 5, 3, 7, 2, 2, 2, 6],
            [2, 3, 6, 7, 4, 4, 4, 4, 7, 6, 6, 5]))
    def check_hit(self, patient: Phantom) -> List[bool]:
        """Calculate which patient entrance skin cells are hit by the beam.
        A description of this algoritm is presented in the wiki, please visit
        https://dev.azure.com/Sjukhusfysiker/PySkinDose/_wiki
        Parameters
        ----------
        patient : Phantom
            Patient phantom, either of type plane, cylinder or human, i.e.
            instance of class Phantom
        Returns
        -------
        List[bool]
            A boolean list of the same length as the number of patient skin
            cells. True for all entrance skin cells that are hit by the beam.
        """
        # Create vectors from X-ray source to each phantom skin cell
        v = patient.r - self.r[0, :]
        # Check which skin cells lies within the beam: a cell is inside iff
        # its source-to-cell vector has a non-positive dot product with all
        # four face normals.
        hits = (np.dot(v, self.N.T) <= 0).all(axis=1)
        # if patient phantom is 3D, remove exit path skin cells
        if patient.phantom_model != "plane":
            temp1 = v[hits]
            temp2 = patient.n[hits]
            # A hit cell is an entrance cell when its surface normal opposes
            # the source-to-cell direction (non-positive dot product).
            bool_entrance = [np.dot(temp1[i], temp2[i]) <= 0
                             for i in range(len(temp1))]
            hits[np.where(hits)] = bool_entrance
        return hits.tolist()
| [
"numpy.cross",
"numpy.where",
"numpy.column_stack",
"numpy.array",
"numpy.deg2rad",
"numpy.dot",
"numpy.matmul",
"numpy.vstack",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin"
] | [((3201, 3239), 'numpy.array', 'np.array', (['[0, data_norm.DSI[event], 0]'], {}), '([0, data_norm.DSI[event], 0])\n', (3209, 3239), True, 'import numpy as np\n'), ((3331, 3425), 'numpy.array', 'np.array', (['[[+0.5, -1.0, +0.5], [+0.5, -1.0, -0.5], [-0.5, -1.0, -0.5], [-0.5, -1.0, +0.5]\n ]'], {}), '([[+0.5, -1.0, +0.5], [+0.5, -1.0, -0.5], [-0.5, -1.0, -0.5], [-0.5,\n -1.0, +0.5]])\n', (3339, 3425), True, 'import numpy as np\n'), ((3712, 3734), 'numpy.vstack', 'np.vstack', (['[source, r]'], {}), '([source, r])\n', (3721, 3734), True, 'import numpy as np\n'), ((3953, 4030), 'numpy.column_stack', 'np.column_stack', (['([0, 0, 0, 0, 1, 1], [1, 1, 3, 3, 2, 3], [2, 4, 2, 4, 3, 4])'], {}), '(([0, 0, 0, 0, 1, 1], [1, 1, 3, 3, 2, 3], [2, 4, 2, 4, 3, 4]))\n', (3968, 4030), True, 'import numpy as np\n'), ((4769, 4947), 'numpy.array', 'np.array', (['[[+0.5, -1.0, +0.5], [+0.5, -1.0, -0.5], [-0.5, -1.0, -0.5], [-0.5, -1.0, +\n 0.5], [+0.5, -1.2, +0.5], [+0.5, -1.2, -0.5], [-0.5, -1.2, -0.5], [-0.5,\n -1.2, +0.5]]'], {}), '([[+0.5, -1.0, +0.5], [+0.5, -1.0, -0.5], [-0.5, -1.0, -0.5], [-0.5,\n -1.0, +0.5], [+0.5, -1.2, +0.5], [+0.5, -1.2, -0.5], [-0.5, -1.2, -0.5],\n [-0.5, -1.2, +0.5]])\n', (4777, 4947), True, 'import numpy as np\n'), ((5594, 5729), 'numpy.column_stack', 'np.column_stack', (['([0, 0, 4, 4, 0, 1, 0, 3, 3, 7, 1, 1], [1, 2, 5, 6, 1, 5, 3, 7, 2, 2, 2, 6],\n [2, 3, 6, 7, 4, 4, 4, 4, 7, 6, 6, 5])'], {}), '(([0, 0, 4, 4, 0, 1, 0, 3, 3, 7, 1, 1], [1, 2, 5, 6, 1, 5, 3,\n 7, 2, 2, 2, 6], [2, 3, 6, 7, 4, 4, 4, 4, 7, 6, 6, 5]))\n', (5609, 5729), True, 'import numpy as np\n'), ((2440, 2472), 'numpy.deg2rad', 'np.deg2rad', (['data_norm.Ap1[event]'], {}), '(data_norm.Ap1[event])\n', (2450, 2472), True, 'import numpy as np\n'), ((2548, 2580), 'numpy.deg2rad', 'np.deg2rad', (['data_norm.Ap2[event]'], {}), '(data_norm.Ap2[event])\n', (2558, 2580), True, 'import numpy as np\n'), ((2664, 2696), 'numpy.deg2rad', 'np.deg2rad', (['data_norm.Ap3[event]'], {}), 
'(data_norm.Ap3[event])\n', (2674, 2696), True, 'import numpy as np\n'), ((3824, 3844), 'numpy.matmul', 'np.matmul', (['R3.T', 'r.T'], {}), '(R3.T, r.T)\n', (3833, 3844), True, 'import numpy as np\n'), ((4193, 4242), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.r[1:] - self.r[0, :])'], {'axis': '(1)'}), '(self.r[1:] - self.r[0, :], axis=1)\n', (4207, 4242), True, 'import numpy as np\n'), ((4342, 4368), 'numpy.cross', 'np.cross', (['v[0, :]', 'v[1, :]'], {}), '(v[0, :], v[1, :])\n', (4350, 4368), True, 'import numpy as np\n'), ((4398, 4424), 'numpy.cross', 'np.cross', (['v[1, :]', 'v[2, :]'], {}), '(v[1, :], v[2, :])\n', (4406, 4424), True, 'import numpy as np\n'), ((4454, 4480), 'numpy.cross', 'np.cross', (['v[2, :]', 'v[3, :]'], {}), '(v[2, :], v[3, :])\n', (4462, 4480), True, 'import numpy as np\n'), ((4510, 4536), 'numpy.cross', 'np.cross', (['v[3, :]', 'v[0, :]'], {}), '(v[3, :], v[0, :])\n', (4518, 4536), True, 'import numpy as np\n'), ((6960, 6974), 'numpy.where', 'np.where', (['hits'], {}), '(hits)\n', (6968, 6974), True, 'import numpy as np\n'), ((2736, 2747), 'numpy.sin', 'np.sin', (['ap1'], {}), '(ap1)\n', (2742, 2747), True, 'import numpy as np\n'), ((3803, 3820), 'numpy.matmul', 'np.matmul', (['R2', 'R1'], {}), '(R2, R1)\n', (3812, 3820), True, 'import numpy as np\n'), ((5439, 5456), 'numpy.matmul', 'np.matmul', (['R2', 'R1'], {}), '(R2, R1)\n', (5448, 5456), True, 'import numpy as np\n'), ((6612, 6631), 'numpy.dot', 'np.dot', (['v', 'self.N.T'], {}), '(v, self.N.T)\n', (6618, 6631), True, 'import numpy as np\n'), ((6853, 6879), 'numpy.dot', 'np.dot', (['temp1[i]', 'temp2[i]'], {}), '(temp1[i], temp2[i])\n', (6859, 6879), True, 'import numpy as np\n'), ((2723, 2734), 'numpy.cos', 'np.cos', (['ap1'], {}), '(ap1)\n', (2729, 2734), True, 'import numpy as np\n'), ((2778, 2789), 'numpy.sin', 'np.sin', (['ap1'], {}), '(ap1)\n', (2784, 2789), True, 'import numpy as np\n'), ((2792, 2803), 'numpy.cos', 'np.cos', (['ap1'], {}), '(ap1)\n', (2798, 2803), True, 
'import numpy as np\n'), ((2914, 2925), 'numpy.cos', 'np.cos', (['ap2'], {}), '(ap2)\n', (2920, 2925), True, 'import numpy as np\n'), ((2928, 2939), 'numpy.sin', 'np.sin', (['ap2'], {}), '(ap2)\n', (2934, 2939), True, 'import numpy as np\n'), ((2971, 2982), 'numpy.sin', 'np.sin', (['ap2'], {}), '(ap2)\n', (2977, 2982), True, 'import numpy as np\n'), ((2985, 2996), 'numpy.cos', 'np.cos', (['ap2'], {}), '(ap2)\n', (2991, 2996), True, 'import numpy as np\n'), ((3026, 3037), 'numpy.cos', 'np.cos', (['ap3'], {}), '(ap3)\n', (3032, 3037), True, 'import numpy as np\n'), ((3044, 3055), 'numpy.sin', 'np.sin', (['ap3'], {}), '(ap3)\n', (3050, 3055), True, 'import numpy as np\n'), ((3120, 3131), 'numpy.sin', 'np.sin', (['ap3'], {}), '(ap3)\n', (3126, 3131), True, 'import numpy as np\n'), ((3138, 3149), 'numpy.cos', 'np.cos', (['ap3'], {}), '(ap3)\n', (3144, 3149), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import norm
from data_manager import read_data
import time
import tqdm
import matplotlib.pyplot as plt
from conjugate_gradient import conjugate_gradient
import random
# ---------------------------------------------------------------------------
# Benchmark of the conjugate-gradient (CG) least-squares solver against
# numpy.linalg.lstsq: timing, convergence vs. tolerance, and sensitivity of
# the iteration count to the initial guess x0.
# ---------------------------------------------------------------------------
A, b = read_data('data/ML-CUP19-TR.csv')
m, n = A.shape

# --- Our CG solution -------------------------------------------------------
start = time.perf_counter_ns()
x, status, ite = conjugate_gradient(A, b)
done = time.perf_counter_ns()
elapsed = done - start
print("our implementation: ns spent: ", elapsed)
print("status: ", status)
print("iterations: ", ite)
print("||Ax - b|| = ", norm(np.matmul(A, x) - b))
print("||Ax - b||/||b|| =", np.divide(norm(np.matmul(A, x) - b), norm(b)))
print("||A*Ax - A*b|| =", norm(np.matmul(np.transpose(A), np.matmul(A, x))
                                - np.matmul(np.transpose(A), b)))

# --- Reference least-squares solution --------------------------------------
start = time.perf_counter_ns()
xnp = np.linalg.lstsq(A, b, rcond=None)
done = time.perf_counter_ns()
elapsed = done - start
# Label fixed: the reference solver is numpy.linalg.lstsq, not a QR routine.
print("numpy.linalg.lstsq: ns spent: ", elapsed)
print("||Ax - b|| =", norm(np.matmul(A, xnp[0]) - b))
print("||Ax - b||/||b|| =", np.divide(norm(np.matmul(A, xnp[0]) - b), norm(b)))
# Bug fix: this residual previously used the CG solution `x` instead of the
# lstsq solution `xnp[0]`.
print("||A*Ax - A*b|| =", norm(np.matmul(np.transpose(A), np.matmul(A, xnp[0]))
                                - np.matmul(np.transpose(A), b)))

# --- Execution time over 1000 tries ----------------------------------------
cg_tries = []
for _ in tqdm.tqdm(range(1000)):
    start = time.perf_counter_ns()
    x, _, _ = conjugate_gradient(A, b)
    done = time.perf_counter_ns()
    cg_tries.append(done - start)
cg_tries = np.array(cg_tries)
cg_tries.sort()
# Trim the 100 fastest and 100 slowest runs (outliers) before averaging.
cg_tries = cg_tries[100:-100]
print("Mean elapsed time over 1000 tries: ", cg_tries.mean())

# --- Convergence of CG for decreasing tolerances ---------------------------
# Tolerances from 1e-1 down to 1e-11.
acc = [10 ** exponent for exponent in range(-1, -12, -1)]

# Random system (A_, b_) rescaled to the same value range as (A, b).
A_ = np.random.rand(m, n)
b_ = np.random.rand(m)
A_ = (A_ - A_.min()) / (A_.max() - A_.min()) * (A.max() - A.min()) + A.min()
b_ = (b_ - b_.min()) / (b_.max() - b_.min()) * (b.max() - b.min()) + b.min()

iterations = []
iterations_ = []
for eps in acc:
    _, _, ite = conjugate_gradient(A, b, eps=eps, maxIter=1000000)
    _, _, ite_ = conjugate_gradient(A_, b_, eps=eps, maxIter=1000000)
    iterations.append(ite)
    iterations_.append(ite_)


def _plot_accuracy(n_iterations, accuracies, out_path):
    """Plot tolerance vs. iteration count (log y-axis) and save to out_path."""
    plt.plot(n_iterations, accuracies)
    plt.xlabel("Number of iterations")
    plt.ylabel("Accuracy")
    # Base-10 log scale; 10 is the default base, so there is no need for the
    # `basey` keyword (deprecated in matplotlib 3.3, removed in 3.5).
    plt.yscale('log')
    plt.savefig(out_path)
    plt.show()


# Creating plots (duplicated plotting code factored into _plot_accuracy).
print("Creating plot...")
_plot_accuracy(iterations, acc, "results/cg_accuracy.png")
_plot_accuracy(iterations_, acc, "results/cg_accuracy_rand.png")

# --- Sensitivity to the initial starting point -----------------------------
xcg, _, _ = conjugate_gradient(A, b, eps=1.e-11)
xzeros = np.zeros(A.shape[1])
xrand = [v + random.randint(-5, 5) for v in xnp[0]]
xrand1 = [v * random.randint(-10, 10) for v in xnp[0]]
xrand2 = [random.uniform(xcg.min(), xcg.max()) for _ in range(A.shape[1])]
# (Dead code removed: three further random starting points were generated
# but never added to x0s.)
x0s = [xnp[0], xcg, xrand, xzeros, xrand1, xrand2]

norms = []
iterations = []
for x0 in x0s:
    diff = norm(xnp[0] - x0)
    norms.append(diff)
    print("||x - x0||", diff)
    x, _, ite = conjugate_gradient(A, b, x0, eps=1.e-11, maxIter=1000000)
    iterations.append(ite)
    print("iterations: ", ite)

# Sort the (norm, iterations) pairs by norm; a single sort suffices (the
# original sorted twice, discarding the first result).
norms, iterations = (list(t) for t in zip(*sorted(zip(norms, iterations))))

# Creating plot
print("Creating plot...")
plt.plot(norms, iterations)
# Highlight the all-zeros starting point with a red star.
i = norms.index(norm(xnp[0] - np.zeros(A.shape[1])))
plt.plot(norms[i], iterations[i], 'r*')
plt.xlabel("||x-x0||")
plt.ylabel("Iterations")
plt.savefig("results/cg_x0_ite.png")
plt.show()
| [
"numpy.transpose",
"matplotlib.pyplot.savefig",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.yscale",
"numpy.zeros",
"numpy.matmul",
"numpy.linalg.lstsq",
"numpy.linalg.norm",
"conjugate_gradient.conj... | [((212, 245), 'data_manager.read_data', 'read_data', (['"""data/ML-CUP19-TR.csv"""'], {}), "('data/ML-CUP19-TR.csv')\n", (221, 245), False, 'from data_manager import read_data\n'), ((285, 307), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (305, 307), False, 'import time\n'), ((325, 349), 'conjugate_gradient.conjugate_gradient', 'conjugate_gradient', (['A', 'b'], {}), '(A, b)\n', (343, 349), False, 'from conjugate_gradient import conjugate_gradient\n'), ((357, 379), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (377, 379), False, 'import time\n'), ((779, 801), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (799, 801), False, 'import time\n'), ((808, 841), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'b'], {'rcond': 'None'}), '(A, b, rcond=None)\n', (823, 841), True, 'import numpy as np\n'), ((849, 871), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (869, 871), False, 'import time\n'), ((1442, 1460), 'numpy.array', 'np.array', (['cg_tries'], {}), '(cg_tries)\n', (1450, 1460), True, 'import numpy as np\n'), ((1721, 1741), 'numpy.random.rand', 'np.random.rand', (['m', 'n'], {}), '(m, n)\n', (1735, 1741), True, 'import numpy as np\n'), ((1747, 1764), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (1761, 1764), True, 'import numpy as np\n'), ((2271, 2296), 'matplotlib.pyplot.plot', 'plt.plot', (['iterations', 'acc'], {}), '(iterations, acc)\n', (2279, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2297, 2331), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of iterations"""'], {}), "('Number of iterations')\n", (2307, 2331), True, 'import matplotlib.pyplot as plt\n'), ((2332, 2354), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (2342, 2354), True, 'import matplotlib.pyplot as plt\n'), ((2355, 2382), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'basey': '(10)'}), "('log', 
basey=10)\n", (2365, 2382), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2421), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/cg_accuracy.png"""'], {}), "('results/cg_accuracy.png')\n", (2394, 2421), True, 'import matplotlib.pyplot as plt\n'), ((2422, 2432), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2430, 2432), True, 'import matplotlib.pyplot as plt\n'), ((2433, 2459), 'matplotlib.pyplot.plot', 'plt.plot', (['iterations_', 'acc'], {}), '(iterations_, acc)\n', (2441, 2459), True, 'import matplotlib.pyplot as plt\n'), ((2460, 2494), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of iterations"""'], {}), "('Number of iterations')\n", (2470, 2494), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2517), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (2505, 2517), True, 'import matplotlib.pyplot as plt\n'), ((2518, 2545), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'basey': '(10)'}), "('log', basey=10)\n", (2528, 2545), True, 'import matplotlib.pyplot as plt\n'), ((2546, 2589), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/cg_accuracy_rand.png"""'], {}), "('results/cg_accuracy_rand.png')\n", (2557, 2589), True, 'import matplotlib.pyplot as plt\n'), ((2590, 2600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2598, 2600), True, 'import matplotlib.pyplot as plt\n'), ((2650, 2685), 'conjugate_gradient.conjugate_gradient', 'conjugate_gradient', (['A', 'b'], {'eps': '(1e-11)'}), '(A, b, eps=1e-11)\n', (2668, 2685), False, 'from conjugate_gradient import conjugate_gradient\n'), ((2698, 2718), 'numpy.zeros', 'np.zeros', (['A.shape[1]'], {}), '(A.shape[1])\n', (2706, 2718), True, 'import numpy as np\n'), ((3689, 3716), 'matplotlib.pyplot.plot', 'plt.plot', (['norms', 'iterations'], {}), '(norms, iterations)\n', (3697, 3716), True, 'import matplotlib.pyplot as plt\n'), ((3768, 3807), 'matplotlib.pyplot.plot', 'plt.plot', (['norms[i]', 'iterations[i]', 
'"""r*"""'], {}), "(norms[i], iterations[i], 'r*')\n", (3776, 3807), True, 'import matplotlib.pyplot as plt\n'), ((3808, 3830), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""||x-x0||"""'], {}), "('||x-x0||')\n", (3818, 3830), True, 'import matplotlib.pyplot as plt\n'), ((3831, 3855), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Iterations"""'], {}), "('Iterations')\n", (3841, 3855), True, 'import matplotlib.pyplot as plt\n'), ((3856, 3892), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/cg_x0_ite.png"""'], {}), "('results/cg_x0_ite.png')\n", (3867, 3892), True, 'import matplotlib.pyplot as plt\n'), ((3893, 3903), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3901, 3903), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1301), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (1299, 1301), False, 'import time\n'), ((1316, 1340), 'conjugate_gradient.conjugate_gradient', 'conjugate_gradient', (['A', 'b'], {}), '(A, b)\n', (1334, 1340), False, 'from conjugate_gradient import conjugate_gradient\n'), ((1352, 1374), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (1372, 1374), False, 'import time\n'), ((2042, 2092), 'conjugate_gradient.conjugate_gradient', 'conjugate_gradient', (['A', 'b'], {'eps': 'eps', 'maxIter': '(1000000)'}), '(A, b, eps=eps, maxIter=1000000)\n', (2060, 2092), False, 'from conjugate_gradient import conjugate_gradient\n'), ((2113, 2165), 'conjugate_gradient.conjugate_gradient', 'conjugate_gradient', (['A_', 'b_'], {'eps': 'eps', 'maxIter': '(1000000)'}), '(A_, b_, eps=eps, maxIter=1000000)\n', (2131, 2165), False, 'from conjugate_gradient import conjugate_gradient\n'), ((3306, 3323), 'numpy.linalg.norm', 'norm', (['(xnp[0] - x0)'], {}), '(xnp[0] - x0)\n', (3310, 3323), False, 'from numpy.linalg import norm\n'), ((3391, 3447), 'conjugate_gradient.conjugate_gradient', 'conjugate_gradient', (['A', 'b', 'x0'], {'eps': '(1e-11)', 'maxIter': '(1000000)'}), '(A, b, x0, eps=1e-11, 
maxIter=1000000)\n', (3409, 3447), False, 'from conjugate_gradient import conjugate_gradient\n'), ((620, 627), 'numpy.linalg.norm', 'norm', (['b'], {}), '(b)\n', (624, 627), False, 'from numpy.linalg import norm\n'), ((1065, 1072), 'numpy.linalg.norm', 'norm', (['b'], {}), '(b)\n', (1069, 1072), False, 'from numpy.linalg import norm\n'), ((2752, 2773), 'random.randint', 'random.randint', (['(-5)', '(5)'], {}), '(-5, 5)\n', (2766, 2773), False, 'import random\n'), ((2805, 2828), 'random.randint', 'random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (2819, 2828), False, 'import random\n'), ((533, 548), 'numpy.matmul', 'np.matmul', (['A', 'x'], {}), '(A, x)\n', (542, 548), True, 'import numpy as np\n'), ((968, 988), 'numpy.matmul', 'np.matmul', (['A', 'xnp[0]'], {}), '(A, xnp[0])\n', (977, 988), True, 'import numpy as np\n'), ((3745, 3765), 'numpy.zeros', 'np.zeros', (['A.shape[1]'], {}), '(A.shape[1])\n', (3753, 3765), True, 'import numpy as np\n'), ((598, 613), 'numpy.matmul', 'np.matmul', (['A', 'x'], {}), '(A, x)\n', (607, 613), True, 'import numpy as np\n'), ((671, 686), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (683, 686), True, 'import numpy as np\n'), ((687, 702), 'numpy.matmul', 'np.matmul', (['A', 'x'], {}), '(A, x)\n', (696, 702), True, 'import numpy as np\n'), ((716, 731), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (728, 731), True, 'import numpy as np\n'), ((1038, 1058), 'numpy.matmul', 'np.matmul', (['A', 'xnp[0]'], {}), '(A, xnp[0])\n', (1047, 1058), True, 'import numpy as np\n'), ((1116, 1131), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (1128, 1131), True, 'import numpy as np\n'), ((1132, 1147), 'numpy.matmul', 'np.matmul', (['A', 'x'], {}), '(A, x)\n', (1141, 1147), True, 'import numpy as np\n'), ((1161, 1176), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (1173, 1176), True, 'import numpy as np\n')] |
# This file is part of wallriori
# (c) <NAME>
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from functools import partial
from ..lawsofthewall import Spalding
__all__ = ["WallModel", "LOTWWallModel", "IntegratedLOTWWallModel", "LSQRWallModel"]
class WallModel:
    """Base class for wall models.

    Stores the wall-normal sampling height ``h`` and the kinematic
    viscosity ``nu`` behind read/write properties.
    """

    def __init__(self, h, nu):
        self.h = h
        self.nu = nu

    @property
    def h(self):
        """Wall-normal distance of the sampling point."""
        return self.__h

    @h.setter
    def h(self, val):
        self.__h = val

    @property
    def nu(self):
        """Kinematic viscosity."""
        return self.__nu

    @nu.setter
    def nu(self, val):
        self.__nu = val
class LOTWWallModel(WallModel):
    """Wall model based on a law of the wall evaluated at a single point."""

    def __init__(self, h, nu, law, rootFinder):
        WallModel.__init__(self, h, nu)
        self.law = law
        self.rootFinder = rootFinder

    @property
    def law(self):
        """The law of the wall relating sampled velocity and uTau."""
        return self.__law

    @law.setter
    def law(self, value):
        self.__law = value

    @property
    def rootFinder(self):
        """Root finder used to solve the law of the wall for uTau."""
        return self.__rootFinder

    @rootFinder.setter
    def rootFinder(self, value):
        self.__rootFinder = value

    def nut(self, guess, sampledU, wallGradU):
        """Turbulent viscosity enforcing the modelled wall shear stress."""
        uTau = self.utau(guess, sampledU)
        # Clip at zero so nut is never negative.
        return np.max([0.0, uTau**2/np.abs(wallGradU) - self.nu])

    def utau(self, guess, sampledU):
        """Friction velocity obtained by root-finding on the law of the wall."""
        self.rootFinder.f = partial(self.law.value, sampledU, self.h, self.nu)
        self.rootFinder.d = partial(self.law.derivative, sampledU, self.h, self.nu)
        return np.max([0, self.rootFinder.solve(guess)])
class IntegratedLOTWWallModel(WallModel):
    """Wall model based on a law of the wall using two wall-normal heights.

    The base-class height ``h`` is set to the midpoint of ``h1`` and ``h2``.
    """

    def __init__(self, h1, h2, nu, law, rootFinder):
        WallModel.__init__(self, (h1 + h2)/2, nu)
        self.h1 = h1
        self.h2 = h2
        self.law = law
        self.rootFinder = rootFinder

    @property
    def h1(self):
        """First of the two wall-normal sampling heights."""
        return self.__h1

    @h1.setter
    def h1(self, value):
        self.__h1 = value

    @property
    def h2(self):
        """Second of the two wall-normal sampling heights."""
        return self.__h2

    @h2.setter
    def h2(self, value):
        self.__h2 = value

    @property
    def law(self):
        """The law of the wall relating sampled velocity and uTau."""
        return self.__law

    @law.setter
    def law(self, value):
        self.__law = value

    @property
    def rootFinder(self):
        """Root finder used to solve the law of the wall for uTau."""
        return self.__rootFinder

    @rootFinder.setter
    def rootFinder(self, value):
        self.__rootFinder = value

    def nut(self, guess, sampledU, wallGradU):
        """Turbulent viscosity enforcing the modelled wall shear stress."""
        uTau = self.utau(guess, sampledU)
        # Clip at zero so nut is never negative.
        return np.max([0.0, uTau**2/np.abs(wallGradU) - self.nu])

    def utau(self, guess, sampledU):
        """Friction velocity obtained by root-finding on the law of the wall."""
        self.rootFinder.f = partial(self.law.value, sampledU, self.h1, self.h2, self.nu)
        self.rootFinder.d = partial(self.law.derivative, sampledU, self.h1, self.h2, self.nu)
        return np.max([0, self.rootFinder.solve(guess)])
class LSQRWallModel(WallModel):
    """Wall model that fits the log-law coefficients kappa and B by least
    squares from a profile of sampled velocities, then solves Spalding's
    law for the friction velocity."""

    def __init__(self, h, nu, rootFinder):
        """
        Model with dynamic kappa and B coefficients for the log-law.

        Parameters
        ----------
        h : ndarray
            Array of wall-normal distances, where the velocity values
            are sampled.
        nu : float
            Kinematic viscosity.
        rootFinder : object
            Root finder used to solve the law of the wall.
        """
        WallModel.__init__(self, h, nu)
        self.rootFinder = rootFinder

    @property
    def rootFinder(self):
        """Root finder used to solve the law of the wall for uTau."""
        return self.__rootFinder

    @rootFinder.setter
    def rootFinder(self, value):
        self.__rootFinder = value

    def nut(self, guess, sampledU, wallGradU):
        """
        Compute the nut needed to enforce the correct shear stress.

        Parameters
        ----------
        guess : float
            Initial guess for the friction velocity.
        sampledU : 1d array
            Sampled velocity values.
        wallGradU
            The wall-normal velocity gradient.

        Returns
        -------
        float
            The value of turbulent viscosity
        """
        magGradU = np.abs(wallGradU)
        # NOTE(review): self.utau also requires `index` and `nIter`
        # arguments, so this call raises TypeError as written; flagged
        # rather than inventing defaults -- confirm intended usage.
        uTau = self.utau(guess, sampledU)
        # Clip at zero so nut is never negative.
        return np.max([0.0, uTau**2/magGradU - self.nu])

    def kappa_and_b(self, uTau, sampledU):
        """
        Compute kappa and B using the formula in the paper.

        Fits u* = (1/kappa)*log(y*) + B by linear least squares in
        closed form.

        Parameters
        ----------
        uTau : float
            A guess for the friction velocity
        sampledU : 1d array
            The sampled values of velocity

        Returns
        -------
        (scalar, scalar)
            The values of kappa and b
        """
        from numpy import sum, log
        # u* and y* in the paper
        u = sampledU/uTau
        y = self.h*uTau/self.nu
        n = self.h.size
        kappaNom = n*sum(log(y)**2) - sum(log(y))**2
        kappaDenom = n*sum(u*log(y)) - sum(u)*sum(log(y))
        # Small offset guards against division by zero for degenerate fits.
        kappa = kappaNom/(kappaDenom + 1e-12)
        b = 1/n*np.sum(u - 1/kappa*np.log(y))
        return kappa, b

    def kappa_and_b_builtin(self, uTau, sampledU):
        """
        Compute kappa and B using np.polyfit.
        Can be used to check the manual implementation.

        Parameters
        ----------
        uTau : float
            A guess for the friction velocity
        sampledU : 1d array
            The sampled values of velocity

        Returns
        -------
        (scalar, scalar)
            The values of kappa and b
        """
        # u* and y* in the paper
        u = sampledU/uTau
        y = self.h*uTau/self.nu
        # Degree-1 fit of u against log(y): slope is 1/kappa, intercept is b.
        kappaInv, b = np.polyfit(np.log(y), u, deg=1)
        return 1/kappaInv, b

    def utau_iteration(self, uTau, sampledU, index):
        """
        Perform one iteration of computing the friction velocity.

        Refits kappa and B for the current uTau and solves Spalding's law
        at one sampling point.

        Parameters
        ----------
        uTau : float
            A guess for the friction velocity
        sampledU : 1d array
            The sampled values of velocity
        index : int
            Index of the velocity and y value to use in Spalding's law.

        Returns
        -------
        float
            The updated (non-negative) friction velocity.
        """
        kappa, b = self.kappa_and_b(uTau, sampledU)
        law = Spalding(kappa, b)
        f = partial(law.value, sampledU[index], self.h[index], self.nu)
        d = partial(law.derivative, sampledU[index], self.h[index], self.nu)
        self.rootFinder.f = f
        self.rootFinder.d = d
        return np.max([0, self.rootFinder.solve(uTau)])

    def utau(self, guess, sampledU, index, nIter, eps=1, verbose=True):
        """
        Compute the friction velocity.

        Parameters
        ----------
        guess : float
            Initial guess for the friction velocity
        sampledU : 1d array
            The sampled velocity values
        index : int
            The index of the velocity and y value to use in Spalding's law.
        nIter : int
            The amount of iterations to compute the friction velocity.
        eps : float
            Under-relaxation factor, defaults 1, i.e. no under-relaxation.
        verbose : bool
            Wether to print results from each iteration, defaults to True

        Returns
        -------
        float
            The friction velocity.
        """
        uTau = guess
        for i in range(nIter):
            uTauNew = self.utau_iteration(uTau, sampledU, index)
            # Under-relaxed update: eps=1 takes the full step.
            uTau = eps*uTauNew + (1 - eps)*uTau
            if verbose:
                print("Iteration", i, "uTau", uTau)
        # BUGFIX: return the iterated friction velocity. The original
        # re-solved from the initial guess, discarding the fixed-point
        # iteration and the under-relaxation performed above.
        return np.max([0, uTau])
| [
"numpy.abs",
"numpy.log",
"numpy.max",
"numpy.sum",
"functools.partial"
] | [((1323, 1340), 'numpy.abs', 'np.abs', (['wallGradU'], {}), '(wallGradU)\n', (1329, 1340), True, 'import numpy as np\n'), ((1398, 1443), 'numpy.max', 'np.max', (['[0.0, uTau ** 2 / magGradU - self.nu]'], {}), '([0.0, uTau ** 2 / magGradU - self.nu])\n', (1404, 1443), True, 'import numpy as np\n'), ((1490, 1540), 'functools.partial', 'partial', (['self.law.value', 'sampledU', 'self.h', 'self.nu'], {}), '(self.law.value, sampledU, self.h, self.nu)\n', (1497, 1540), False, 'from functools import partial\n'), ((1554, 1609), 'functools.partial', 'partial', (['self.law.derivative', 'sampledU', 'self.h', 'self.nu'], {}), '(self.law.derivative, sampledU, self.h, self.nu)\n', (1561, 1609), False, 'from functools import partial\n'), ((2592, 2609), 'numpy.abs', 'np.abs', (['wallGradU'], {}), '(wallGradU)\n', (2598, 2609), True, 'import numpy as np\n'), ((2667, 2712), 'numpy.max', 'np.max', (['[0.0, uTau ** 2 / magGradU - self.nu]'], {}), '([0.0, uTau ** 2 / magGradU - self.nu])\n', (2673, 2712), True, 'import numpy as np\n'), ((2759, 2819), 'functools.partial', 'partial', (['self.law.value', 'sampledU', 'self.h1', 'self.h2', 'self.nu'], {}), '(self.law.value, sampledU, self.h1, self.h2, self.nu)\n', (2766, 2819), False, 'from functools import partial\n'), ((2833, 2898), 'functools.partial', 'partial', (['self.law.derivative', 'sampledU', 'self.h1', 'self.h2', 'self.nu'], {}), '(self.law.derivative, sampledU, self.h1, self.h2, self.nu)\n', (2840, 2898), False, 'from functools import partial\n'), ((4132, 4149), 'numpy.abs', 'np.abs', (['wallGradU'], {}), '(wallGradU)\n', (4138, 4149), True, 'import numpy as np\n'), ((4207, 4252), 'numpy.max', 'np.max', (['[0.0, uTau ** 2 / magGradU - self.nu]'], {}), '([0.0, uTau ** 2 / magGradU - self.nu])\n', (4213, 4252), True, 'import numpy as np\n'), ((6209, 6268), 'functools.partial', 'partial', (['law.value', 'sampledU[index]', 'self.h[index]', 'self.nu'], {}), '(law.value, sampledU[index], self.h[index], self.nu)\n', (6216, 6268), 
False, 'from functools import partial\n'), ((6282, 6346), 'functools.partial', 'partial', (['law.derivative', 'sampledU[index]', 'self.h[index]', 'self.nu'], {}), '(law.derivative, sampledU[index], self.h[index], self.nu)\n', (6289, 6346), False, 'from functools import partial\n'), ((5612, 5621), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (5618, 5621), True, 'import numpy as np\n'), ((4897, 4903), 'numpy.sum', 'sum', (['u'], {}), '(u)\n', (4900, 4903), False, 'from numpy import sum, log\n'), ((4847, 4853), 'numpy.log', 'log', (['y'], {}), '(y)\n', (4850, 4853), False, 'from numpy import sum, log\n'), ((4908, 4914), 'numpy.log', 'log', (['y'], {}), '(y)\n', (4911, 4914), False, 'from numpy import sum, log\n'), ((4830, 4836), 'numpy.log', 'log', (['y'], {}), '(y)\n', (4833, 4836), False, 'from numpy import sum, log\n'), ((4887, 4893), 'numpy.log', 'log', (['y'], {}), '(y)\n', (4890, 4893), False, 'from numpy import sum, log\n'), ((4998, 5007), 'numpy.log', 'np.log', (['y'], {}), '(y)\n', (5004, 5007), True, 'import numpy as np\n')] |
import numpy as np
from models.datastructures import BoundaryType
def WaveEquation1D(grid, x0, boundary_cond, c, sigma0, num_reflections=4):
    """Analytical solution of the 1D wave equation with Dirichlet or Neumann
    boundaries, built as a superposition of Gaussian pulses and their mirror
    images.

    Parameters
    ----------
    grid : sequence of 2 arrays
        grid[0] is the spatial mesh, grid[1] the temporal mesh
        (broadcastable against each other).
    x0 : float
        Initial source position.
    boundary_cond : BoundaryType
        DIRICHLET or NEUMANN (asserted below).
    c : float
        Wave speed.
    sigma0 : float
        Width of the Gaussian pulse.
    num_reflections : int, optional
        Number of boundary reflections included in the superposition.
    """
    assert(boundary_cond == BoundaryType.DIRICHLET or boundary_cond == BoundaryType.NEUMANN)
    amp_sign = 1
    x_mesh = grid[0]
    t_mesh = grid[1]
    xmin = np.min(x_mesh)
    xmax = np.max(x_mesh)
    # Domain length
    L = ( xmax - xmin )
    # 1D wave function definition: x0 is an array containing 2 elements for positive and negative travelling waves, respectively
    pfunc = lambda x,t,x0: 0.5*np.exp(-0.5*(np.array((x-x0[0] - c*t))/sigma0)**2) + \
                           0.5*np.exp(-0.5*(np.array((x-x0[1] + c*t))/sigma0)**2)
    # initial wave solution (no reflections)
    p = pfunc(x_mesh,t_mesh,[x0,x0])
    if num_reflections <= 0:
        return p
    # calculate starting positions for reflected waves for superposition
    x0_rel = (x0 - xmin) / L # relative position
    for i in range(num_reflections):
        # Mirror positions alternate depending on reflection parity.
        if np.mod(i,2) == 0:
            x0_min = xmin - i*L - L*x0_rel # x0 for positive travelling wave
            x0_max = xmax + (i+1)*L - L*x0_rel # x0 for negative travelling wave
        else:
            x0_min = xmin - i*L - (L - L*x0_rel) # x0 for positive travelling wave
            x0_max = xmax + i*L + L*x0_rel # x0 for negative travelling wave
        # Dirichlet boundaries flip the reflected amplitude sign each bounce;
        # Neumann reflections keep the sign.
        if boundary_cond == BoundaryType.DIRICHLET:
            amp_sign = -1*amp_sign
        p = p + amp_sign*pfunc(x_mesh,t_mesh,[x0_min,x0_max])
    return p
def WaveEquation2D(grid, X0, boundary_cond, c, sigma0, num_reflections=4):
    """Analytical solution of the 2D wave equation with Dirichlet or Neumann
    boundaries, built as a superposition of separable Gaussian pulses and
    their mirror images.

    Parameters
    ----------
    grid : sequence of 3 arrays
        grid[0] and grid[1] are the x and y meshes, grid[2] the temporal
        mesh (broadcastable against each other).
    X0 : sequence of 2 floats
        Initial source position (x0, y0).
    boundary_cond : BoundaryType
        DIRICHLET or NEUMANN (asserted below).
    c : float
        Wave speed.
    sigma0 : float
        Width of the Gaussian pulse.
    num_reflections : int, optional
        Number of boundary reflections included in the superposition.
    """
    assert(boundary_cond == BoundaryType.DIRICHLET or boundary_cond == BoundaryType.NEUMANN)
    amp_sign = 1
    x_mesh = grid[0]
    y_mesh = grid[1]
    t_mesh = grid[2]
    xmin = np.min(x_mesh)
    xmax = np.max(x_mesh)
    ymin = np.min(y_mesh)
    ymax = np.max(y_mesh)
    x0 = X0[0]
    y0 = X0[1]
    # Domain lengths in each direction
    Lx = ( xmax - xmin )
    Ly = ( ymax - ymin )
    # 2D wave function definition: x0 and y0 are arrays containing 2 elements each for positive and negative travelling waves, respectively
    pfunc = lambda x,y,t,x0,y0: 0.5*np.exp(-0.5*(np.array((x-x0[0] - c*t))/sigma0)**2)*np.exp(-0.5*(np.array((y-y0[0] - c*t))/sigma0)**2) + \
                                0.5*np.exp(-0.5*(np.array((x-x0[1] + c*t))/sigma0)**2)*np.exp(-0.5*(np.array((y-y0[1] + c*t))/sigma0)**2)
    # initial wave solution (no reflections)
    p = pfunc(x_mesh,y_mesh,t_mesh,[x0,x0],[y0,y0])
    if num_reflections <= 0:
        return p
    # calculate starting positions for reflected waves for superposition
    x0_rel = (x0 - xmin) / Lx # relative position
    y0_rel = (y0 - ymin) / Ly # relative position
    for i in range(num_reflections):
        # Mirror positions alternate depending on reflection parity.
        if np.mod(i,2) == 0:
            # x0 for positive travelling wave
            x0_min = xmin - i*Lx - Lx*x0_rel
            y0_min = ymin - i*Ly - Ly*y0_rel
            # x0 for negative travelling wave
            x0_max = xmax + (i+1)*Lx - Lx*x0_rel
            y0_max = ymax + (i+1)*Ly - Ly*y0_rel
        else:
            # x0 for positive travelling wave
            x0_min = xmin - i*Lx - (Lx - Lx*x0_rel)
            y0_min = ymin - i*Ly - (Ly - Ly*y0_rel)
            # x0 for negative travelling wave
            x0_max = xmax + i*Lx + Lx*x0_rel
            y0_max = ymax + i*Ly + Ly*y0_rel
        # Dirichlet boundaries flip the reflected amplitude sign each bounce;
        # Neumann reflections keep the sign.
        if boundary_cond == BoundaryType.DIRICHLET:
            amp_sign = -1*amp_sign
        p = p + amp_sign*pfunc(x_mesh,y_mesh,t_mesh,[x0_min,x0_max],[y0_min,y0_max])
    return p
def generateSolutionData(grid, x0_sources, c, sigma0, boundary_cond):
    """Evaluate the analytical wave solution for a batch of source positions.

    Parameters
    ----------
    grid : sequence of arrays
        Meshes as expected by WaveEquation1D / WaveEquation2D.
    x0_sources : 2d array-like
        One source position per row; the number of columns selects the
        spatial dimension (1 or 2).
    c : float
        Wave speed.
    sigma0 : float
        Width of the Gaussian pulse.
    boundary_cond : BoundaryType
        Boundary condition forwarded to the solver.

    Returns
    -------
    ndarray
        Stacked solutions, one per source position.

    Raises
    ------
    NotImplementedError
        If the spatial dimension is neither 1 nor 2.
    """
    p_data = []
    # Number of columns of the source array decides 1D vs 2D.
    spatial_dim = np.asarray(x0_sources).shape[1]
    for x0 in x0_sources:  # the index was unused, so no enumerate
        if spatial_dim == 1:
            p_sol = WaveEquation1D(grid, x0, boundary_cond, c, sigma0)
        elif spatial_dim == 2:
            p_sol = WaveEquation2D(grid, x0, boundary_cond, c, sigma0)
        else:
            raise NotImplementedError()
        p_data.append(np.asarray(p_sol))
    return np.asarray(p_data)
"numpy.asarray",
"numpy.max",
"numpy.array",
"numpy.min",
"numpy.mod"
] | [((444, 458), 'numpy.min', 'np.min', (['x_mesh'], {}), '(x_mesh)\n', (450, 458), True, 'import numpy as np\n'), ((470, 484), 'numpy.max', 'np.max', (['x_mesh'], {}), '(x_mesh)\n', (476, 484), True, 'import numpy as np\n'), ((2089, 2103), 'numpy.min', 'np.min', (['x_mesh'], {}), '(x_mesh)\n', (2095, 2103), True, 'import numpy as np\n'), ((2115, 2129), 'numpy.max', 'np.max', (['x_mesh'], {}), '(x_mesh)\n', (2121, 2129), True, 'import numpy as np\n'), ((2141, 2155), 'numpy.min', 'np.min', (['y_mesh'], {}), '(y_mesh)\n', (2147, 2155), True, 'import numpy as np\n'), ((2167, 2181), 'numpy.max', 'np.max', (['y_mesh'], {}), '(y_mesh)\n', (2173, 2181), True, 'import numpy as np\n'), ((4335, 4353), 'numpy.asarray', 'np.asarray', (['p_data'], {}), '(p_data)\n', (4345, 4353), True, 'import numpy as np\n'), ((1114, 1126), 'numpy.mod', 'np.mod', (['i', '(2)'], {}), '(i, 2)\n', (1120, 1126), True, 'import numpy as np\n'), ((3057, 3069), 'numpy.mod', 'np.mod', (['i', '(2)'], {}), '(i, 2)\n', (3063, 3069), True, 'import numpy as np\n'), ((3961, 3983), 'numpy.asarray', 'np.asarray', (['x0_sources'], {}), '(x0_sources)\n', (3971, 3983), True, 'import numpy as np\n'), ((4304, 4321), 'numpy.asarray', 'np.asarray', (['p_sol'], {}), '(p_sol)\n', (4314, 4321), True, 'import numpy as np\n'), ((688, 715), 'numpy.array', 'np.array', (['(x - x0[0] - c * t)'], {}), '(x - x0[0] - c * t)\n', (696, 715), True, 'import numpy as np\n'), ((774, 801), 'numpy.array', 'np.array', (['(x - x0[1] + c * t)'], {}), '(x - x0[1] + c * t)\n', (782, 801), True, 'import numpy as np\n'), ((2509, 2536), 'numpy.array', 'np.array', (['(y - y0[0] - c * t)'], {}), '(y - y0[0] - c * t)\n', (2517, 2536), True, 'import numpy as np\n'), ((2651, 2678), 'numpy.array', 'np.array', (['(y - y0[1] + c * t)'], {}), '(y - y0[1] + c * t)\n', (2659, 2678), True, 'import numpy as np\n'), ((2458, 2485), 'numpy.array', 'np.array', (['(x - x0[0] - c * t)'], {}), '(x - x0[0] - c * t)\n', (2466, 2485), True, 'import numpy as np\n'), 
((2600, 2627), 'numpy.array', 'np.array', (['(x - x0[1] + c * t)'], {}), '(x - x0[1] + c * t)\n', (2608, 2627), True, 'import numpy as np\n')] |
def test_read_hdf5():
    """Unit test for read_hdf5: checks interleaved channel reads and the
    info header returned when init_flag is set."""
    import os.path
    import sys
    sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
    from read_hdf5 import read_hdf5
    import os
    import numpy as np
    import h5py

    def write_fixture():
        # Build a minimal HDF5 file with sampling rate, ECG and PP channels.
        fs = np.uint16(np.array([100]))
        ecg = np.uint16(np.array([2,3]))
        pp = np.uint16(np.array([4,5]))
        with h5py.File('tmp.h5', 'w') as f:
            f.create_dataset('fs',data=fs)
            f.create_dataset('ecg',data=ecg)
            f.create_dataset('pp',data=pp)

    # Reading the data: samples come back interleaved across channels.
    write_fixture()
    data = read_hdf5('tmp.h5',offset=0,count_read=4,init_flag=0)
    os.system('rm tmp.h5')
    assert np.array_equal(data,[2,4,3,5])

    # Reading the header info: [total sample count, sampling rate].
    write_fixture()
    data_info = read_hdf5('tmp.h5',offset=0,count_read=2,init_flag=1)
    os.system('rm tmp.h5')
    assert np.array_equal(data_info,[5*2,100])
| [
"read_hdf5.read_hdf5",
"h5py.File",
"os.path.realpath",
"numpy.array",
"numpy.array_equal",
"os.system"
] | [((390, 414), 'h5py.File', 'h5py.File', (['"""tmp.h5"""', '"""w"""'], {}), "('tmp.h5', 'w')\n", (399, 414), False, 'import h5py\n'), ((547, 603), 'read_hdf5.read_hdf5', 'read_hdf5', (['"""tmp.h5"""'], {'offset': '(0)', 'count_read': '(4)', 'init_flag': '(0)'}), "('tmp.h5', offset=0, count_read=4, init_flag=0)\n", (556, 603), False, 'from read_hdf5 import read_hdf5\n'), ((605, 627), 'os.system', 'os.system', (['"""rm tmp.h5"""'], {}), "('rm tmp.h5')\n", (614, 627), False, 'import os\n'), ((639, 673), 'numpy.array_equal', 'np.array_equal', (['data', '[2, 4, 3, 5]'], {}), '(data, [2, 4, 3, 5])\n', (653, 673), True, 'import numpy as np\n'), ((821, 845), 'h5py.File', 'h5py.File', (['"""tmp.h5"""', '"""w"""'], {}), "('tmp.h5', 'w')\n", (830, 845), False, 'import h5py\n'), ((983, 1039), 'read_hdf5.read_hdf5', 'read_hdf5', (['"""tmp.h5"""'], {'offset': '(0)', 'count_read': '(2)', 'init_flag': '(1)'}), "('tmp.h5', offset=0, count_read=2, init_flag=1)\n", (992, 1039), False, 'from read_hdf5 import read_hdf5\n'), ((1041, 1063), 'os.system', 'os.system', (['"""rm tmp.h5"""'], {}), "('rm tmp.h5')\n", (1050, 1063), False, 'import os\n'), ((1075, 1114), 'numpy.array_equal', 'np.array_equal', (['data_info', '[5 * 2, 100]'], {}), '(data_info, [5 * 2, 100])\n', (1089, 1114), True, 'import numpy as np\n'), ((292, 307), 'numpy.array', 'np.array', (['[100]'], {}), '([100])\n', (300, 307), True, 'import numpy as np\n'), ((329, 345), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (337, 345), True, 'import numpy as np\n'), ((365, 381), 'numpy.array', 'np.array', (['[4, 5]'], {}), '([4, 5])\n', (373, 381), True, 'import numpy as np\n'), ((723, 738), 'numpy.array', 'np.array', (['[100]'], {}), '([100])\n', (731, 738), True, 'import numpy as np\n'), ((760, 776), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (768, 776), True, 'import numpy as np\n'), ((796, 812), 'numpy.array', 'np.array', (['[4, 5]'], {}), '([4, 5])\n', (804, 812), True, 'import numpy as np\n'), 
((105, 131), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (121, 131), False, 'import os\n')] |
#Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
from sklearn import svm
# Load the competition data (Kaggle-style ../input layout).
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
# Quick exploratory look at the training set.
train.info()
train.describe(include='all')
train.head()
# Bar plots of the per-class mean of each numeric feature.
plt.subplot(1,4,1)
train.groupby('type').mean()['rotting_flesh'].plot(kind='bar',figsize=(7,4), color='r')
plt.subplot(1,4,2)
train.groupby('type').mean()['bone_length'].plot(kind='bar',figsize=(7,4), color='g')
plt.subplot(1,4,3)
train.groupby('type').mean()['hair_length'].plot(kind='bar',figsize=(7,4), color='y')
plt.subplot(1,4,4)
train.groupby('type').mean()['has_soul'].plot(kind='bar',figsize=(7,4), color='teal')
# Class counts faceted by color.
sns.factorplot("type", col="color", col_wrap=4, data=train, kind="count", size=2.4, aspect=.8)
#test_id will be used later, so save it
test_id = test['id']
train.drop(['id'], axis=1, inplace=True)
test.drop(['id'], axis=1, inplace=True)
#Deal with 'color' column
# One-hot encode 'color' on both train and test with a common prefix.
col = 'color'
dummies = pd.get_dummies(train[col], drop_first=False)
dummies = dummies.add_prefix("{}#".format(col))
train.drop(col, axis=1, inplace=True)
train = train.join(dummies)
dummies = pd.get_dummies(test[col], drop_first=False)
dummies = dummies.add_prefix("{}#".format(col))
test.drop(col, axis=1, inplace=True)
test = test.join(dummies)
# Split off the target and label-encode it for the classifiers.
X_train = train.drop('type', axis=1)
le = LabelEncoder()
Y_train = le.fit_transform(train.type.values)
X_test = test
# Rank features by random-forest importance.
clf = RandomForestClassifier(n_estimators=200)
clf = clf.fit(X_train, Y_train)
indices = np.argsort(clf.feature_importances_)[::-1]
# Print the feature ranking
print('Feature ranking:')
for f in range(X_train.shape[1]):
    print('%d. feature %d %s (%f)' % (f + 1, indices[f], X_train.columns[indices[f]],
                                      clf.feature_importances_[indices[f]]))
# Keep only the four most important features.
best_features=X_train.columns[indices[0:4]]
X = X_train[best_features]
Xt = X_test[best_features]
#Splitting data for validation
Xtrain, Xtest, ytrain, ytest = train_test_split(X, Y_train, test_size=0.20, random_state=36)
#At first I try Random Forest.
#Normally you input all parameters and their potential values and run GridSearchCV.
#My PC isn't good enough so I divide parameters in two groups and repeatedly run two GridSearchCV until I'm satisfied with the result.
forest = RandomForestClassifier(max_depth = None,
                                min_samples_split =5,
                                min_weight_fraction_leaf = 0.0,
                                max_leaf_nodes = 60)
# First grid: ensemble size, split criterion and feature sampling.
parameter_grid = {'n_estimators' : [10, 20, 100, 150],
                  'criterion' : ['gini', 'entropy'],
                  'max_features' : ['auto', 'sqrt', 'log2', None]
                 }
grid_search = GridSearchCV(forest, param_grid=parameter_grid, cv=StratifiedKFold(5))
grid_search.fit(X, Y_train)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
# Second grid: tree-shape parameters, using the winners from the first grid.
forest = RandomForestClassifier(n_estimators = 20,
                                criterion = 'entropy',
                                max_features = 'auto')
parameter_grid = {
                  'max_depth' : [None, 5, 20, 100],
                  'min_samples_split' : [2, 5, 7],
                  'min_weight_fraction_leaf' : [0.0, 0.1],
                  'max_leaf_nodes' : [40, 60, 80],
                 }
grid_search = GridSearchCV(forest, param_grid=parameter_grid, cv=StratifiedKFold(5))
grid_search.fit(X, Y_train)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
#Optimal parameters
clf = RandomForestClassifier(n_estimators=20, n_jobs=-1, criterion = 'entropy', max_features = 'auto',
                       min_samples_split=5, min_weight_fraction_leaf=0.0,
                       max_leaf_nodes=60, max_depth=100)
#Calibration improves probability predictions
calibrated_clf = CalibratedClassifierCV(clf, method='isotonic', cv=5)
calibrated_clf.fit(Xtrain, ytrain)
y_val = calibrated_clf.predict_proba(Xtest)
#Prediction 'y_val' shows probabilities of classes, so at first the most probable class is chosen,
#then it is converted to classes.
print("Validation accuracy: ", sum(pd.DataFrame(y_val, columns=le.classes_).idxmax(axis=1).values == le.inverse_transform(ytest))/len(ytest))
# Second model: linear support-vector classifier.
svc = svm.SVC(kernel='linear')
svc.fit(Xtrain, ytrain)
y_val_s = svc.predict(Xtest)
print("Validation accuracy: ", sum(le.inverse_transform(y_val_s)
                                    == le.inverse_transform(ytest))/len(ytest))
#The last model is logistic regression
logreg = LogisticRegression()
parameter_grid = {'solver' : ['newton-cg', 'lbfgs'],
                  'multi_class' : ['ovr', 'multinomial'],
                  'C' : [0.005, 0.01, 1, 10, 100, 1000],
                  'tol': [0.0001, 0.001, 0.005]
                 }
grid_search = GridSearchCV(logreg, param_grid=parameter_grid, cv=StratifiedKFold(5))
grid_search.fit(Xtrain, ytrain)
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
# Refit logistic regression with the best-found hyperparameters.
log_reg = LogisticRegression(C = 1, tol = 0.0001, solver='newton-cg', multi_class='multinomial')
log_reg.fit(Xtrain, ytrain)
y_val_l = log_reg.predict_proba(Xtest)
print("Validation accuracy: ", sum(pd.DataFrame(y_val_l, columns=le.classes_).idxmax(axis=1).values
                                    == le.inverse_transform(ytest))/len(ytest))
#So this is it. Now fit and model on full dataset
log_reg.fit(X, Y_train)
y_pred = log_reg.predict_proba(Xt)
# Build the submission: pick the most probable class per test row.
submission = pd.DataFrame({'id':test_id,
                      'type':pd.DataFrame(y_pred, columns=le.classes_).idxmax(axis=1).values})
submission.to_csv('GGG_submission.csv', index=False)
| [
"seaborn.factorplot",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.calibration.CalibratedClassifierCV",
"sklearn.linear_model.LogisticRegression",
"seaborn.set_style",
"n... | [((104, 130), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (117, 130), True, 'import seaborn as sns\n'), ((599, 632), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train.csv"""'], {}), "('../input/train.csv')\n", (610, 632), True, 'import pandas as pd\n'), ((640, 672), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test.csv"""'], {}), "('../input/test.csv')\n", (651, 672), True, 'import pandas as pd\n'), ((729, 749), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (740, 749), True, 'import matplotlib.pyplot as plt\n'), ((836, 856), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(2)'], {}), '(1, 4, 2)\n', (847, 856), True, 'import matplotlib.pyplot as plt\n'), ((941, 961), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(3)'], {}), '(1, 4, 3)\n', (952, 961), True, 'import matplotlib.pyplot as plt\n'), ((1046, 1066), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(4)'], {}), '(1, 4, 4)\n', (1057, 1066), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1250), 'seaborn.factorplot', 'sns.factorplot', (['"""type"""'], {'col': '"""color"""', 'col_wrap': '(4)', 'data': 'train', 'kind': '"""count"""', 'size': '(2.4)', 'aspect': '(0.8)'}), "('type', col='color', col_wrap=4, data=train, kind='count',\n size=2.4, aspect=0.8)\n", (1165, 1250), True, 'import seaborn as sns\n'), ((1438, 1482), 'pandas.get_dummies', 'pd.get_dummies', (['train[col]'], {'drop_first': '(False)'}), '(train[col], drop_first=False)\n', (1452, 1482), True, 'import pandas as pd\n'), ((1607, 1650), 'pandas.get_dummies', 'pd.get_dummies', (['test[col]'], {'drop_first': '(False)'}), '(test[col], drop_first=False)\n', (1621, 1650), True, 'import pandas as pd\n'), ((1804, 1818), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1816, 1818), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1885, 1925), 
'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(200)'}), '(n_estimators=200)\n', (1907, 1925), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2424, 2484), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y_train'], {'test_size': '(0.2)', 'random_state': '(36)'}), '(X, Y_train, test_size=0.2, random_state=36)\n', (2440, 2484), False, 'from sklearn.model_selection import train_test_split\n'), ((2745, 2857), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': 'None', 'min_samples_split': '(5)', 'min_weight_fraction_leaf': '(0.0)', 'max_leaf_nodes': '(60)'}), '(max_depth=None, min_samples_split=5,\n min_weight_fraction_leaf=0.0, max_leaf_nodes=60)\n', (2767, 2857), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3424, 3510), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(20)', 'criterion': '"""entropy"""', 'max_features': '"""auto"""'}), "(n_estimators=20, criterion='entropy', max_features=\n 'auto')\n", (3446, 3510), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4085, 4270), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(20)', 'n_jobs': '(-1)', 'criterion': '"""entropy"""', 'max_features': '"""auto"""', 'min_samples_split': '(5)', 'min_weight_fraction_leaf': '(0.0)', 'max_leaf_nodes': '(60)', 'max_depth': '(100)'}), "(n_estimators=20, n_jobs=-1, criterion='entropy',\n max_features='auto', min_samples_split=5, min_weight_fraction_leaf=0.0,\n max_leaf_nodes=60, max_depth=100)\n", (4107, 4270), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4388, 4440), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', (['clf'], {'method': '"""isotonic"""', 'cv': '(5)'}), "(clf, method='isotonic', cv=5)\n", (4410, 4440), False, 'from sklearn.calibration import CalibratedClassifierCV\n'), ((4801, 
4825), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (4808, 4825), False, 'from sklearn import svm\n'), ((5071, 5091), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (5089, 5091), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5574, 5661), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'tol': '(0.0001)', 'solver': '"""newton-cg"""', 'multi_class': '"""multinomial"""'}), "(C=1, tol=0.0001, solver='newton-cg', multi_class=\n 'multinomial')\n", (5592, 5661), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1968, 2004), 'numpy.argsort', 'np.argsort', (['clf.feature_importances_'], {}), '(clf.feature_importances_)\n', (1978, 2004), True, 'import numpy as np\n'), ((3249, 3267), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(5)'], {}), '(5)\n', (3264, 3267), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((3893, 3911), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(5)'], {}), '(5)\n', (3908, 3911), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((5394, 5412), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['(5)'], {}), '(5)\n', (5409, 5412), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((6091, 6132), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred'], {'columns': 'le.classes_'}), '(y_pred, columns=le.classes_)\n', (6103, 6132), True, 'import pandas as pd\n'), ((4688, 4728), 'pandas.DataFrame', 'pd.DataFrame', (['y_val'], {'columns': 'le.classes_'}), '(y_val, columns=le.classes_)\n', (4700, 4728), True, 'import pandas as pd\n'), ((5763, 5805), 'pandas.DataFrame', 'pd.DataFrame', (['y_val_l'], {'columns': 'le.classes_'}), '(y_val_l, columns=le.classes_)\n', (5775, 5805), True, 'import pandas as pd\n')] |
import numpy as np
def calculate_exploration_prob(loss_history, act_explor_prob, threshold, step=0.05):
    """Adapt an exploration probability based on the spread of recent losses.

    If the spread of ``loss_history`` around its mean is at most
    ``threshold`` (i.e. learning has plateaued), the exploration
    probability is increased by ``step``; otherwise it is decreased by
    ``step``.  The result is clamped to [0, 1].

    Parameters
    ----------
    loss_history : sequence of float
        Recent loss values.
    act_explor_prob : float
        Current exploration probability.
    threshold : float
        Spread threshold at or below which exploration is increased.
    step : float, optional
        Size of the adjustment (default 0.05, matching the previous
        hard-coded value).

    Returns
    -------
    float
        The updated exploration probability, clamped to [0, 1].
    """
    mean = np.mean(loss_history)
    # NOTE: this is the *sum* of squared deviations, not the variance
    # proper, so the threshold is sensitive to the history length.
    spread = sum(np.square(v - mean) for v in loss_history)
    if threshold >= spread:
        act_explor_prob += step
    else:
        act_explor_prob -= step
    # Clamp to the valid probability range.
    return min(1, max(0, act_explor_prob))
| [
"numpy.mean",
"numpy.square"
] | [((104, 125), 'numpy.mean', 'np.mean', (['loss_history'], {}), '(loss_history)\n', (111, 125), True, 'import numpy as np\n'), ((192, 211), 'numpy.square', 'np.square', (['(i - mean)'], {}), '(i - mean)\n', (201, 211), True, 'import numpy as np\n')] |
# OS library
import os
from os.path import join
# SYS library
import sys
# Scientific library
import scipy.io as sio
import numpy as np
import pandas as pd
# Joblib library
### Module to performed parallel processing
from joblib import Parallel, delayed
# Multiprocessing library
import multiprocessing
# HDF5
import h5py
#######################################################################
## Define a parallel function to convert each file
def cvt2npz(f, str_f):
    """Convert the histograms stored in a MATLAB file into a .npz file.

    Parameters
    ----------
    f : str
        Path to the input .mat file; must contain a 'Histogram' variable.
    str_f : str
        Path of the output .npz file.
    """
    # Open the MATLAB file
    file_mat = sio.loadmat(f)
    # Extract the histogram data
    data = file_mat['Histogram']
    # Flatten each element of the structure into a 1-D vector
    vol_lbp_top_hist = [str_elt.squeeze() for str_elt in data.squeeze()]
    # BUGFIX: the original computed np.array(...) but discarded the result;
    # assign it so a proper homogeneous array is what gets stored.
    vol_lbp_top_hist = np.array(vol_lbp_top_hist)
    # Save under the same key so downstream readers are unaffected
    np.savez(str_f, vol_lbp_top_hist=vol_lbp_top_hist)
#######################################################################
# Driver: converts every per-patient LBP .mat file listed in the ground
# truth CSV to .npz, in parallel across 32 joblib workers.
# Get the input arguments
radius = sys.argv[1]
data_folder = sys.argv[2]
store_folder = sys.argv[3]
# Read the csv file with the ground truth
gt_csv_filename = '/work/le2i/gu5306le/retinopathy/OCT/SERI/data.csv'
gt_csv = pd.read_csv(gt_csv_filename)
gt = gt_csv.values
# Get the good extension
# First CSV column holds the base filename of each record.
data_filename = gt[:, 0]
# Build the matching output (.npz) and input (.mat) paths for each record.
store_filename = np.array([join(store_folder, f + '_nlm_flatten_lbp_' + str(radius) + '_hist.npz')
                           for f in data_filename])
data_filename = np.array([join(data_folder, f + '_nlm_flatten_lbptopPatch_' + str(radius) + '_.mat')
                          for f in data_filename])
# Fan the conversions out over 32 parallel workers.
Parallel(n_jobs=32)(delayed(cvt2npz)(df, sf)
                    for df, sf in zip(data_filename, store_filename))
| [
"numpy.savez",
"pandas.read_csv",
"scipy.io.loadmat",
"joblib.Parallel",
"numpy.array",
"joblib.delayed"
] | [((1396, 1424), 'pandas.read_csv', 'pd.read_csv', (['gt_csv_filename'], {}), '(gt_csv_filename)\n', (1407, 1424), True, 'import pandas as pd\n'), ((506, 520), 'scipy.io.loadmat', 'sio.loadmat', (['f'], {}), '(f)\n', (517, 520), True, 'import scipy.io as sio\n'), ((992, 1018), 'numpy.array', 'np.array', (['vol_lbp_top_hist'], {}), '(vol_lbp_top_hist)\n', (1000, 1018), True, 'import numpy as np\n'), ((1048, 1098), 'numpy.savez', 'np.savez', (['str_f'], {'vol_lbp_top_hist': 'vol_lbp_top_hist'}), '(str_f, vol_lbp_top_hist=vol_lbp_top_hist)\n', (1056, 1098), True, 'import numpy as np\n'), ((1798, 1817), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(32)'}), '(n_jobs=32)\n', (1806, 1817), False, 'from joblib import Parallel, delayed\n'), ((1818, 1834), 'joblib.delayed', 'delayed', (['cvt2npz'], {}), '(cvt2npz)\n', (1825, 1834), False, 'from joblib import Parallel, delayed\n')] |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Usefull calculus fonctions compatible with Quantity objects.
These are basically numpy function wrapped with dimensions checks.
"""
import numbers as nb
import numpy as np
import scipy
import scipy.integrate
import scipy.optimize
import sympy as sp
from .dimension import Dimension, DimensionError, SI_UNIT_SYMBOL
from .quantity import quantify, Quantity
from .utils import array_to_Q_array, decorate_with_various_unit, asqarray
def vectorize(func):
    """Extend ``numpy.vectorize`` to functions returning Quantity objects.

    The returned wrapper vectorizes *func* with numpy and converts the raw
    object array back into a Quantity array via :func:`asqarray`.
    """
    vectorized = np.vectorize(func)

    def wrapper(*args, **kwargs):
        raw = vectorized(*args, **kwargs)
        return asqarray(raw)

    return wrapper
def xvectorize(func):
    """Vectorize *func* over a 1-D iterable of Quantity values."""
    def applied(x):
        # collect elementwise results, then rewrap as a Quantity array
        collected = np.array([func(item) for item in x], dtype=object)
        return asqarray(collected)
    return applied
def ndvectorize(func):
    """Vectorize *func* over an N-dimensional Quantity array, keeping its shape."""
    def applied(x):
        # apply elementwise over the flat iterator, then rewrap as a Quantity array
        flat = np.array([func(item) for item in x.flat], dtype=object)
        out = asqarray(flat)
        # restore the original n-dimensional shape on the wrapped values
        out.value = out.value.reshape(x.shape)
        return out
    return applied
def trapz2(Zs, ech_x, ech_y):
    """
    2D integral of sampled values based on the trapezoidal rule.
    ech_x is the horizontal sampling, along rows (last axis).
    ech_y is the vertical sampling, along columns.
    Example :
    ---------
    #sample a 2 squared meter, in both direction with different spacing
    nx = 12
    ny = 30
    ech_dx = np.linspace(0*m, 2*m, num=nx)
    ech_dy = np.linspace(0*m, 1*m ,num=ny)
    X, Y = np.meshgrid(ech_dx, ech_dy)
    # make a uniform ponderation
    Zs = np.ones_like(X)
    print(trapz2(Zs, ech_dx, ech_dy))
    #prints 2 m**2
    """
    # integrate along x first, then along y
    return np.trapz(np.trapz(Zs, axis=-1, x=ech_x), axis=-1, x=ech_y)
def main():
    # Placeholder entry point; this module is currently library-only.
    pass
if __name__ == "__main__":
    main()
"numpy.array",
"numpy.trapz",
"numpy.vectorize"
] | [((659, 677), 'numpy.vectorize', 'np.vectorize', (['func'], {}), '(func)\n', (671, 677), True, 'import numpy as np\n'), ((1937, 1967), 'numpy.trapz', 'np.trapz', (['Zs'], {'axis': '(-1)', 'x': 'ech_x'}), '(Zs, axis=-1, x=ech_x)\n', (1945, 1967), True, 'import numpy as np\n'), ((1981, 2014), 'numpy.trapz', 'np.trapz', (['int_x'], {'axis': '(-1)', 'x': 'ech_y'}), '(int_x, axis=-1, x=ech_y)\n', (1989, 2014), True, 'import numpy as np\n'), ((964, 991), 'numpy.array', 'np.array', (['res'], {'dtype': 'object'}), '(res, dtype=object)\n', (972, 991), True, 'import numpy as np\n'), ((1193, 1220), 'numpy.array', 'np.array', (['res'], {'dtype': 'object'}), '(res, dtype=object)\n', (1201, 1220), True, 'import numpy as np\n')] |
import cirq
import numpy as np
import pandas as pd
import sympy
from qnn.qnlp.phrases_database import extract_words
class CircuitsWords:
    """ A class used to represent the circuits and some information associated to them
    """
    def __init__(self, data: str, num_qubits: int, num_phrases: int):
        """ Initializes the class and creates the gates that represent each one of the words in the vocabulary and
        evaluates the parameters that use each gate (these parameters are determined by the number of qubits)
        Args:
            data: the path of the csv file used as database
            num_qubits: the number of qubits used in the optimization
            num_phrases: the number of phrases used in the optimization
        """
        self.params_used = []
        self.results = []
        self.circuit_list = []
        self.words_used = []
        self.data = data
        # one shared pool of symbolic parameters; gates index into it sequentially
        self.theta = sympy.symbols("theta:1000")
        self.num_phrases = num_phrases
        self.num_qubits = num_qubits
        self.voc, self.df = extract_words(self.data)
        circuits, q, self.gates = [], [], []
        # creates the qubits on which the circuits are created
        for i in range(self.num_qubits):
            q.append(cirq.GridQubit(i, 0))
        a = 0
        # goes through the vocabulary and creates a parameterized gate for each word;
        # each gate is applied on all the qubits
        for k in range(len(self.voc)):
            gates_words = []
            for j in range(self.num_qubits):
                gates_words.append(cirq.rx(self.theta[a])(q[j]))  # the X gate is parameterized with sympy.symbols
                a += 1
            self.gates.append(gates_words)
        self.dic_gates = {self.voc[e]: self.gates[e] for e in range(len(self.voc))}
        # collects the words (i.e. the gates) used on the circuits so we can restrict the parameters used
        for i in self.df.transpose().values[3][:self.num_phrases]:
            for k in i.split():
                if k == 'nulo':
                    break
                self.words_used.append(k)
        self.words_used = list(set(self.words_used))
        # records the parameter indices used by the gates of those words,
        # so only these parameters are updated during the optimization
        for i in self.words_used:
            index_word = 0
            for k in self.voc:
                if k == i:
                    break
                index_word += 1
            stop = index_word * self.num_qubits
            for j in list(np.arange(stop, stop + num_qubits)):
                self.params_used.append(j)
    def __repr__(self):
        """prints the circuits one by one"""
        # NOTE(review): because of the `yield`, this method is a generator, so
        # repr(obj) will raise TypeError (__repr__ must return a str); it only
        # works when the returned generator is iterated explicitly.
        for i in self.circuit_list:
            yield i
    def create(self):
        """ Creates the cirq.Circuit that are optimized. Each phrase has an equivalent circuit formed by the gates that
        are equivalent to the words that the phrase has.
        Returns:
            list of circuits List[cirq.Circuit()]
        """
        bitstring = None
        data_frame = pd.read_csv(self.data, sep=',')  # converts the csv into a pd.DataFrame
        e = 0
        # goes through the phrases and constructs the circuits according to them
        for i in data_frame.transpose().values[3][:self.num_phrases]:
            if i != 'nulo':
                last = data_frame.transpose().values[4][e]
                o = 0
                # NOTE(review): later non-matching rows reset bitstring to None,
                # so only a match on the final row of column 0 survives this loop.
                for j in data_frame.transpose().values[0]:
                    if j == last:
                        bitstring = str(data_frame.transpose().values[2][o])
                    else:
                        bitstring = None
                    o += 1
                c = cirq.Circuit()
                # goes through the words in the phrase and appends the corresponding gates to the circuit
                for k in i.split():
                    a = 0
                    for f in self.voc:
                        if f == k:
                            c.append(self.gates[a])
                        a += 1
                e += 1
                self.circuit_list.append([i, last, bitstring, c])
        return self.circuit_list
    def sample_run_global(self, theta_sample, repetitions):
        """ Parameterizes the gates (in the circuits the gates carry sympy.symbols and the
        cirq.ParamResolver maps each symbol to a float value) and runs the circuits (based on a stochastic simulation)
        Args:
            theta_sample: float values assigned to the symbols 'theta0', 'theta1', ...
            repetitions: number of repetitions of each simulated run
        Returns:
            the cirq.TrialResult of the circuits
        """
        circuits = []
        for i in self.circuit_list:
            circuits.append(i[3])
        a = 1
        results = []
        # puts measurements on all the circuits (in case they don't have one)
        for u in circuits:
            if not u.has_measurements():
                for i in u.all_qubits():
                    u.append(cirq.measure(i))
            a = a + 1  # NOTE(review): 'a' is incremented but never used
        # creates the resolver that maps the parameters
        resolver = cirq.ParamResolver({'theta' + str(e): theta_sample[e] for e in range(len(theta_sample))})
        for u in circuits:
            # runs each circuit according to the parameters on the resolver
            results.append(cirq.Simulator().run(program=u, param_resolver=resolver, repetitions=repetitions))
        return results
| [
"qnn.qnlp.phrases_database.extract_words",
"pandas.read_csv",
"cirq.rx",
"cirq.GridQubit",
"sympy.symbols",
"cirq.Circuit",
"cirq.Simulator",
"cirq.measure",
"numpy.arange"
] | [((826, 853), 'sympy.symbols', 'sympy.symbols', (['"""theta:1000"""'], {}), "('theta:1000')\n", (839, 853), False, 'import sympy\n'), ((941, 965), 'qnn.qnlp.phrases_database.extract_words', 'extract_words', (['self.data'], {}), '(self.data)\n', (954, 965), False, 'from qnn.qnlp.phrases_database import extract_words\n'), ((2603, 2634), 'pandas.read_csv', 'pd.read_csv', (['self.data'], {'sep': '""","""'}), "(self.data, sep=',')\n", (2614, 2634), True, 'import pandas as pd\n'), ((1110, 1130), 'cirq.GridQubit', 'cirq.GridQubit', (['i', '(0)'], {}), '(i, 0)\n', (1124, 1130), False, 'import cirq\n'), ((2156, 2190), 'numpy.arange', 'np.arange', (['stop', '(stop + num_qubits)'], {}), '(stop, stop + num_qubits)\n', (2165, 2190), True, 'import numpy as np\n'), ((3077, 3091), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (3089, 3091), False, 'import cirq\n'), ((1374, 1396), 'cirq.rx', 'cirq.rx', (['self.theta[a]'], {}), '(self.theta[a])\n', (1381, 1396), False, 'import cirq\n'), ((4038, 4053), 'cirq.measure', 'cirq.measure', (['i'], {}), '(i)\n', (4050, 4053), False, 'import cirq\n'), ((4330, 4346), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (4344, 4346), False, 'import cirq\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Aging Setup Data Prep
# ### Imports
import os
import io
import sys
import re
import glob
import math
import logging
import numpy as np
import pandas as pd
from bric_analysis_libraries import standard_functions as std
# ## Data Prep
# convenience functions
def sample_from_file_name( file ):
    """
    Extracts the sample name from a file name.
    :param file: The file name to parse.
    :returns: The first underscore-delimited field of the file name.
    """
    return std.metadata_from_file_name( '(.+?)', file, delimeter = '_', group = 1 )
def channel_from_file_name( file ):
    """
    Extracts the channel number from a file name beginning with 'Ch<number>_'.
    :param file: The file name to parse.
    :returns: The channel as a number.
    """
    # Raw string: '\d' is an invalid escape sequence in a plain string literal
    # (SyntaxWarning on Python >= 3.12); the matched value is unchanged.
    channel_search = r'^Ch(\d+)'
    return std.metadata_from_file_name( channel_search, file, is_numeric = True, delimeter = '_' )
def sample_channel_index( file, metrics, sample_index, channel_index ):
    """
    Creates a standard column index with the sample name and channel.
    :param file: The file to obtain the sample and channel from.
    :param metrics: A list of metric names as the bottom index level.
    :param sample_index: If True the sample name is used from the file name
        to create an index for the data.
    :param channel_index: If True the file channel is used from the file name
        to create an index for the data.
    :returns: A Pandas MultiIndex with levels [ 'channel', 'sample', 'metrics' ]
        as specified.
    """
    # build the levels top-down: channel, then sample, then metrics
    levels = []
    names = []
    if channel_index:
        levels.append( [ channel_from_file_name( file ) ] )
        names.append( 'channel' )
    if sample_index:
        levels.append( [ sample_from_file_name( file ) ] )
        names.append( 'sample' )
    levels.append( metrics )
    names.append( 'metrics' )
    return pd.MultiIndex.from_product( levels, names = names )
def import_aging_datum( file, sample_index = True, channel_index = False ):
    """
    Imports aging data from a _aging.txt file into a Pandas DataFrame.
    :param file: The file to import from.
    :param sample_index: If True the sample name is used from the file name
        to create an index for the data. [Default: True]
    :param channel_index: If True the file channel is used from the file name
        to create an index for the data. [Default: False]
    :returns: A Pandas DataFrame with the file's data.
    """
    header = [ 'time', 'power', 'voltage', 'current', 'intensity', 'temperature' ]
    # Raw string: '\s' is an invalid escape sequence in a plain string literal
    # (SyntaxWarning on Python >= 3.12); the separator value is unchanged.
    df = pd.read_csv( file, sep = r'\s+', skiprows = 1, names = header )
    header = sample_channel_index( file, header, sample_index, channel_index )
    df.columns = header
    return df
def import_metric_datum( file, reindex = True, sample_index = True, channel_index = False ):
    """
    Imports JV metric data from a _JVmetrics.txt file into a Pandas DataFrame.
    :param file: The file to import from.
    :param reindex: Whether to reindex columns hierarchically or leave flat.
        If flat scan direction is indicated by '_rev' or '_for' trailing the metric.
        If hierarchical levels for data are [ 'direction', 'metric' ],
        where direction is [ 'forward', 'reverse', 'static' ], and
        metric is the standard abbereviation.
        [Default: True]
    :param sample_index: If True the sample name is used from the file name
        to create an index for the data. [Default: True]
    :param channel_index: If True the file channel is used from the file name
        to create an index for the data. [Default: False]
    :returns: A Pandas DataFrame with the file's data.
    """
    header = [
        'time',
        'voc_rev',
        'jsc_rev',
        'ff_rev',
        'power_rev',
        'vmpp_rev',
        'jmpp_rev',
        'hysteresis',
        'voc_for',
        'jsc_for',
        'ff_for',
        'power_for',
        'vmpp_for',
        'jmpp_for',
        'scan_rate',
        'intensity',
        'temperature'
    ]
    # Raw string: '\s' is an invalid escape sequence in a plain string literal.
    df = pd.read_csv( file, sep = r'\s+', skiprows = 1, names = header )
    header = sample_channel_index( file, header, sample_index, channel_index )
    if not reindex:
        df.columns = header
        return df
    # create hierarchical index
    # level names
    names = [ 'direction', 'metric' ]
    if sample_index:
        names.insert( 0, 'sample' )
    if channel_index:
        names.insert( 0, 'channel' )
    # format tuples
    values = []
    # Iterate the MultiIndex directly (yields tuples);
    # MultiIndex.get_values() was removed in pandas 1.0.
    for val in header:
        val = list( val )
        metric = val[ -1 ]
        # forward
        direction = metric.find( '_for' )
        if direction > -1:
            val[ -1 ] = metric[ :direction ]
            val.insert( -1, 'forward' )
            values.append( tuple( val ) )
            continue
        # reverse
        direction = metric.find( '_rev' )
        if direction > -1:
            val[ -1 ] = metric[ :direction ]
            val.insert( -1, 'reverse' )
            values.append( tuple( val ) )
            continue
        # static
        val[ -1 ] = metric
        val.insert( -1, 'static' )
        values.append( tuple( val ) )
    header = pd.MultiIndex.from_tuples( values, names = names )
    df.columns = header
    return df.sort_index( axis = 1 )
def import_jv_datum( file, sample_index = True, channel_index = False, sep = r'\s+' ):
    r"""
    Imports aging data from a _JVs.txt file into a Pandas DataFrame.
    :param file: The file to import from.
    :param sample_index: If True the sample name is used from the file name
        to create an index for the data. [Default: True]
    :param channel_index: If True the file channel is used from the file name
        to create an index for the data. [Default: False]
    :param sep: The data separator. Can be a regular expression. [Default: \s+]
    :returns: A Pandas DataFrame with the file's data.
    """
    lines, cols = std.file_shape( file, sep = sep )
    num_scans = int( lines/ 3 )
    num_rows = cols
    names = [ 'index', 'direction', 'metric' ]
    header = [
        range( num_scans ),
        [ 'reverse', 'forward' ],
        [ 'voltage', 'current' ]
    ]
    if sample_index:
        header.insert( 0, [ sample_from_file_name( file ) ] )
        names.insert( 0, 'sample' )
    if channel_index:
        header.insert( 0, [ channel_from_file_name( file ) ] )
        names.insert( 0, 'channel' )
    header = pd.MultiIndex.from_product( header, names = names )
    data = pd.DataFrame( index = np.arange( num_rows ), columns = header )
    # read file in 3 line chunks ( time, voltage, current ) for transposition
    # scanned reverse then forward
    with open( file ) as f:
        splitter = re.compile( sep )
        index = 0
        for time in f:
            # get data
            voltage = splitter.split( f.readline() )
            current = splitter.split( f.readline() )
            # remove empty strings
            voltage = filter( ''.__ne__, voltage )
            current = filter( ''.__ne__, current )
            # convert to floats
            voltage = list( map( float, voltage ) )
            current = list( map( float, current ) )
            # first index where next voltage is larger than current
            direction_change = [ index for
                index in ( range( len( voltage ) - 1 ) )
                if voltage[ index ] < voltage[ index + 1 ] ]
            if len( direction_change ) == 0:
                # no forward scan
                # logging.warn() is deprecated; logging.warning() matches the rest of the file
                logging.warning( 'Scan {} in file {} was not complete.'.format( index, file ) )
                v_rev = voltage
                j_rev = current
                v_for = []
                j_for = []
            else:
                direction_change = direction_change[ 0 ]
                v_rev = voltage[ : direction_change + 1 ]
                j_rev = current[ : direction_change + 1 ]
                v_for = voltage[ direction_change : ]
                j_for = current[ direction_change : ]
            # pad data for combining
            datum = [ v_rev, j_rev, v_for, j_for ]
            datum = list( map( np.array, datum ) )
            ref = np.empty( num_rows )
            ref[:] = np.nan
            n_datum = []
            for d in datum:
                nd = ref.copy()
                nd[ :d.shape[ 0 ] ] = d
                n_datum.append( nd )
            vals = np.stack( n_datum, axis = 1 )
            # create dataframe for scan
            # iterate the MultiIndex directly (yields tuples);
            # MultiIndex.get_values() was removed in pandas 1.0.
            # NOTE(review): h[ 1 ] is the scan index only when sample_index is True
            # and channel_index is False -- confirm other flag combinations.
            df_header = [ h for h in header if h[ 1 ] == index ]
            idh = df_header[ 0 ][ :-2 ]
            df_header = pd.MultiIndex.from_tuples( df_header, names = names )
            df = pd.DataFrame(
                data = vals,
                columns = df_header,
                dtype = np.float32
            )
            data.loc[ :, idh ] = df
            index += 1
    return data.dropna( how = 'all' )
def import_control_datum( file, sep = ',' ):
    """
    Imports temperature and intensity control data.
    :param file: File path of the program.
    :param sep: The column delimeter. [Default: ,]
    :returns: A pandas DataFrame with the program information,
        with a cumulative 'time' column added and columns sorted.
    """
    names = [
        'duration',
        'intensity',
        'temperature_1',
        'temperature_2',
        'temperature_3',
        'temperature_4',
        'start',
        'pause',
        'stop'
    ]
    # Pass sep through to the parser; previously the parameter was accepted but ignored.
    df = pd.read_csv( file, sep = sep, names = names, usecols = list( range( 9 ) ) )
    # cumulative time at the end of each program step
    df.loc[ :, 'time' ] = df.duration.cumsum()
    return df.sort_index( axis = 1 )
def import_aging_data( folder, file_pattern = '*_aging.txt', **kwargs ):
    """
    Imports aging data from every matching file in a folder.
    :param folder: Folder path containing data files.
    :param file_pattern: Glob pattern of the data files. [Default: *_aging.txt]
    :param kwargs: Additional arguments passed to standard_functions import_data().
    :returns: A DataFrame containing the imported data.
    """
    return std.import_data(
        import_aging_datum,
        folder,
        file_pattern = file_pattern,
        **kwargs
    )
def import_metric_data( folder, file_pattern = '*_JVmetrics.txt', **kwargs ):
    """
    Imports JV metric data from every matching file in a folder.
    :param folder: Folder path containing data files.
    :param file_pattern: Glob pattern of the data files. [Default: *_JVmetrics.txt]
    :param kwargs: Additional arguments passed to standard_functions import_data().
    :returns: A DataFrame containing the imported data.
    """
    return std.import_data(
        import_metric_datum,
        folder,
        file_pattern = file_pattern,
        **kwargs
    )
def import_jv_data( folder, file_pattern = '*_JVs.txt', **kwargs ):
    """
    Imports JV scan data from every matching file in a folder.
    :param folder: Folder path containing data files.
    :param file_pattern: Glob pattern of the data files. [Default: *_JVs.txt]
    :param kwargs: Additional arguments passed to standard_functions import_data().
    :returns: A DataFrame containing the imported data.
    """
    return std.import_data(
        import_jv_datum,
        folder,
        file_pattern = file_pattern,
        **kwargs
    )
def assign_temperatures_to_cycles( df, ctrl ):
    """
    Assigns temperature to cycles given a control DataFrame.
    Assumes all temperatures are the same.
    :param df: A DataFrame that has been split into cycles.
    :param ctrl: The control DataFrame.
    :returns: A Series of temperatures to assign to each cycle.
    """
    # temperature for all channels is the same
    temperatures = ctrl[[ 'time', 'temperature_1' ]].set_index( 'time' )
    temperatures.loc[ 0 ] = temperatures.iloc[ 0 ] # back fill to time 0
    temperatures.sort_index( inplace = True )
    temp_times = temperatures.index
    temps = []
    for name, data in df.groupby( level = [ 'channel', 'cycle' ], axis = 1 ):
        data = data.dropna()
        # first and last time stamp of the cycle, converted from seconds to hours
        time = data.xs( 'time', axis = 1, level = 'metric' ).iloc[[ 0, -1 ]]/ 3600
        time = time.reset_index( drop = True )
        # temperatures, take end value
        start = time.loc[ 0 ].values[ 0 ]
        end = time.loc[ 1 ].values[ 0 ]
        # look up the last control temperature at or before each time stamp
        start = temperatures.loc[ temp_times <= start ].values[ -1, 0 ]
        end = temperatures.loc[ temp_times <= end ].values[ -1, 0 ]
        if start != end:
            # temperature changed during the cycle; the end value is kept
            logging.warning( '{}: Temperature change.'.format( name ) )
        temp = pd.Series( { name: end } )
        temps.append( temp )
    temperatures = pd.concat( temps )
    return temperatures
# ## Work
| [
"pandas.Series",
"pandas.MultiIndex.from_product",
"pandas.read_csv",
"re.compile",
"bric_analysis_libraries.standard_functions.metadata_from_file_name",
"numpy.stack",
"numpy.empty",
"pandas.DataFrame",
"bric_analysis_libraries.standard_functions.import_data",
"pandas.MultiIndex.from_tuples",
"... | [((381, 451), 'bric_analysis_libraries.standard_functions.metadata_from_file_name', 'std.metadata_from_file_name', (['name_search', 'file'], {'delimeter': '"""_"""', 'group': '(1)'}), "(name_search, file, delimeter='_', group=1)\n", (408, 451), True, 'from bric_analysis_libraries import standard_functions as std\n'), ((539, 624), 'bric_analysis_libraries.standard_functions.metadata_from_file_name', 'std.metadata_from_file_name', (['channel_search', 'file'], {'is_numeric': '(True)', 'delimeter': '"""_"""'}), "(channel_search, file, is_numeric=True,\n delimeter='_')\n", (566, 624), True, 'from bric_analysis_libraries import standard_functions as std\n'), ((1620, 1667), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['header'], {'names': 'names'}), '(header, names=names)\n', (1646, 1667), True, 'import pandas as pd\n'), ((2297, 2352), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '"""\\\\s+"""', 'skiprows': '(1)', 'names': 'header'}), "(file, sep='\\\\s+', skiprows=1, names=header)\n", (2308, 2352), True, 'import pandas as pd\n'), ((3783, 3838), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '"""\\\\s+"""', 'skiprows': '(1)', 'names': 'header'}), "(file, sep='\\\\s+', skiprows=1, names=header)\n", (3794, 3838), True, 'import pandas as pd\n'), ((4931, 4977), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['values'], {'names': 'names'}), '(values, names=names)\n', (4956, 4977), True, 'import pandas as pd\n'), ((5682, 5711), 'bric_analysis_libraries.standard_functions.file_shape', 'std.file_shape', (['file'], {'sep': 'sep'}), '(file, sep=sep)\n', (5696, 5711), True, 'from bric_analysis_libraries import standard_functions as std\n'), ((6189, 6236), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['header'], {'names': 'names'}), '(header, names=names)\n', (6215, 6236), True, 'import pandas as pd\n'), ((9741, 9826), 'bric_analysis_libraries.standard_functions.import_data', 'std.import_data', 
(['import_aging_datum', 'folder'], {'file_pattern': 'file_pattern'}), '(import_aging_datum, folder, file_pattern=file_pattern, **kwargs\n )\n', (9756, 9826), True, 'from bric_analysis_libraries import standard_functions as std\n'), ((10228, 10314), 'bric_analysis_libraries.standard_functions.import_data', 'std.import_data', (['import_metric_datum', 'folder'], {'file_pattern': 'file_pattern'}), '(import_metric_datum, folder, file_pattern=file_pattern, **\n kwargs)\n', (10243, 10314), True, 'from bric_analysis_libraries import standard_functions as std\n'), ((10700, 10777), 'bric_analysis_libraries.standard_functions.import_data', 'std.import_data', (['import_jv_datum', 'folder'], {'file_pattern': 'file_pattern'}), '(import_jv_datum, folder, file_pattern=file_pattern, **kwargs)\n', (10715, 10777), True, 'from bric_analysis_libraries import standard_functions as std\n'), ((12103, 12119), 'pandas.concat', 'pd.concat', (['temps'], {}), '(temps)\n', (12112, 12119), True, 'import pandas as pd\n'), ((6477, 6492), 're.compile', 're.compile', (['sep'], {}), '(sep)\n', (6487, 6492), False, 'import re\n'), ((12027, 12049), 'pandas.Series', 'pd.Series', (['{name: end}'], {}), '({name: end})\n', (12036, 12049), True, 'import pandas as pd\n'), ((6274, 6293), 'numpy.arange', 'np.arange', (['num_rows'], {}), '(num_rows)\n', (6283, 6293), True, 'import numpy as np\n'), ((7956, 7974), 'numpy.empty', 'np.empty', (['num_rows'], {}), '(num_rows)\n', (7964, 7974), True, 'import numpy as np\n'), ((8190, 8215), 'numpy.stack', 'np.stack', (['n_datum'], {'axis': '(1)'}), '(n_datum, axis=1)\n', (8198, 8215), True, 'import numpy as np\n'), ((8404, 8453), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['df_header'], {'names': 'names'}), '(df_header, names=names)\n', (8429, 8453), True, 'import pandas as pd\n'), ((8475, 8535), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'vals', 'columns': 'df_header', 'dtype': 'np.float32'}), '(data=vals, columns=df_header, 
dtype=np.float32)\n', (8487, 8535), True, 'import pandas as pd\n')] |
import unittest
import numpy as np
import scipy.sparse
from injector import Injector
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapper
from decai.simulation.logging_module import LoggingModule
class TestFeatureIndexMapper(unittest.TestCase):
    """Tests for FeatureIndexMapper: dense inputs pass through, sparse inputs
    are compacted to only the columns with non-zero values."""
    @classmethod
    def setUpClass(cls):
        # Build the mapper once through the dependency injector.
        inj = Injector([
            LoggingModule,
        ])
        cls.f = inj.get(FeatureIndexMapper)
    def test_map_dense(self):
        # Dense arrays are returned unchanged (same objects) with no mapping.
        x_train = np.random.random_sample((10, 3))
        x_test = np.random.random_sample((4, x_train.shape[1]))
        train, test, feature_index_mapping = self.f.map(x_train, x_test)
        self.assertIs(train, x_train)
        self.assertIs(test, x_test)
        self.assertIsNone(feature_index_mapping)
    def test_map_sparse(self):
        # Sparse matrices with huge column spaces get remapped to the compact
        # set of training columns that actually contain data.
        x_train = np.array([[0, 0, 1, 1, 0], [0, 2, 0, 0, 0]])
        x_test = np.array([[1, 0, 1, 0, 1], [0, 0, 3, 0, 0]])
        x_train_sparse = scipy.sparse.csr_matrix((17348, 4288315073), dtype=np.uint8)
        x_train_sparse[x_train.nonzero()] = x_train[x_train.nonzero()]
        x_test_sparse = scipy.sparse.csr_matrix((3333, 21312344), dtype=np.uint8)
        x_test_sparse[x_test.nonzero()] = x_test[x_test.nonzero()]
        mapped_train, mapped_test, feature_index_mapping = self.f.map(x_train_sparse, x_test_sparse)
        self.assertEqual(int, type(feature_index_mapping[0]))
        self.assertEqual([1, 2, 3], feature_index_mapping)
        self.assertTrue(mapped_train.sum(axis=0).all(),
                        "Every column should have at least one non-zero value.")
        # Expected dense views of the remapped matrices (columns 1, 2, 3 only).
        x_train_expected = np.zeros((x_train_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
        x_train_expected[0, 1] = 1
        x_train_expected[0, 2] = 1
        x_train_expected[1, 0] = 2
        self.assertTrue(np.array_equal(x_train_expected, mapped_train), mapped_train)
        x_test_expected = np.zeros((x_test_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
        x_test_expected[0, 1] = 1
        x_test_expected[1, 1] = 3
        self.assertTrue(np.array_equal(x_test_expected, mapped_test), mapped_test)
| [
"numpy.array_equal",
"numpy.array",
"injector.Injector",
"numpy.random.random_sample"
] | [((341, 366), 'injector.Injector', 'Injector', (['[LoggingModule]'], {}), '([LoggingModule])\n', (349, 366), False, 'from injector import Injector\n'), ((484, 516), 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 3)'], {}), '((10, 3))\n', (507, 516), True, 'import numpy as np\n'), ((534, 580), 'numpy.random.random_sample', 'np.random.random_sample', (['(4, x_train.shape[1])'], {}), '((4, x_train.shape[1]))\n', (557, 580), True, 'import numpy as np\n'), ((827, 871), 'numpy.array', 'np.array', (['[[0, 0, 1, 1, 0], [0, 2, 0, 0, 0]]'], {}), '([[0, 0, 1, 1, 0], [0, 2, 0, 0, 0]])\n', (835, 871), True, 'import numpy as np\n'), ((889, 933), 'numpy.array', 'np.array', (['[[1, 0, 1, 0, 1], [0, 0, 3, 0, 0]]'], {}), '([[1, 0, 1, 0, 1], [0, 0, 3, 0, 0]])\n', (897, 933), True, 'import numpy as np\n'), ((1835, 1881), 'numpy.array_equal', 'np.array_equal', (['x_train_expected', 'mapped_train'], {}), '(x_train_expected, mapped_train)\n', (1849, 1881), True, 'import numpy as np\n'), ((2094, 2138), 'numpy.array_equal', 'np.array_equal', (['x_test_expected', 'mapped_test'], {}), '(x_test_expected, mapped_test)\n', (2108, 2138), True, 'import numpy as np\n')] |
import numpy as np
import pickle
from pathlib import Path
import sys
import pyvista as pv
# Finding the sim root directory
cwd = Path.cwd()
# NOTE(review): this scans every parent and never breaks early; the
# 'continue' is a no-op -- a 'break' was probably intended. If no parent
# is named '3D-CG', sim_root_dir is undefined below.
for dirname in tuple(cwd.parents):
    if dirname.name == '3D-CG':
        sim_root_dir = dirname
        continue
sys.path.append(str(sim_root_dir.joinpath('util')))
sys.path.append(str(sim_root_dir.joinpath('mesh')))
sys.path.append(str(sim_root_dir.joinpath('master')))
sys.path.append(str(sim_root_dir.joinpath('viz')))
import viz
import helper
# Read in solution array
soln = 'd8'
mu_e = 2.7e-4  # electron mobility
sign_q = -1
q = .1e-3 # aircraft charge, in mC
C = 840e-12 # aircraft capacitance, in pF
E_inf = 1000
u_inf = 262
V = q/C  # aircraft potential from Q = C*V
print('Reading in data')
with open('./fem_solutions/d8/d8_electrostatic_solution', 'rb') as file:
    solution = pickle.load(file)
flow = pv.read('./fem_solutions/d8/flow.vtu')
# Electric field: E = -V * grad(Phi), scaled by the aircraft potential
E = -V*solution['Phi_grad_vol'] #+E_inf*0 # Add external electric field here
E_surf = -V*solution['Phi_grad_normal_surf'] #+E_inf*0 # Add external electric field here
mesh = solution['vol_mesh']
# Electron drift velocity: v = sign(q) * mu_e * E
vE = sign_q*mu_e*E
vE_surf = sign_q*mu_e*E_surf
momentum = flow.point_data['Momentum']
rho = flow.point_data['Density'][:,None]
# Flow velocity, scaled by the freestream speed u_inf
vFlow = momentum/rho * u_inf
print('Interpolating flowfield to high order mesh')
vectors = helper.reshape_field(mesh, vFlow, 'to_array', 'scalars', porder=1)
_, vectors = helper.interpolate_high_order(1, mesh['porder'], mesh['ndim'], lo_scalars=None, lo_vectors=vectors)
# Reshape back into the column vector of high order
vFlowHO = helper.reshape_field(mesh, vectors, 'to_column', 'scalars')
# Combined fields for positive (flow + drift) and negative (flow - drift) charges
combined_v = np.concatenate((vFlowHO, vE, vFlowHO+vE, vFlowHO-vE), axis=1)
print('Visualizing')
labels = {'vectors':{0: 'V field - flow', 1: 'V field - electrostatic', 2: 'Combined - pos charge', 3: 'Combined - neg charge'}}
viz.visualize(mesh, 2, labels, 'combined', True, scalars=None, vectors=combined_v)
viz.visualize(solution['surf_mesh'], 2, {'scalars':{0: 'E dot n'}}, 'surface_plot', False, vE_surf[:,None], None, type='surface_mesh') # Can only have scalars on a surface mesh | [
"helper.interpolate_high_order",
"pathlib.Path.cwd",
"helper.reshape_field",
"pickle.load",
"viz.visualize",
"numpy.concatenate",
"pyvista.read"
] | [((130, 140), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (138, 140), False, 'from pathlib import Path\n'), ((809, 847), 'pyvista.read', 'pv.read', (['"""./fem_solutions/d8/flow.vtu"""'], {}), "('./fem_solutions/d8/flow.vtu')\n", (816, 847), True, 'import pyvista as pv\n'), ((1268, 1334), 'helper.reshape_field', 'helper.reshape_field', (['mesh', 'vFlow', '"""to_array"""', '"""scalars"""'], {'porder': '(1)'}), "(mesh, vFlow, 'to_array', 'scalars', porder=1)\n", (1288, 1334), False, 'import helper\n'), ((1348, 1452), 'helper.interpolate_high_order', 'helper.interpolate_high_order', (['(1)', "mesh['porder']", "mesh['ndim']"], {'lo_scalars': 'None', 'lo_vectors': 'vectors'}), "(1, mesh['porder'], mesh['ndim'], lo_scalars=\n None, lo_vectors=vectors)\n", (1377, 1452), False, 'import helper\n'), ((1510, 1569), 'helper.reshape_field', 'helper.reshape_field', (['mesh', 'vectors', '"""to_column"""', '"""scalars"""'], {}), "(mesh, vectors, 'to_column', 'scalars')\n", (1530, 1569), False, 'import helper\n'), ((1584, 1649), 'numpy.concatenate', 'np.concatenate', (['(vFlowHO, vE, vFlowHO + vE, vFlowHO - vE)'], {'axis': '(1)'}), '((vFlowHO, vE, vFlowHO + vE, vFlowHO - vE), axis=1)\n', (1598, 1649), True, 'import numpy as np\n'), ((1797, 1884), 'viz.visualize', 'viz.visualize', (['mesh', '(2)', 'labels', '"""combined"""', '(True)'], {'scalars': 'None', 'vectors': 'combined_v'}), "(mesh, 2, labels, 'combined', True, scalars=None, vectors=\n combined_v)\n", (1810, 1884), False, 'import viz\n'), ((1881, 2023), 'viz.visualize', 'viz.visualize', (["solution['surf_mesh']", '(2)', "{'scalars': {(0): 'E dot n'}}", '"""surface_plot"""', '(False)', 'vE_surf[:, None]', 'None'], {'type': '"""surface_mesh"""'}), "(solution['surf_mesh'], 2, {'scalars': {(0): 'E dot n'}},\n 'surface_plot', False, vE_surf[:, None], None, type='surface_mesh')\n", (1894, 2023), False, 'import viz\n'), ((784, 801), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (795, 801), False, 'import 
pickle\n')] |
# -*- coding: utf-8 -*-
# Adapted from lstm_text_generation.py in keras/examples
from __future__ import print_function
from keras.layers.recurrent import SimpleRNN
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
INPUT_FILE = "../data/alice_in_wonderland.txt"
# extract the input as a stream of characters
print("Extracting text from input...")
fin = open(INPUT_FILE, 'rb')
lines = []
for line in fin:
    line = line.strip().lower()
    # bytes -> str, silently dropping any non-ASCII characters
    line = line.decode("ascii", "ignore")
    if len(line) == 0:
        continue
    lines.append(line)
fin.close()
text = " ".join(lines)
# creating lookup tables
# Here chars is the set of distinct characters -- our character "vocabulary"
chars = set([c for c in text])
nb_chars = len(chars)
# bidirectional char <-> integer-index lookup tables
char2index = dict((c, i) for i, c in enumerate(chars))
index2char = dict((i, c) for i, c in enumerate(chars))
# create inputs and labels from the text. We do this by stepping
# through the text ${step} character at a time, and extracting a
# sequence of size ${seqlen} and the next output char. For example,
# assuming an input text "The sky was falling", we would get the
# following sequence of input_chars and label_chars (first 5 only)
#   The sky wa -> s
#   he sky was ->
#   e sky was  -> f
#    sky was f -> a
#   sky was fa -> l
print("Creating input and label text...")
SEQLEN = 10
STEP = 1
input_chars = []
label_chars = []
for i in range(0, len(text) - SEQLEN, STEP):
    input_chars.append(text[i:i + SEQLEN])
    label_chars.append(text[i + SEQLEN])
# vectorize the input and label chars
# Each row of the input is represented by seqlen characters, each
# represented as a 1-hot encoding of size len(char). There are
# len(input_chars) such rows, so shape(X) is (len(input_chars),
# seqlen, nb_chars).
# Each row of output is a single character, also represented as a
# dense encoding of size len(char). Hence shape(y) is (len(input_chars),
# nb_chars).
print("Vectorizing input and label text...")
# Use the builtin bool: the np.bool alias was deprecated in NumPy 1.20
# and removed in NumPy 1.24, where it raises AttributeError.
X = np.zeros((len(input_chars), SEQLEN, nb_chars), dtype=bool)
y = np.zeros((len(input_chars), nb_chars), dtype=bool)
for i, input_char in enumerate(input_chars):
    for j, ch in enumerate(input_char):
        X[i, j, char2index[ch]] = 1
    y[i, char2index[label_chars[i]]] = 1
# Build the model. We use a single RNN with a fully connected layer
# to compute the most likely predicted output char
HIDDEN_SIZE = 128
BATCH_SIZE = 128
NUM_ITERATIONS = 25
NUM_EPOCHS_PER_ITERATION = 1
NUM_PREDS_PER_EPOCH = 100
model = Sequential()
model.add(SimpleRNN(HIDDEN_SIZE, return_sequences=False,
                    input_shape=(SEQLEN, nb_chars),
                    unroll=True))
model.add(Dense(nb_chars))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
# We train the model in batches and test output generated at each step
for iteration in range(NUM_ITERATIONS):
    print("=" * 50)
    print("Iteration #: %d" % (iteration))
    model.fit(X, y, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS_PER_ITERATION)
    # testing model
    # randomly choose a row from input_chars, then use it to
    # generate text from model for next 100 chars
    test_idx = np.random.randint(len(input_chars))
    test_chars = input_chars[test_idx]
    print("Generating from seed: %s" % (test_chars))
    print(test_chars, end="")
    # NOTE(review): the inner loops reuse the name `i`, shadowing the outer
    # generation counter; harmless here since the outer `i` is never read.
    for i in range(NUM_PREDS_PER_EPOCH):
        Xtest = np.zeros((1, SEQLEN, nb_chars))
        for i, ch in enumerate(test_chars):
            Xtest[0, i, char2index[ch]] = 1
        pred = model.predict(Xtest, verbose=0)[0]
        ypred = index2char[np.argmax(pred)]
        print(ypred, end="")
        # move forward with test_chars + ypred
        test_chars = test_chars[1:] + ypred
    print()
| [
"numpy.argmax",
"keras.models.Sequential",
"keras.layers.recurrent.SimpleRNN",
"numpy.zeros",
"keras.layers.Activation",
"keras.layers.Dense"
] | [((2520, 2532), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2530, 2532), False, 'from keras.models import Sequential\n'), ((2543, 2638), 'keras.layers.recurrent.SimpleRNN', 'SimpleRNN', (['HIDDEN_SIZE'], {'return_sequences': '(False)', 'input_shape': '(SEQLEN, nb_chars)', 'unroll': '(True)'}), '(HIDDEN_SIZE, return_sequences=False, input_shape=(SEQLEN,\n nb_chars), unroll=True)\n', (2552, 2638), False, 'from keras.layers.recurrent import SimpleRNN\n'), ((2686, 2701), 'keras.layers.Dense', 'Dense', (['nb_chars'], {}), '(nb_chars)\n', (2691, 2701), False, 'from keras.layers import Dense, Activation\n'), ((2713, 2734), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2723, 2734), False, 'from keras.layers import Dense, Activation\n'), ((3423, 3454), 'numpy.zeros', 'np.zeros', (['(1, SEQLEN, nb_chars)'], {}), '((1, SEQLEN, nb_chars))\n', (3431, 3454), True, 'import numpy as np\n'), ((3620, 3635), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (3629, 3635), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import gizeh
import moviepy.editor as mpy
import numpy as np
import midi
# Convert a '#RRGGBB' hex colour string to an (r, g, b) tuple of floats in [0, 1).
RGB = lambda hx: tuple(map(lambda c: int(c, 16) / 256, [hx[1:3], hx[3:5], hx[5:7]]))
# Black keys are the pitch classes C#, D#, F#, G#, A# (1, 3, 6, 8, 10 mod 12).
is_ebony = lambda note: (note % 12) in [1, 3, 6, 8, 10]
is_ivory = lambda note: not is_ebony(note)
# Horizontal position (fraction of keyboard width, 0..1) of every MIDI note
# 21..108 on a standard 88-key piano with 52 white keys.
position = dict()
# White keys: evenly spaced, centred in each of the 52 key slots.
position.update({ivory: (index + 0.5) / 52 for index, ivory in enumerate(filter(is_ivory, range(21, 109)))})
# Black keys: sit on a white-key boundary; boundaries that are 2 or 5 mod 7
# (B-C and E-F) have no black key between them and are skipped.
position.update({ebony: index / 52 for index, ebony in zip(filter(lambda x: x % 7 not in [2, 5], range(1, 52)),
                                                           filter(is_ebony, range(21, 109)))})
# Per-track colour pairs (dark shade used for black keys, light shade for
# white keys); MIDI tracks are mapped onto these four palettes modulo 4.
track_colors = [
    (RGB('#DE935F'), RGB('#F0C674')),
    (RGB('#5E8D87'), RGB('#8ABEB7')),
    (RGB('#85678F'), RGB('#B294BB')),
    (RGB('#5F819D'), RGB('#81A2BE'))
]
def foresee_surface(midi, size, offset, time):
    """Render the scrolling piano-roll preview of upcoming notes.

    Draws, on a gizeh Surface of *size*, the notes that will sound within
    the next two seconds of *time*, coloured by track.

    midi   -- parsed MIDI wrapper exposing second2tick(), timeline and notes
    size   -- (width, height) of the surface in pixels
    offset -- (x, y) shift applied to every rectangle
    time   -- current playback position in seconds
    """
    surface = gizeh.Surface(*size)
    foresee = 2  # look-ahead window in seconds
    current, future = midi.second2tick(time), midi.second2tick(time + foresee)
    for begin, end, note in midi.timeline[current:future]:
        # If the look-ahead tick is falsy (presumably past the end of the
        # song), mirror the window around `current` instead -- TODO confirm.
        future = future or 2 * current - midi.second2tick(time - foresee)
        # Clamp the note interval to the visible [current, future] window.
        begin, end = max(begin, current), min(end, future)
        note, colors = midi.notes[note]['note'], track_colors[midi.notes[note]['track'] % 4]
        # Rectangle height/vertical position are proportional to the note's
        # duration and distance in time; black-key notes are drawn narrower.
        rect_params = {
            'lx'  : size[0]/52 if is_ivory(note) else size[0]/52 * 0.7,
            'ly'  : size[1] * (end - begin) / (future - current) - 5,
            'xy'  : (size[0] * position[note] + offset[0],
                     size[1] * (future - end/2 - begin/2) / (future - current) + offset[1]),
            'fill': colors[1] if is_ivory(note) else colors[0]
        }
        gizeh.rectangle(**rect_params).draw(surface)
    return surface
def piano_surface(midi, size, offset, time):
    """Render the 88-key keyboard strip at playback position *time*.

    Keys that are currently sounding are filled with their track's colour
    (light shade for white keys, dark shade for black keys).
    """
    surface = gizeh.Surface(*size)
    current = midi.second2tick(time)
    # Map each currently-sounding MIDI note to its track's (dark, light) pair.
    hit_note_colors = {
        midi.notes[interval[2]]['note']: track_colors[midi.notes[interval[2]]['track'] % 4]
        for interval in midi.timeline[current]
    }
    # White key: full-height outlined rectangle, light colour when hit.
    ivory_params = lambda note: {
        'lx'  : size[0]/52,
        'ly'  : size[1],
        'xy'  : (size[0] * position[note], size[1] / 2),
        'fill'  : hit_note_colors[note][1] if note in hit_note_colors.keys() else RGB('#CBCFCC'),
        'stroke': RGB('#3A3E42'),
        'stroke_width': 1
    }
    # Black key: narrower, two-thirds height, dark colour when hit.
    ebony_params = lambda note: {
        'lx'  : size[0]/52 * 0.7,
        'ly'  : size[1] * 2/3,
        'xy'  : (size[0] * position[note], size[1] / 3),
        'fill': hit_note_colors[note][0] if note in hit_note_colors.keys() else RGB('#3A3E42')
    }
    # Draw white keys first so black keys are painted on top of them.
    for note in filter(is_ivory, range(21, 109)):
        gizeh.rectangle(**ivory_params(note)).draw(surface)
    for note in filter(is_ebony, range(21, 109)):
        gizeh.rectangle(**ebony_params(note)).draw(surface)
    return surface
def visualize_midi(midi, size):
    """Return a frame-rendering callback ``make_frame(t)`` for *midi*.

    Each frame stacks the scrolling note preview on top of the keyboard
    strip; the keyboard height is derived from the key width (6 key-widths
    tall), and the preview fills the remaining vertical space.
    """
    width, height = size
    keyboard_area = (width, int(width / 52 * 6))
    scroll_area = (width, height - keyboard_area[1])
    no_offset = (0, 0)

    def make_frame(t):
        # Preview on top, keyboard below, concatenated vertically.
        upper = foresee_surface(midi, scroll_area, no_offset, t).get_npimage()
        lower = piano_surface(midi, keyboard_area, no_offset, t).get_npimage()
        return np.concatenate((upper, lower), axis=0)

    return make_frame
def midi_videoclip(sheet, size=(640, 360), iter_callback=None):
    """Wrap the MIDI visualisation into a moviepy VideoClip.

    sheet         -- midi.Midi instance to visualise
    size          -- (width, height) of the output frames
    iter_callback -- optional callable(clip) invoked once per rendered frame,
                     e.g. to refresh a GUI progress bar
    """
    clip = mpy.VideoClip(visualize_midi(sheet, size), duration=sheet.midi.length)
    # callback function is for refreshing gtk progressing bar
    # the following code is altered from moviepy/Clip.py:446
    if iter_callback is not None:
        def my_iter_frames(fps=None, with_times=False, progress_bar=False, dtype=None):
            # Total frame count, stashed on the clip so the callback can
            # compute a progress percentage.
            clip.nframes = int(clip.duration * fps) + 1
            def generator():
                for t in np.arange(0, clip.duration, 1.0 / fps):
                    # Notify the caller before rendering each frame.
                    iter_callback(clip)
                    frame = clip.get_frame(t)
                    if (dtype is not None) and (frame.dtype != dtype):
                        frame = frame.astype(dtype)
                    if with_times:
                        yield t, frame
                    else:
                        yield frame
            return generator()
        # Monkey-patch the clip so video writers report progress via the callback.
        clip.iter_frames = my_iter_frames
    return clip
# Test script
# sheet = midi.Midi('midi/The Positive and Negative.mid')
# clip = midi_videoclip(sheet)
# clip.write_videofile('/tmp/test.webm', fps=20)
| [
"gizeh.rectangle",
"gizeh.Surface",
"numpy.concatenate",
"midi.second2tick",
"numpy.arange"
] | [((850, 870), 'gizeh.Surface', 'gizeh.Surface', (['*size'], {}), '(*size)\n', (863, 870), False, 'import gizeh\n'), ((1776, 1796), 'gizeh.Surface', 'gizeh.Surface', (['*size'], {}), '(*size)\n', (1789, 1796), False, 'import gizeh\n'), ((1811, 1833), 'midi.second2tick', 'midi.second2tick', (['time'], {}), '(time)\n', (1827, 1833), False, 'import midi\n'), ((909, 931), 'midi.second2tick', 'midi.second2tick', (['time'], {}), '(time)\n', (925, 931), False, 'import midi\n'), ((933, 965), 'midi.second2tick', 'midi.second2tick', (['(time + foresee)'], {}), '(time + foresee)\n', (949, 965), False, 'import midi\n'), ((3218, 3258), 'numpy.concatenate', 'np.concatenate', (['(foresee, piano)'], {'axis': '(0)'}), '((foresee, piano), axis=0)\n', (3232, 3258), True, 'import numpy as np\n'), ((1066, 1098), 'midi.second2tick', 'midi.second2tick', (['(time - foresee)'], {}), '(time - foresee)\n', (1082, 1098), False, 'import midi\n'), ((1651, 1681), 'gizeh.rectangle', 'gizeh.rectangle', ([], {}), '(**rect_params)\n', (1666, 1681), False, 'import gizeh\n'), ((3787, 3825), 'numpy.arange', 'np.arange', (['(0)', 'clip.duration', '(1.0 / fps)'], {}), '(0, clip.duration, 1.0 / fps)\n', (3796, 3825), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
# Benchmark several clustering methods on the Drosophila connectome embedding
# and report Adjusted Rand Index (ARI) agreement with the known labels.
import numpy as np
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.preprocessing import LabelEncoder as labeler
import lsbm
## Import labels
lab = np.loadtxt('../data/drosophila_labels.csv', dtype=str)
lab = labeler().fit(lab).transform(lab)
## Import embeddings
X = np.loadtxt('../data/drosophila_dase.csv', delimiter=',')
## Import adjacency matrix
A = np.loadtxt('../data/drosophila_A.csv',delimiter=',',skiprows=1,dtype=int)
from sklearn.mixture import GaussianMixture as GMM
# GMM on the raw embedding: average ARI over 1000 random restarts
# (the seed fixes NumPy's global RNG, which sklearn's GMM draws from).
np.random.seed(1771)
print('GMM\t Drosophila\t', np.mean([ari(lab, GMM(n_components=4).fit_predict(X)) for _ in range(1000)]))
# Same, after row-normalising the embedding.
Xs = lsbm.row_normalise(X)
np.random.seed(1771)
print('norm-GMM\t Drosophila\t', np.mean([ari(lab, GMM(n_components=4).fit_predict(Xs)) for _ in range(1000)]))
# Same, after mapping the embedding to spherical (theta) coordinates.
Xs = lsbm.theta_transform(X)
np.random.seed(1771)
print('sphere-GMM\t Drosophila\t', np.mean([ari(lab, GMM(n_components=4).fit_predict(Xs)) for _ in range(1000)]))
from sklearn.cluster import AgglomerativeClustering
# Deterministic hierarchical clustering baseline (complete linkage).
print('HClust\t Drosophila\t', ari(lab, AgglomerativeClustering(n_clusters=4, affinity='euclidean', linkage='complete').fit_predict(X)))
from sknetwork.hierarchy import LouvainHierarchy, Paris, cut_straight
# Graph-based hierarchical methods applied directly to the adjacency matrix,
# with the dendrogram cut at 4 clusters.
biparis = Paris()
bihlouvain = LouvainHierarchy()
dendrogram_paris = biparis.fit_transform(A)
dendrogram_louvain = bihlouvain.fit_transform(A)
z_paris = cut_straight(dendrogram_paris, n_clusters=4)
z_hlouvain = cut_straight(dendrogram_louvain, n_clusters=4)
print('Paris\t Drosophila\t',ari(z_paris, lab))
print('HLouvain\t Drosophila\t',ari(z_hlouvain, lab))
from sknetwork.clustering import Louvain
# Flat Louvain community detection (number of clusters not fixed).
bilouvain = Louvain()
z_louvain = bilouvain.fit_transform(A)
print('Louvain\t Drosophila\t',ari(z_louvain, lab))
"sklearn.preprocessing.LabelEncoder",
"sklearn.cluster.AgglomerativeClustering",
"sklearn.mixture.GaussianMixture",
"lsbm.theta_transform",
"sknetwork.clustering.Louvain",
"sklearn.metrics.adjusted_rand_score",
"lsbm.row_normalise",
"sknetwork.hierarchy.LouvainHierarchy",
"numpy.random.seed",
"skn... | [((192, 246), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/drosophila_labels.csv"""'], {'dtype': 'str'}), "('../data/drosophila_labels.csv', dtype=str)\n", (202, 246), True, 'import numpy as np\n'), ((312, 368), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/drosophila_dase.csv"""'], {'delimiter': '""","""'}), "('../data/drosophila_dase.csv', delimiter=',')\n", (322, 368), True, 'import numpy as np\n'), ((400, 476), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/drosophila_A.csv"""'], {'delimiter': '""","""', 'skiprows': '(1)', 'dtype': 'int'}), "('../data/drosophila_A.csv', delimiter=',', skiprows=1, dtype=int)\n", (410, 476), True, 'import numpy as np\n'), ((526, 546), 'numpy.random.seed', 'np.random.seed', (['(1771)'], {}), '(1771)\n', (540, 546), True, 'import numpy as np\n'), ((658, 679), 'lsbm.row_normalise', 'lsbm.row_normalise', (['X'], {}), '(X)\n', (676, 679), False, 'import lsbm\n'), ((680, 700), 'numpy.random.seed', 'np.random.seed', (['(1771)'], {}), '(1771)\n', (694, 700), True, 'import numpy as np\n'), ((818, 841), 'lsbm.theta_transform', 'lsbm.theta_transform', (['X'], {}), '(X)\n', (838, 841), False, 'import lsbm\n'), ((842, 862), 'numpy.random.seed', 'np.random.seed', (['(1771)'], {}), '(1771)\n', (856, 862), True, 'import numpy as np\n'), ((1248, 1255), 'sknetwork.hierarchy.Paris', 'Paris', ([], {}), '()\n', (1253, 1255), False, 'from sknetwork.hierarchy import LouvainHierarchy, Paris, cut_straight\n'), ((1269, 1287), 'sknetwork.hierarchy.LouvainHierarchy', 'LouvainHierarchy', ([], {}), '()\n', (1285, 1287), False, 'from sknetwork.hierarchy import LouvainHierarchy, Paris, cut_straight\n'), ((1391, 1435), 'sknetwork.hierarchy.cut_straight', 'cut_straight', (['dendrogram_paris'], {'n_clusters': '(4)'}), '(dendrogram_paris, n_clusters=4)\n', (1403, 1435), False, 'from sknetwork.hierarchy import LouvainHierarchy, Paris, cut_straight\n'), ((1449, 1495), 'sknetwork.hierarchy.cut_straight', 'cut_straight', (['dendrogram_louvain'], {'n_clusters': 
'(4)'}), '(dendrogram_louvain, n_clusters=4)\n', (1461, 1495), False, 'from sknetwork.hierarchy import LouvainHierarchy, Paris, cut_straight\n'), ((1652, 1661), 'sknetwork.clustering.Louvain', 'Louvain', ([], {}), '()\n', (1659, 1661), False, 'from sknetwork.clustering import Louvain\n'), ((1525, 1542), 'sklearn.metrics.adjusted_rand_score', 'ari', (['z_paris', 'lab'], {}), '(z_paris, lab)\n', (1528, 1542), True, 'from sklearn.metrics import adjusted_rand_score as ari\n'), ((1576, 1596), 'sklearn.metrics.adjusted_rand_score', 'ari', (['z_hlouvain', 'lab'], {}), '(z_hlouvain, lab)\n', (1579, 1596), True, 'from sklearn.metrics import adjusted_rand_score as ari\n'), ((1732, 1751), 'sklearn.metrics.adjusted_rand_score', 'ari', (['z_louvain', 'lab'], {}), '(z_louvain, lab)\n', (1735, 1751), True, 'from sklearn.metrics import adjusted_rand_score as ari\n'), ((253, 262), 'sklearn.preprocessing.LabelEncoder', 'labeler', ([], {}), '()\n', (260, 262), True, 'from sklearn.preprocessing import LabelEncoder as labeler\n'), ((1070, 1149), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': '(4)', 'affinity': '"""euclidean"""', 'linkage': '"""complete"""'}), "(n_clusters=4, affinity='euclidean', linkage='complete')\n", (1093, 1149), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((593, 612), 'sklearn.mixture.GaussianMixture', 'GMM', ([], {'n_components': '(4)'}), '(n_components=4)\n', (596, 612), True, 'from sklearn.mixture import GaussianMixture as GMM\n'), ((752, 771), 'sklearn.mixture.GaussianMixture', 'GMM', ([], {'n_components': '(4)'}), '(n_components=4)\n', (755, 771), True, 'from sklearn.mixture import GaussianMixture as GMM\n'), ((916, 935), 'sklearn.mixture.GaussianMixture', 'GMM', ([], {'n_components': '(4)'}), '(n_components=4)\n', (919, 935), True, 'from sklearn.mixture import GaussianMixture as GMM\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 01:30:35 2020
@author: a
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from torch.autograd import Function
from matplotlib import pyplot as plt
from itertools import product
# Numerical-stability constant used inside the softplus-style log/exp terms.
EPS = 1e-4
# Per-depth scale for prior/initialisation stds (used by MCDO_loss and KL);
# values appear hand-tuned per network depth -- source not shown here.
Sigma = {1:4,2:3,3:2.25,4:2,5:2,6:1.9,7:1.75,8:1.75,9:1.7,10:1.65}
# Default dropout probability; also enters the MC-dropout weight-decay term.
p = 0.05
# Fixed likelihood std used by the variance='constant' losses.
log_like_std = 0.1
class MCDO(nn.Module):
    """Fully-connected ReLU network with Monte-Carlo dropout.

    Architecture: Linear(in, hid) -> ReLU -> Dropout, followed by
    (n_layers - 1) blocks of [Linear(hid, hid) -> ReLU -> Dropout],
    and a final Linear(hid, out). Weights are drawn from
    N(0, 1/sqrt(4 * fan_out)); biases start at zero.
    """

    def __init__(self, in_dim, out_dim, n_layers=1, hid_dim=50, p=0.05):
        super().__init__()
        self.n_layers = n_layers

        def _make_linear(fan_in, fan_out):
            # Linear layer with N(0, 1/sqrt(4*fan_out)) weights, zero bias.
            layer = nn.Linear(fan_in, fan_out)
            nn.init.normal_(layer.weight, std=1 / (4 * fan_out) ** 0.5)
            nn.init.zeros_(layer.bias)
            return layer

        self.linear_in = _make_linear(in_dim, hid_dim)
        self.dropout_in = nn.Dropout(p)
        if n_layers > 1:
            blocks = []
            for _ in range(n_layers - 1):
                blocks.append(_make_linear(hid_dim, hid_dim))
                blocks.append(nn.ReLU())
                blocks.append(nn.Dropout(p))
            self.hid_layers = nn.Sequential(*blocks)
        self.linear_out = _make_linear(hid_dim, out_dim)

    def forward(self, x):
        h = self.dropout_in(torch.relu(self.linear_in(x)))
        if self.n_layers > 1:
            h = self.hid_layers(h)
        return self.linear_out(h)
def MCDO_loss(model, x, y, depth=1, n_samples=32, tau=1, H=2):
    """MC-dropout training loss: weight-decay regulariser + MC-mean MSE.

    The decay coefficient follows the dropout-as-Bayesian-approximation
    recipe: lambda = p * l^2 / (N * tau) with length scale
    l = sqrt(H) / Sigma[depth].
    """
    batch = y.shape[0]
    length_scale = np.sqrt(H) / Sigma[depth]
    weight_decay = p * length_scale ** 2 / batch / tau
    # Squared L2 norm of every Linear layer's weights and biases.
    reg = 0
    for module in model.modules():
        if type(module).__name__ == 'Linear':
            reg = reg + torch.pow(torch.norm(module.weight, p=2), 2) \
                      + torch.pow(torch.norm(module.bias, p=2), 2)
    loss = reg * weight_decay
    # Monte-Carlo mean prediction over n_samples stochastic forward passes.
    pred = torch.cat([model(x) for _ in range(n_samples)], dim=1).mean(dim=1)
    loss = loss + torch.pow(pred - y, 2).mean()
    return loss
class LinearFunction(Function):
    """Stochastic linear layer with the reparameterisation trick.

    Weights and biases are sampled as mu + z * softplus(rho) with
    z ~ N(0, 1); the backward pass implements the corresponding gradients
    for mu_W, mu_b, rho_W and rho_b by hand.
    """
    # Note that both forward and backward are @staticmethods
    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, mu_W, mu_b, rho_W, rho_b):
        # Sample the noise, then build W = mu_W + z_W * softplus(rho_W)
        # (the +EPS inside the log guards against log(0)).
        z_W = torch.normal(torch.zeros_like(mu_W), torch.ones_like(rho_W))
        z_b = torch.normal(torch.zeros_like(mu_b), torch.ones_like(rho_b))
        W = mu_W+z_W*torch.log(1+EPS+torch.exp(rho_W))
        # Saved tensors are consumed in the same order by backward().
        ctx.save_for_backward(input,W, mu_W, mu_b, rho_W, rho_b,z_W,z_b)
        b = mu_b+z_b*torch.log(1+EPS+torch.exp(rho_b))
        output = torch.mm(input,W)+b
        return output

    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        input,W, mu_W, mu_b, rho_W, rho_b,z_W,z_b = ctx.saved_tensors
        grad_input = grad_mu_W = grad_mu_b = grad_rho_W = grad_rho_b = None
        # Standard linear-layer gradients w.r.t. input and the means.
        grad_input = grad_output.mm(W.t())
        grad_mu_W = input.t().mm(grad_output)
        grad_mu_b = grad_output.sum(0)
        # Chain rule through sigma = log(1 + EPS + exp(rho)):
        # d sigma / d rho = exp(rho) / (1 + EPS + exp(rho)).
        ex_W = torch.exp(rho_W)
        grad_rho_W = grad_mu_W*z_W*ex_W*torch.pow(1+EPS+ex_W,-1)
        ex_b = torch.exp(rho_b)
        grad_rho_b = grad_mu_b*z_b*ex_b*torch.pow(1+EPS+ex_b,-1)
        return grad_input, grad_mu_W, grad_mu_b, grad_rho_W, grad_rho_b
class Random_Linear(nn.Module):
    """Mean-field Gaussian linear layer.

    Holds mean and rho parameters for weights and biases; the forward pass
    delegates the sampling and the hand-written gradients to LinearFunction.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        # Weight means ~ N(0, 1/sqrt(4*out_dim)); bias means start at zero.
        sigma0 = torch.ones(in_dim, out_dim) / np.sqrt(4 * out_dim)
        self.mu_W = torch.nn.Parameter(torch.normal(torch.zeros(in_dim, out_dim), sigma0))
        self.mu_b = torch.nn.Parameter(torch.zeros(out_dim))
        # rho parametrises sigma through softplus; chosen so the initial
        # sigma is roughly 10**-2.5.
        rho_init = np.log(np.exp(10 ** -2.5) - 1)
        self.rho_W = torch.nn.Parameter(torch.ones(in_dim, out_dim) * rho_init)
        self.rho_b = torch.nn.Parameter(torch.ones(out_dim) * rho_init)

    def forward(self, x):
        return LinearFunction.apply(x, self.mu_W, self.mu_b, self.rho_W, self.rho_b)
class Bayesian_ReLU(nn.Module):
    """Bayesian fully-connected ReLU network built from Random_Linear layers.

    Architecture: Random_Linear(in, hid) -> ReLU, then (n_layers - 1) blocks
    of [Random_Linear(hid, hid) -> ReLU], and a final Random_Linear(hid, out).
    """

    def __init__(self, in_dim, out_dim, n_layers=1, hid_dim=50):
        super().__init__()
        self.n_layers = n_layers
        self.linear_in = Random_Linear(in_dim, hid_dim)
        if n_layers > 1:
            stack = []
            for _ in range(n_layers - 1):
                stack.append(Random_Linear(hid_dim, hid_dim))
                stack.append(nn.ReLU())
            self.hid_layers = nn.Sequential(*stack)
        self.linear_out = Random_Linear(hid_dim, out_dim)

    def forward(self, x):
        h = torch.relu(self.linear_in(x))
        if self.n_layers > 1:
            h = self.hid_layers(h)
        return self.linear_out(h)
def KL(model, depth, hid_dim = 50):
    """KL divergence between the model's mean-field Gaussian posterior and
    a zero-mean Gaussian prior, summed over every Random_Linear layer.

    The prior std for weights is Sigma[depth] / sqrt(hid_dim); for biases
    it is 1. Each term is the closed-form KL between diagonal Gaussians
    (up to an additive constant independent of the parameters).
    """
    res = 0
    sigma_W_init = Sigma[depth]/hid_dim**0.5
    sigma_b_init = 1
    for module in model.modules():
        if type(module).__name__=='Random_Linear':
            mu_W = module.mu_W
            mu_b = module.mu_b
            # Posterior stds via the softplus parameterisation of rho.
            sigma_W = torch.log(1+EPS+torch.exp(module.rho_W))
            sigma_b = torch.log(1+EPS+torch.exp(module.rho_b))
            # 1/2 * [ (sigma/sigma0)^2 + (mu/sigma0)^2 + 2*log(sigma0/sigma) ]
            res+=1/2*(torch.pow(sigma_W/sigma_W_init,2)+torch.pow(mu_W/sigma_W_init,2)+2*torch.log(torch.pow(sigma_W,-1)*sigma_W_init)).sum()
            res+=1/2*(torch.pow(sigma_b/sigma_b_init,2)+torch.pow(mu_b/sigma_b_init,2)+2*torch.log(torch.pow(sigma_b,-1)*sigma_b_init)).sum()
    return res
def mean_reduction(preds, y):
    """Subtract the targets *y* from column 0 of *preds*, in place.

    Column 0 holds the predicted mean; any further columns are untouched.
    Returns the mutated *preds* tensor for convenience.
    """
    preds[:, [0]] = preds[:, [0]] - y
    return preds
def elbo_loss(model,x,y,depth,TRAIN_LENGTH, n_samples = 32,variance = 'estimate'):
    """Negative ELBO for a Bayesian network: KL term (scaled by the batch
    fraction of the training set) minus the MC-averaged log-likelihood.

    variance='estimate' -- the model's second output column parametrises the
    likelihood std; variance='constant' -- a fixed std (log_like_std) is used.
    """
    assert variance in ['estimate','constant']
    if variance=='estimate':
        bs = x.shape[0]
        # Each sample's column 0 becomes (prediction - target) in place.
        res = torch.cat([mean_reduction(model(x),y) for i in range(n_samples)])
        mu = res[:,0]
        # Softplus transform of column 1; values above 60 bypass exp()
        # to avoid overflow.
        mask = (res[:,1]<=60).int()
        std = torch.log( 1+EPS+torch.exp(res[:,1]*mask) )+res[:,1]*(1-mask)
        # Gaussian log-density up to an additive constant.
        dens = -torch.pow(mu/std,2)/2 - torch.log(std)
        res = KL(model,depth)*bs/TRAIN_LENGTH-dens.sum()/n_samples
        return res
    if variance=='constant':
        bs = x.shape[0]
        res = torch.cat([mean_reduction(model(x),y) for i in range(n_samples)])
        mu = res[:,0]
        dens = -torch.pow(mu/log_like_std,2)/2
        res = KL(model,depth)*bs/TRAIN_LENGTH-dens.sum()/n_samples
        return res
def loss_vi(model, x,mu_target,var_target,n_samples = 32,variance = 'estimate'):
    """Moment-matching loss for a variational model: squared error between
    the MC-estimated predictive mean/variance and the given targets.

    variance='estimate' -- variance read from the model's second output
    column (softplus-transformed); variance='constant' -- variance taken
    as the empirical spread of the sampled means.
    """
    assert variance in ['estimate','constant']
    if variance=='estimate':
        bs = x.shape[0]
        preds = torch.cat([model(x) for i in range(n_samples)])
        mu = preds[:,0]
        # Softplus of column 1, squared to a variance; values above 60
        # bypass exp() to avoid overflow.
        mask = (preds[:,1]<=60).int()
        var = torch.pow(torch.log(1+EPS+torch.exp(preds[:,1]*mask))+preds[:,1]*(1-mask),2)
        # Average over the n_samples stochastic forward passes.
        mu=mu.view(n_samples,bs).T.mean(dim=1)
        var=var.view(n_samples,bs).T.mean(dim=1)
        mu_target = mu_target.view(mu.shape)
        var_target = var_target.view(var.shape)
        mu_diff = mu-mu_target
        var_diff = var-var_target
        res = mu_diff.dot(mu_diff)+var_diff.dot(var_diff)
        return res
    if variance=='constant':
        bs = x.shape[0]
        preds = torch.cat([model(x) for i in range(n_samples)])
        mu = preds[:,0]
        # Predictive variance as the spread of the sampled means.
        var=mu.view(n_samples,bs).T.var(dim=1)
        mu=mu.view(n_samples,bs).T.mean(dim=1)
        mu_target = mu_target.view(mu.shape)
        var_target = var_target.view(var.shape)
        mu_diff = mu-mu_target
        var_diff = var-var_target
        res = mu_diff.dot(mu_diff)+var_diff.dot(var_diff)
        return res
def loss_mcdo(model, x, mu_target, var_target, n_samples=32):
    """Moment-matching loss for an MC-dropout model.

    Runs *n_samples* stochastic forward passes, estimates the predictive
    mean and variance per input, and returns the summed squared error of
    both moments against the given targets.
    """
    samples = torch.cat([model(x) for _ in range(n_samples)], dim=1)
    mean_pred = samples.mean(dim=1)
    var_pred = samples.var(dim=1)
    d_mean = mean_pred - mu_target.view(mean_pred.shape)
    d_var = var_pred - var_target.view(var_pred.shape)
    return d_mean @ d_mean + d_var @ d_var
def train_model(model,n_layers,optimizer,loss_func,
                n_epochs,TRAIN_LENGTH,X,mu_target,
                var_target=None,n_samples=32,variance = 'constant',return_losses=False):
    """Generic training loop for the four loss functions defined above.

    The loss is dispatched on loss_func.__name__ because each loss takes a
    different argument list. var_target is required for the moment-matching
    losses (loss_vi, loss_mcdo). Returns the per-epoch loss history when
    return_losses is True, otherwise None.
    """
    ls = []
    loss_name = loss_func.__name__
    assert loss_name in ['elbo_loss','MCDO_loss','loss_vi','loss_mcdo']
    for epoch in range(n_epochs):
        optimizer.zero_grad()
        # Exactly one of these branches fires per iteration (see assert above).
        if (loss_name=='elbo_loss'):
            loss = loss_func(model,X,mu_target,n_layers,TRAIN_LENGTH,n_samples = n_samples,variance=variance)
        if (loss_name=='MCDO_loss'):
            loss = loss_func(model,X,mu_target, depth=n_layers,n_samples = n_samples)
        if (loss_name=='loss_vi'):
            assert var_target is not None
            loss = loss_func(model,X,mu_target,var_target,n_samples = n_samples,variance=variance)
        if (loss_name=='loss_mcdo'):
            assert var_target is not None
            loss = loss_func(model,X,mu_target,var_target,n_samples = n_samples)
        loss.backward()
        ls.append(loss.item())
        optimizer.step()
    if return_losses: return ls
def make_predictions(model, data, variance='constant', n_samples=128):
    """Monte-Carlo predictive mean and std over *n_samples* forward passes.

    variance='estimate' -- the model's second output column parametrises the
    std (softplus-transformed, averaged over samples);
    variance='constant' -- the std is the empirical spread of the sampled
    means. Returns (mu, std), each of shape (batch,).
    """
    assert variance in ['constant', 'estimate']
    batch = data.shape[0]
    stacked = torch.cat([model(data) for _ in range(n_samples)])
    mean_col = stacked[:, 0]
    if variance == 'estimate':
        # Softplus of the second column; values above 60 bypass exp()
        # to avoid overflow.
        below = (stacked[:, 1] <= 60).int()
        std_col = torch.log(1 + EPS + torch.exp(stacked[:, 1] * below)) + stacked[:, 1] * (1 - below)
        mu = mean_col.view(n_samples, batch).T.mean(dim=1)
        std = std_col.view(n_samples, batch).T.mean(dim=1)
        return mu, std
    # Constant-variance model: spread of the sampled means.
    by_sample = mean_col.view(n_samples, batch).T
    return by_sample.mean(dim=1), by_sample.std(dim=1)
def plot_res(target, std, mu, model_name,
             x=None, kind='mean', x_coords=None, y_coords=None):
    """Plot and save a comparison between model predictions and targets.

    kind='mean' -- predicted mean with a +/-2 sigma band vs the target mean.
    kind='var'  -- predicted variance (std**2) vs the target variance.
    kind='2d'   -- filled contour of the predictive std with the training
                   coordinates scattered on top (requires x_coords/y_coords;
                   here *target*, *std* and *mu* act as the X, Y, Z grids).
    Each figure is saved as '<title>.jpeg' and shown.

    Fix: the TeX labels are now raw strings -- '\\m', '\\p' and '\\s' are
    invalid escape sequences and raise SyntaxWarning on modern Python;
    the runtime string values are unchanged.
    """
    assert kind in ['mean', 'var', '2d']
    if x is None: x = range(mu.shape[0])
    if kind == 'mean':
        plt.figure(figsize=(12, 7))
        plt.plot(x, mu, label='Prediction')
        plt.plot(x, target, label='Target mean')
        plt.fill_between(x, y1=mu - 2 * std, y2=mu + 2 * std, alpha=0.5, label=r'$\mu \pm 2\sigma$ range')
        plt.legend(fontsize=12)
        plt.title('{} mean predictions'.format(model_name), fontsize=15)
        plt.xlabel('$x$', fontsize=15)
        plt.ylabel(r'$\mu(f(x))$', fontsize=15)
        plt.grid(True)
        plt.savefig('{} mean predictions'.format(model_name) + '.jpeg')
        plt.show()
    if kind == 'var':
        plt.figure(figsize=(12, 7))
        plt.plot(x, std ** 2, label='Prediction')
        plt.plot(x, target, label='Target var')
        plt.title('{} variance prediction'.format(model_name), fontsize=15)
        plt.xlabel('$x$', fontsize=15)
        plt.ylabel(r'$\sigma^2(f(x))$', fontsize=15)
        plt.legend(fontsize=12)
        plt.grid(True)
        plt.savefig('{} variance prediction'.format(model_name) + '.jpeg')
        plt.show()
    if kind == '2d':
        assert (x_coords is not None) and (y_coords is not None)
        plt.figure(figsize=(8, 6))
        plt.contourf(target, std, mu, 100, cmap='Spectral_r')
        plt.colorbar()
        plt.scatter(x_coords, y_coords, color='black')
        plt.title(r'$\sigma(f(x))$ {}'.format(model_name), fontsize=15)
        plt.xlabel('$x_1$', fontsize=15)
        plt.ylabel('$x_2$', fontsize=15)
        plt.savefig(r'$\sigma(f(x))$ {}'.format(model_name) + '.jpeg')
        plt.show()
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"numpy.sqrt",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"torch.nn.Sequential",
"torch.exp",
"matplotlib.pyplot.fill_between",
"torch.pow",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.nn.init.zeros... | [((597, 623), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'hid_dim'], {}), '(in_dim, hid_dim)\n', (606, 623), True, 'import torch.nn as nn\n'), ((628, 696), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.linear_in.weight'], {'std': '(1 / (4 * hid_dim) ** 0.5)'}), '(self.linear_in.weight, std=1 / (4 * hid_dim) ** 0.5)\n', (643, 696), True, 'import torch.nn as nn\n'), ((697, 732), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.linear_in.bias'], {}), '(self.linear_in.bias)\n', (711, 732), True, 'import torch.nn as nn\n'), ((760, 773), 'torch.nn.Dropout', 'nn.Dropout', (['p'], {}), '(p)\n', (770, 773), True, 'import torch.nn as nn\n'), ((1259, 1286), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'out_dim'], {}), '(hid_dim, out_dim)\n', (1268, 1286), True, 'import torch.nn as nn\n'), ((1291, 1360), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.linear_out.weight'], {'std': '(1 / (4 * out_dim) ** 0.5)'}), '(self.linear_out.weight, std=1 / (4 * out_dim) ** 0.5)\n', (1306, 1360), True, 'import torch.nn as nn\n'), ((1361, 1397), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.linear_out.bias'], {}), '(self.linear_out.bias)\n', (1375, 1397), True, 'import torch.nn as nn\n'), ((1701, 1711), 'numpy.sqrt', 'np.sqrt', (['H'], {}), '(H)\n', (1708, 1711), True, 'import numpy as np\n'), ((3157, 3173), 'torch.exp', 'torch.exp', (['rho_W'], {}), '(rho_W)\n', (3166, 3173), False, 'import torch\n'), ((3258, 3274), 'torch.exp', 'torch.exp', (['rho_b'], {}), '(rho_b)\n', (3267, 3274), False, 'import torch\n'), ((3547, 3574), 'torch.ones', 'torch.ones', (['in_dim', 'out_dim'], {}), '(in_dim, out_dim)\n', (3557, 3574), False, 'import torch\n'), ((9957, 9984), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (9967, 9984), True, 'from matplotlib import pyplot as plt\n'), ((9989, 10024), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'mu'], {'label': '"""Prediction"""'}), "(x, mu, label='Prediction')\n", 
(9997, 10024), True, 'from matplotlib import pyplot as plt\n'), ((10028, 10068), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'target'], {'label': '"""Target mean"""'}), "(x, target, label='Target mean')\n", (10036, 10068), True, 'from matplotlib import pyplot as plt\n'), ((10072, 10177), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x'], {'y1': '(mu - 2 * std)', 'y2': '(mu + 2 * std)', 'alpha': '(0.5)', 'label': '"""$\\\\mu \\\\pm 2\\\\sigma$ range"""'}), "(x, y1=mu - 2 * std, y2=mu + 2 * std, alpha=0.5, label=\n '$\\\\mu \\\\pm 2\\\\sigma$ range')\n", (10088, 10177), True, 'from matplotlib import pyplot as plt\n'), ((10163, 10186), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (10173, 10186), True, 'from matplotlib import pyplot as plt\n'), ((10261, 10291), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {'fontsize': '(15)'}), "('$x$', fontsize=15)\n", (10271, 10291), True, 'from matplotlib import pyplot as plt\n'), ((10296, 10335), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mu(f(x))$"""'], {'fontsize': '(15)'}), "('$\\\\mu(f(x))$', fontsize=15)\n", (10306, 10335), True, 'from matplotlib import pyplot as plt\n'), ((10339, 10353), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (10347, 10353), True, 'from matplotlib import pyplot as plt\n'), ((10426, 10436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10434, 10436), True, 'from matplotlib import pyplot as plt\n'), ((10462, 10489), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (10472, 10489), True, 'from matplotlib import pyplot as plt\n'), ((10494, 10535), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(std ** 2)'], {'label': '"""Prediction"""'}), "(x, std ** 2, label='Prediction')\n", (10502, 10535), True, 'from matplotlib import pyplot as plt\n'), ((10539, 10578), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'target'], {'label': '"""Target var"""'}), "(x, 
target, label='Target var')\n", (10547, 10578), True, 'from matplotlib import pyplot as plt\n'), ((10656, 10686), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {'fontsize': '(15)'}), "('$x$', fontsize=15)\n", (10666, 10686), True, 'from matplotlib import pyplot as plt\n'), ((10691, 10735), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma^2(f(x))$"""'], {'fontsize': '(15)'}), "('$\\\\sigma^2(f(x))$', fontsize=15)\n", (10701, 10735), True, 'from matplotlib import pyplot as plt\n'), ((10739, 10762), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (10749, 10762), True, 'from matplotlib import pyplot as plt\n'), ((10768, 10782), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (10776, 10782), True, 'from matplotlib import pyplot as plt\n'), ((10858, 10868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10866, 10868), True, 'from matplotlib import pyplot as plt\n'), ((10955, 10981), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (10965, 10981), True, 'from matplotlib import pyplot as plt\n'), ((10986, 11039), 'matplotlib.pyplot.contourf', 'plt.contourf', (['target', 'std', 'mu', '(100)'], {'cmap': '"""Spectral_r"""'}), "(target, std, mu, 100, cmap='Spectral_r')\n", (10998, 11039), True, 'from matplotlib import pyplot as plt\n'), ((11043, 11057), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (11055, 11057), True, 'from matplotlib import pyplot as plt\n'), ((11063, 11109), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_coords', 'y_coords'], {'color': '"""black"""'}), "(x_coords, y_coords, color='black')\n", (11074, 11109), True, 'from matplotlib import pyplot as plt\n'), ((11180, 11212), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x_1$"""'], {'fontsize': '(15)'}), "('$x_1$', fontsize=15)\n", (11190, 11212), True, 'from matplotlib import pyplot as plt\n'), ((11217, 11249), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""$x_2$"""'], {'fontsize': '(15)'}), "('$x_2$', fontsize=15)\n", (11227, 11249), True, 'from matplotlib import pyplot as plt\n'), ((11319, 11329), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11327, 11329), True, 'from matplotlib import pyplot as plt\n'), ((1211, 1233), 'torch.nn.Sequential', 'nn.Sequential', (['*models'], {}), '(*models)\n', (1224, 1233), True, 'import torch.nn as nn\n'), ((2048, 2070), 'torch.pow', 'torch.pow', (['(pred - y)', '(2)'], {}), '(pred - y, 2)\n', (2057, 2070), False, 'import torch\n'), ((2337, 2359), 'torch.zeros_like', 'torch.zeros_like', (['mu_W'], {}), '(mu_W)\n', (2353, 2359), False, 'import torch\n'), ((2361, 2383), 'torch.ones_like', 'torch.ones_like', (['rho_W'], {}), '(rho_W)\n', (2376, 2383), False, 'import torch\n'), ((2413, 2435), 'torch.zeros_like', 'torch.zeros_like', (['mu_b'], {}), '(mu_b)\n', (2429, 2435), False, 'import torch\n'), ((2437, 2459), 'torch.ones_like', 'torch.ones_like', (['rho_b'], {}), '(rho_b)\n', (2452, 2459), False, 'import torch\n'), ((2671, 2689), 'torch.mm', 'torch.mm', (['input', 'W'], {}), '(input, W)\n', (2679, 2689), False, 'import torch\n'), ((3215, 3244), 'torch.pow', 'torch.pow', (['(1 + EPS + ex_W)', '(-1)'], {}), '(1 + EPS + ex_W, -1)\n', (3224, 3244), False, 'import torch\n'), ((3316, 3345), 'torch.pow', 'torch.pow', (['(1 + EPS + ex_b)', '(-1)'], {}), '(1 + EPS + ex_b, -1)\n', (3325, 3345), False, 'import torch\n'), ((3703, 3723), 'torch.zeros', 'torch.zeros', (['out_dim'], {}), '(out_dim)\n', (3714, 3723), False, 'import torch\n'), ((4629, 4651), 'torch.nn.Sequential', 'nn.Sequential', (['*models'], {}), '(*models)\n', (4642, 4651), True, 'import torch.nn as nn\n'), ((6031, 6045), 'torch.log', 'torch.log', (['std'], {}), '(std)\n', (6040, 6045), False, 'import torch\n'), ((899, 926), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'hid_dim'], {}), '(hid_dim, hid_dim)\n', (908, 926), True, 'import torch.nn as nn\n'), ((935, 998), 'torch.nn.init.normal_', 
'nn.init.normal_', (['models[i].weight'], {'std': '(1 / (4 * hid_dim) ** 0.5)'}), '(models[i].weight, std=1 / (4 * hid_dim) ** 0.5)\n', (950, 998), True, 'import torch.nn as nn\n'), ((1003, 1033), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['models[i].bias'], {}), '(models[i].bias)\n', (1017, 1033), True, 'import torch.nn as nn\n'), ((1095, 1104), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1102, 1104), True, 'import torch.nn as nn\n'), ((1164, 1177), 'torch.nn.Dropout', 'nn.Dropout', (['p'], {}), '(p)\n', (1174, 1177), True, 'import torch.nn as nn\n'), ((3747, 3765), 'numpy.exp', 'np.exp', (['(10 ** -2.5)'], {}), '(10 ** -2.5)\n', (3753, 3765), True, 'import numpy as np\n'), ((3808, 3835), 'torch.ones', 'torch.ones', (['in_dim', 'out_dim'], {}), '(in_dim, out_dim)\n', (3818, 3835), False, 'import torch\n'), ((3864, 3882), 'numpy.exp', 'np.exp', (['(10 ** -2.5)'], {}), '(10 ** -2.5)\n', (3870, 3882), True, 'import numpy as np\n'), ((3925, 3944), 'torch.ones', 'torch.ones', (['out_dim'], {}), '(out_dim)\n', (3935, 3944), False, 'import torch\n'), ((4594, 4603), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4601, 4603), True, 'import torch.nn as nn\n'), ((6297, 6328), 'torch.pow', 'torch.pow', (['(mu / log_like_std)', '(2)'], {}), '(mu / log_like_std, 2)\n', (6306, 6328), False, 'import torch\n'), ((1894, 1912), 'torch.norm', 'torch.norm', (['W'], {'p': '(2)'}), '(W, p=2)\n', (1904, 1912), False, 'import torch\n'), ((1925, 1943), 'torch.norm', 'torch.norm', (['b'], {'p': '(2)'}), '(b, p=2)\n', (1935, 1943), False, 'import torch\n'), ((3643, 3663), 'numpy.sqrt', 'np.sqrt', (['(4 * out_dim)'], {}), '(4 * out_dim)\n', (3650, 3663), True, 'import numpy as np\n'), ((5153, 5176), 'torch.exp', 'torch.exp', (['module.rho_W'], {}), '(module.rho_W)\n', (5162, 5176), False, 'import torch\n'), ((5211, 5234), 'torch.exp', 'torch.exp', (['module.rho_b'], {}), '(module.rho_b)\n', (5220, 5234), False, 'import torch\n'), ((5947, 5974), 'torch.exp', 'torch.exp', (['(res[:, 1] * 
mask)'], {}), '(res[:, 1] * mask)\n', (5956, 5974), False, 'import torch\n'), ((6007, 6029), 'torch.pow', 'torch.pow', (['(mu / std)', '(2)'], {}), '(mu / std, 2)\n', (6016, 6029), False, 'import torch\n'), ((9467, 9496), 'torch.exp', 'torch.exp', (['(preds[:, 1] * mask)'], {}), '(preds[:, 1] * mask)\n', (9476, 9496), False, 'import torch\n'), ((2501, 2517), 'torch.exp', 'torch.exp', (['rho_W'], {}), '(rho_W)\n', (2510, 2517), False, 'import torch\n'), ((2633, 2649), 'torch.exp', 'torch.exp', (['rho_b'], {}), '(rho_b)\n', (2642, 2649), False, 'import torch\n'), ((6788, 6817), 'torch.exp', 'torch.exp', (['(preds[:, 1] * mask)'], {}), '(preds[:, 1] * mask)\n', (6797, 6817), False, 'import torch\n'), ((5255, 5291), 'torch.pow', 'torch.pow', (['(sigma_W / sigma_W_init)', '(2)'], {}), '(sigma_W / sigma_W_init, 2)\n', (5264, 5291), False, 'import torch\n'), ((5289, 5322), 'torch.pow', 'torch.pow', (['(mu_W / sigma_W_init)', '(2)'], {}), '(mu_W / sigma_W_init, 2)\n', (5298, 5322), False, 'import torch\n'), ((5392, 5428), 'torch.pow', 'torch.pow', (['(sigma_b / sigma_b_init)', '(2)'], {}), '(sigma_b / sigma_b_init, 2)\n', (5401, 5428), False, 'import torch\n'), ((5426, 5459), 'torch.pow', 'torch.pow', (['(mu_b / sigma_b_init)', '(2)'], {}), '(mu_b / sigma_b_init, 2)\n', (5435, 5459), False, 'import torch\n'), ((5332, 5354), 'torch.pow', 'torch.pow', (['sigma_W', '(-1)'], {}), '(sigma_W, -1)\n', (5341, 5354), False, 'import torch\n'), ((5469, 5491), 'torch.pow', 'torch.pow', (['sigma_b', '(-1)'], {}), '(sigma_b, -1)\n', (5478, 5491), False, 'import torch\n')] |
class World(object):
    # Forward declaration: lets the type hints below mention ``World``
    # before the real class (defined later in this module) rebinds the name.
    pass
class Field(object):
    # Forward declaration: lets ``Field`` appear in annotations before the
    # real class definition below rebinds the name.
    pass
from typing import Union
import numpy as np
import random
import math
from tocenv.env import TOCEnv
import tocenv.components.item as items
import tocenv.components.agent as agent
import tocenv.components.skill as skills
import tocenv.components.block as block
from tocenv.components.position import Position
from tocenv.components.block import BlockType
from tocenv.components.agent import Color
from tocenv.components.util.weighted_random import get_weighted_position
class Field(object):
    """Axis-aligned rectangular patch of the world in which apples (re)spawn.

    The two corners are normalised on construction so that ``self.p1``
    always holds the minimum-x/y corner and ``self.p2`` the maximum one,
    regardless of the order the caller passed them in.
    """

    def __init__(self, world: World, p1: Position, p2: Position):
        self.world = world
        # Normalise the corners: p1 = lower-left, p2 = upper-right.
        self.p1 = Position(x=min(p1.x, p2.x), y=min(p1.y, p2.y))
        self.p2 = Position(x=max(p1.x, p2.x), y=max(p1.y, p2.y))

    @property
    def area(self):
        """Number of grid cells covered by the field (bounds inclusive)."""
        return (self.p2.x - self.p1.x + 1) * (self.p2.y - self.p1.y + 1)

    @property
    def positions(self):
        """All grid positions inside the field, row by row."""
        return [Position(x=x, y=y)
                for y in range(self.p1.y, self.p2.y + 1)
                for x in range(self.p1.x, self.p2.x + 1)]

    def include(self, pos: Position):
        """Return True when ``pos`` lies inside the field.

        Bug fix: this was decorated with ``@property`` while also taking a
        ``pos`` argument, which made it impossible to call (the property
        access itself raised for the missing argument).  It is a plain
        method now.
        """
        return (self.p1.x <= pos.x <= self.p2.x) and \
               (self.p1.y <= pos.y <= self.p2.y)

    @property
    def center(self):
        """Integer centre position of the field (floor division)."""
        return Position(x=(self.p1.x + self.p2.x) // 2,
                        y=(self.p1.y + self.p2.y) // 2)

    def is_overlap(self, field: Field):
        """Return True when this field and ``field`` share at least one cell."""
        if self.p2.y < field.p1.y or self.p1.y > field.p2.y:
            return False
        if self.p2.x < field.p1.x or self.p1.x > field.p2.x:
            return False
        return True

    @property
    def is_alive(self) -> bool:
        """True while at least one item is still present inside the field."""
        return any(self.world.get_item(pos) is not None for pos in self.positions)

    @staticmethod
    def create_from_parameter(world: World, pos: Position, radius: int):
        """Build a square field centred on ``pos`` with the given radius."""
        return Field(world=world,
                     p1=Position(pos.x - radius, pos.y - radius),
                     p2=Position(pos.x + radius, pos.y + radius))

    def tick(self):
        """Advance the field one simulation step (may spawn new apples)."""
        self.generate_item()

    def force_spawn_item(self, ratio=0.5):
        """Immediately fill about ``ratio`` of the field's cells with apples."""
        positions = self.positions
        # Always spawn at least one apple, even for tiny fields/ratios.
        num_samples = max(math.ceil(len(positions) * ratio), 1)
        for pos in random.sample(positions, num_samples):
            self.world.spawn_item(items.Apple(), Position(x=pos.x, y=pos.y))

    def generate_item(self, prob=0.5 ** 4):
        """Probabilistically spawn apples; chance grows with local density."""
        for pos in self.positions:
            surrounded_positions = self.world.get_surrounded_positions(pos=pos, radius=3)
            surrounded_items = self.world.get_surrounded_items(pos=pos, radius=3)
            # Fraction of neighbouring cells already holding an item,
            # scaled down by ``prob``.
            apple_ratio = len(surrounded_items) / len(surrounded_positions) * prob
            if random.random() < apple_ratio:
                self.world.spawn_item(items.Apple(), pos)
class VariousAppleField(Field):
    """Field that spawns two colours of apples (blue/red) with a fixed mix.

    ratio: apple re-spawn weight between 'BlueApple' and 'RedApple'
    (the weight assigned to BlueApple; RedApple gets ``1 - ratio``).
    prob: per-tick spawn probability passed to :meth:`generate_item`.
    """

    def __init__(self, world: World, p1: Position, p2: Position, prob: float, ratio: float):
        super(VariousAppleField, self).__init__(world=world, p1=p1, p2=p2)
        self.prob = prob
        self.ratio = ratio

    def tick(self):
        self.generate_item(prob=self.prob)

    def generate_item(self, prob=0.025):
        """Per-tick respawn step.

        NOTE(review): the actual spawning loop is commented out below, so
        this currently computes candidate positions and apple colours but
        places nothing — presumably disabled on purpose; confirm before
        re-enabling.
        """
        empty_positions = self._get_empty_positions()
        apples = [items.BlueApple, items.RedApple]
        spawned_apples = random.choices(apples, weights=(
            self.world.env.apple_color_ratio, 1 - self.world.env.apple_color_ratio), k=len(empty_positions))
        # for pos, item in zip(empty_positions, spawned_apples):
        #     surrounded_positions = self.world.get_surrounded_positions(pos=pos, radius=3)
        #     surrounded_items = self.world.get_surrounded_items(pos=pos, radius=3)
        #     apple_ratio = len(surrounded_items) / len(surrounded_positions) * prob
        #     if random.random() < apple_ratio:
        #         self.world.spawn_item(item(), pos)

    def force_spawn_item(self, ratio=0.5):
        """Immediately spawn coloured apples on ~``ratio`` of the cells."""
        positions = self.positions
        num_samples = max(math.ceil(len(positions) * ratio), 1)
        sampled_position = random.sample(positions, num_samples)
        apples = [items.BlueApple, items.RedApple]
        # Colour of each candidate apple is drawn with the configured ratio.
        spawned_apples = random.choices(apples, weights=(
            self.world.env.apple_color_ratio, 1 - self.world.env.apple_color_ratio), k=len(sampled_position))
        for pos, item in zip(sampled_position, spawned_apples):
            if random.random() < ratio:
                self.world.spawn_item(item(), pos)

    def _get_empty_positions(self) -> [Position]:
        """Positions inside the field holding neither an item nor an agent."""
        positions = []
        for y in range(self.p1.y, self.p2.y + 1):
            for x in range(self.p1.x, self.p2.x + 1):
                pos = Position(x, y)
                if self.world.get_item(pos) is None and \
                        self.world.get_agent(pos) is None:
                    positions.append(pos)
        return positions

    @staticmethod
    def create_from_parameter(world: World, pos: Position, radius: int, prob: float, ratio: float):
        """Build a square VariousAppleField centred on ``pos``.

        Bug fix: this previously ignored ``prob``/``ratio`` and returned a
        plain ``Field``, so fields built through this factory lost their
        colour mix and spawn probability.  It now returns a fully
        configured ``VariousAppleField``.
        """
        p1 = Position(pos.x - radius, pos.y - radius)
        p2 = Position(pos.x + radius, pos.y + radius)
        return VariousAppleField(world=world, p1=p1, p2=p2, prob=prob, ratio=ratio)
class World(object):
    """Grid world holding agents, items, visual effects and fruit fields.

    ``size`` is interpreted as ``(height, width)``: ``grid``/``effects``
    are indexed ``[y][x]`` and the ``width``/``height`` properties read
    ``size[1]``/``size[0]`` respectively.
    """

    def __init__(self,
                 env: TOCEnv,
                 size: tuple,
                 apple_color_ratio: float,
                 apple_spawn_ratio: float,
                 patch_count: int,
                 patch_distance: int):
        self.env = env
        self.size = size
        self.agents = []
        self.grid = None
        self.effects = None
        self._build_grid()

        self.on_changed_callbacks = []
        self.fruits_fields = []
        self.patch_count = patch_count
        self.patch_distance = patch_distance
        self.apple_color_ratio = apple_color_ratio
        self.apple_spawn_ratio = apple_spawn_ratio
        self._create_random_field()

        self.durating_effects = list()
        self.clear_effect()

    def _build_grid(self):
        # One object slot per cell; None marks an empty cell.
        self.grid = np.empty(shape=self.size, dtype=object)

    def _create_random_field(self):
        # Four apple patches at fixed, hard-coded centres.
        for cx, cy in ((4, 4), (11, 4), (4, 11), (11, 11)):
            self.add_fruits_field(
                VariousAppleField.create_from_parameter(world=self, pos=Position(cx, cy), radius=1,
                                                        prob=self.apple_spawn_ratio,
                                                        ratio=self.apple_color_ratio))

    def spawn_agent(self, pos: Position, color: Color):
        """Create an agent of the given colour at ``pos`` and register it.

        Raises ValueError for an unsupported colour (previously this left
        ``spawned`` unbound and crashed with an UnboundLocalError).
        """
        if color == Color.Purple:
            spawned = agent.PurpleAgent(world=self, pos=pos)
        elif color == Color.Green:
            spawned = agent.GreenAgent(world=self, pos=pos)
        elif color == Color.Blue:
            spawned = agent.BlueAgent(world=self, pos=pos)
        elif color == Color.Orange:
            spawned = agent.OrangeAgent(world=self, pos=pos)
        else:
            raise ValueError('Unsupported agent color: {}'.format(color))
        self.agents.append(spawned)
        return spawned

    def spawn_block(self, pos: Position):
        """Create a Block at ``pos`` and place it in the grid."""
        spawned = block.Block(world=self)
        # Bug fix: store the created instance, not the ``block`` module.
        self.grid[pos.y][pos.x] = spawned
        return spawned

    def spawn_item(self, item: items.Item, pos: Position) -> bool:
        """Place ``item`` at ``pos``; False when off-map or cell occupied."""
        if not self.map_contains(pos):
            return False
        if self.grid[pos.y][pos.x] is None:
            self.grid[pos.y][pos.x] = item
            return True
        return False

    def add_fruits_field(self, field: Field):
        """Register a fruit field and immediately pre-populate it."""
        self.fruits_fields.append(field)
        field.force_spawn_item()

    def get_agents(self) -> []:
        return self.agents

    def map_contains(self, pos: Position) -> bool:
        """True when ``pos`` lies inside the grid bounds."""
        return 0 <= pos.x < self.width and 0 <= pos.y < self.height

    def contains_field(self, field: Field) -> bool:
        """True when both corners of ``field`` lie inside the map."""
        return self.map_contains(field.p1) and self.map_contains(field.p2)

    def collapsed_field_exist(self, field: Field) -> bool:
        """True when ``field`` overlaps any already-registered field."""
        return any(field.is_overlap(iter_field) for iter_field in self.fruits_fields)

    def get_item(self, pos: Position) -> Union[items.Item, None]:
        return self.grid[pos.y][pos.x]

    def get_agent(self, pos: Position) -> Union[agent.Agent, None]:
        """Linear scan for the agent standing at ``pos`` (None if empty)."""
        for iter_agent in self.agents:
            if iter_agent.position == pos:
                return iter_agent
        return None

    def remove_item(self, pos: Position) -> bool:
        """Clear the item at ``pos``; False when the cell was already empty."""
        if self.get_item(pos):
            self.grid[pos.y][pos.x] = None
            return True
        return False

    def correct_item(self, pos: Position) -> Union[items.Item, None]:
        """Pick up (remove and return) the item at ``pos``, if any."""
        item = self.get_item(pos)
        if item:
            self.remove_item(pos)
            return item
        return None

    def get_surrounded_positions(self, pos: Position, radius: int) -> [Position]:
        """Neighbouring positions within ``radius`` that are on the map."""
        return [position for position in pos.get_surrounded(radius=radius)
                if self.map_contains(position)]

    def get_surrounded_items(self, pos: Position, radius: int) -> [items.Item]:
        """Items found on the on-map cells within ``radius`` of ``pos``."""
        # Local renamed from ``items`` to avoid shadowing the items module.
        found = []
        for position in self.get_surrounded_positions(pos=pos, radius=radius):
            item = self.get_item(pos=position)
            if item is not None:
                found.append(item)
        return found

    def apply_effect(self, pos: Position, effect: skills.Skill) -> bool:
        """Apply a skill effect at ``pos``; False when ``pos`` is off-map."""
        if self.map_contains(pos=pos):
            if isinstance(effect, skills.Punish):
                # Damage every agent standing on the target cell.
                for iter_agents in self.agents:
                    if iter_agents.position == pos:
                        iter_agents.on_punished(effect.damage)
                # Mark the cell with the Punish visual flag (bitmask).
                self.effects[pos.y][pos.x] = np.bitwise_or(int(self.effects[pos.y][pos.x]), BlockType.Punish)
            if effect.effect_duration > 1:
                # Multi-tick effects are tracked for later ticks.
                self.durating_effects.append((pos, effect))
            return True
        else:
            return False

    def clear_effect(self):
        """Reset the effect bitmask layer to all-zero."""
        self.effects = np.zeros(shape=self.size, dtype=np.uint64)

    def tick(self):
        """Advance every fruit field by one simulation step."""
        for field in self.fruits_fields:
            field.tick()

    def get_alive_patches(self) -> [Field]:
        """Fields that still contain at least one item."""
        return [field for field in self.fruits_fields if field.is_alive]

    @property
    def width(self) -> int:
        return self.size[1]

    @property
    def height(self) -> int:
        return self.size[0]
from tocenv.components.algorithm.BFS import BFS
| [
"random.sample",
"tocenv.components.agent.PurpleAgent",
"tocenv.components.item.append",
"tocenv.components.item.Apple",
"tocenv.components.block.Block",
"tocenv.components.agent.BlueAgent",
"tocenv.components.position.Position",
"numpy.zeros",
"tocenv.components.agent.OrangeAgent",
"numpy.empty",... | [((854, 878), 'tocenv.components.position.Position', 'Position', ([], {'x': 'p1_x', 'y': 'p1_y'}), '(x=p1_x, y=p1_y)\n', (862, 878), False, 'from tocenv.components.position import Position\n'), ((897, 921), 'tocenv.components.position.Position', 'Position', ([], {'x': 'p2_x', 'y': 'p2_y'}), '(x=p2_x, y=p2_y)\n', (905, 921), False, 'from tocenv.components.position import Position\n'), ((1580, 1612), 'tocenv.components.position.Position', 'Position', ([], {'x': 'center_x', 'y': 'center_y'}), '(x=center_x, y=center_y)\n', (1588, 1612), False, 'from tocenv.components.position import Position\n'), ((2138, 2178), 'tocenv.components.position.Position', 'Position', (['(pos.x - radius)', '(pos.y - radius)'], {}), '(pos.x - radius, pos.y - radius)\n', (2146, 2178), False, 'from tocenv.components.position import Position\n'), ((2192, 2232), 'tocenv.components.position.Position', 'Position', (['(pos.x + radius)', '(pos.y + radius)'], {}), '(pos.x + radius, pos.y + radius)\n', (2200, 2232), False, 'from tocenv.components.position import Position\n'), ((2502, 2539), 'random.sample', 'random.sample', (['positions', 'num_samples'], {}), '(positions, num_samples)\n', (2515, 2539), False, 'import random\n'), ((4653, 4690), 'random.sample', 'random.sample', (['positions', 'num_samples'], {}), '(positions, num_samples)\n', (4666, 4690), False, 'import random\n'), ((5595, 5635), 'tocenv.components.position.Position', 'Position', (['(pos.x - radius)', '(pos.y - radius)'], {}), '(pos.x - radius, pos.y - radius)\n', (5603, 5635), False, 'from tocenv.components.position import Position\n'), ((5649, 5689), 'tocenv.components.position.Position', 'Position', (['(pos.x + radius)', '(pos.y + radius)'], {}), '(pos.x + radius, pos.y + radius)\n', (5657, 5689), False, 'from tocenv.components.position import Position\n'), ((6572, 6611), 'numpy.empty', 'np.empty', ([], {'shape': 'self.size', 'dtype': 'object'}), '(shape=self.size, dtype=object)\n', (6580, 6611), True, 'import 
numpy as np\n'), ((8463, 8486), 'tocenv.components.block.Block', 'block.Block', ([], {'world': 'self'}), '(world=self)\n', (8474, 8486), True, 'import tocenv.components.block as block\n'), ((11486, 11528), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.size', 'dtype': 'np.uint64'}), '(shape=self.size, dtype=np.uint64)\n', (11494, 11528), True, 'import numpy as np\n'), ((8017, 8055), 'tocenv.components.agent.PurpleAgent', 'agent.PurpleAgent', ([], {'world': 'self', 'pos': 'pos'}), '(world=self, pos=pos)\n', (8034, 8055), True, 'import tocenv.components.agent as agent\n'), ((2611, 2624), 'tocenv.components.item.Apple', 'items.Apple', ([], {}), '()\n', (2622, 2624), True, 'import tocenv.components.item as items\n'), ((2626, 2652), 'tocenv.components.position.Position', 'Position', ([], {'x': 'pos.x', 'y': 'pos.y'}), '(x=pos.x, y=pos.y)\n', (2634, 2652), False, 'from tocenv.components.position import Position\n'), ((4987, 5002), 'random.random', 'random.random', ([], {}), '()\n', (5000, 5002), False, 'import random\n'), ((5263, 5277), 'tocenv.components.position.Position', 'Position', (['x', 'y'], {}), '(x, y)\n', (5271, 5277), False, 'from tocenv.components.position import Position\n'), ((8113, 8150), 'tocenv.components.agent.GreenAgent', 'agent.GreenAgent', ([], {'world': 'self', 'pos': 'pos'}), '(world=self, pos=pos)\n', (8129, 8150), True, 'import tocenv.components.agent as agent\n'), ((10781, 10799), 'tocenv.components.item.append', 'items.append', (['item'], {}), '(item)\n', (10793, 10799), True, 'import tocenv.components.item as items\n'), ((1230, 1248), 'tocenv.components.position.Position', 'Position', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (1238, 1248), False, 'from tocenv.components.position import Position\n'), ((3121, 3136), 'random.random', 'random.random', ([], {}), '()\n', (3134, 3136), False, 'import random\n'), ((6735, 6749), 'tocenv.components.position.Position', 'Position', (['(4)', '(4)'], {}), '(4, 4)\n', (6743, 6749), False, 'from 
tocenv.components.position import Position\n'), ((7048, 7063), 'tocenv.components.position.Position', 'Position', (['(11)', '(4)'], {}), '(11, 4)\n', (7056, 7063), False, 'from tocenv.components.position import Position\n'), ((7362, 7377), 'tocenv.components.position.Position', 'Position', (['(4)', '(11)'], {}), '(4, 11)\n', (7370, 7377), False, 'from tocenv.components.position import Position\n'), ((7676, 7692), 'tocenv.components.position.Position', 'Position', (['(11)', '(11)'], {}), '(11, 11)\n', (7684, 7692), False, 'from tocenv.components.position import Position\n'), ((8207, 8243), 'tocenv.components.agent.BlueAgent', 'agent.BlueAgent', ([], {'world': 'self', 'pos': 'pos'}), '(world=self, pos=pos)\n', (8222, 8243), True, 'import tocenv.components.agent as agent\n'), ((2883, 2901), 'tocenv.components.position.Position', 'Position', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (2891, 2901), False, 'from tocenv.components.position import Position\n'), ((2984, 3002), 'tocenv.components.position.Position', 'Position', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (2992, 3002), False, 'from tocenv.components.position import Position\n'), ((3194, 3207), 'tocenv.components.item.Apple', 'items.Apple', ([], {}), '()\n', (3205, 3207), True, 'import tocenv.components.item as items\n'), ((3209, 3227), 'tocenv.components.position.Position', 'Position', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (3217, 3227), False, 'from tocenv.components.position import Position\n'), ((8302, 8340), 'tocenv.components.agent.OrangeAgent', 'agent.OrangeAgent', ([], {'world': 'self', 'pos': 'pos'}), '(world=self, pos=pos)\n', (8319, 8340), True, 'import tocenv.components.agent as agent\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for nussl/utils.py
"""
import unittest
import nussl
import numpy as np
from scipy import signal
class TestUtils(unittest.TestCase):
"""
"""
def test_find_peak_indices(self):
array = np.arange(0, 100)
peak = nussl.find_peak_indices(array, 1)[0]
assert peak == 99
array = np.arange(0, 100).reshape(10, 10)
peak = nussl.find_peak_indices(array, 3, min_dist=0)
assert peak == [[9, 9], [9, 8], [9, 7]]
def test_find_peak_values(self):
array = np.arange(0, 100)
peak = nussl.find_peak_values(array, 1)[0]
assert peak == 99
array = np.arange(0, 100).reshape(10, 10)
peak = nussl.find_peak_values(array, 3, min_dist=0)
assert peak == [99, 98, 97]
def test_add_mismatched_arrays(self):
long_array = np.ones((20,))
short_array = np.arange(10)
expected_result = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=float)
# Test basic cases
result = nussl.add_mismatched_arrays(long_array, short_array)
assert all(np.equal(result, expected_result))
result = nussl.add_mismatched_arrays(short_array, long_array)
assert all(np.equal(result, expected_result))
expected_result = expected_result[:len(short_array)]
result = nussl.add_mismatched_arrays(long_array, short_array, truncate=True)
assert all(np.equal(result, expected_result))
result = nussl.add_mismatched_arrays(short_array, long_array, truncate=True)
assert all(np.equal(result, expected_result))
# Test complex casting
short_array = np.arange(10, dtype=complex)
expected_result = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=complex)
result = nussl.add_mismatched_arrays(long_array, short_array)
assert all(np.equal(result, expected_result))
result = nussl.add_mismatched_arrays(short_array, long_array)
assert all(np.equal(result, expected_result))
expected_result = expected_result[:len(short_array)]
result = nussl.add_mismatched_arrays(long_array, short_array, truncate=True)
assert all(np.equal(result, expected_result))
result = nussl.add_mismatched_arrays(short_array, long_array, truncate=True)
assert all(np.equal(result, expected_result))
# Test case where arrays are equal length
short_array = np.ones((15,))
expected_result = short_array * 2
result = nussl.add_mismatched_arrays(short_array, short_array)
assert all(np.equal(result, expected_result))
result = nussl.add_mismatched_arrays(short_array, short_array, truncate=True)
assert all(np.equal(result, expected_result))
| [
"numpy.ones",
"numpy.equal",
"numpy.array",
"nussl.find_peak_values",
"nussl.find_peak_indices",
"nussl.add_mismatched_arrays",
"numpy.arange"
] | [((262, 279), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (271, 279), True, 'import numpy as np\n'), ((424, 469), 'nussl.find_peak_indices', 'nussl.find_peak_indices', (['array', '(3)'], {'min_dist': '(0)'}), '(array, 3, min_dist=0)\n', (447, 469), False, 'import nussl\n'), ((572, 589), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (581, 589), True, 'import numpy as np\n'), ((733, 777), 'nussl.find_peak_values', 'nussl.find_peak_values', (['array', '(3)'], {'min_dist': '(0)'}), '(array, 3, min_dist=0)\n', (755, 777), False, 'import nussl\n'), ((878, 892), 'numpy.ones', 'np.ones', (['(20,)'], {}), '((20,))\n', (885, 892), True, 'import numpy as np\n'), ((915, 928), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (924, 928), True, 'import numpy as np\n'), ((955, 1043), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {'dtype': 'float'}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n dtype=float)\n', (963, 1043), True, 'import numpy as np\n'), ((1121, 1173), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['long_array', 'short_array'], {}), '(long_array, short_array)\n', (1148, 1173), False, 'import nussl\n'), ((1246, 1298), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['short_array', 'long_array'], {}), '(short_array, long_array)\n', (1273, 1298), False, 'import nussl\n'), ((1433, 1500), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['long_array', 'short_array'], {'truncate': '(True)'}), '(long_array, short_array, truncate=True)\n', (1460, 1500), False, 'import nussl\n'), ((1573, 1640), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['short_array', 'long_array'], {'truncate': '(True)'}), '(short_array, long_array, truncate=True)\n', (1600, 1640), False, 'import nussl\n'), ((1749, 1777), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'complex'}), '(10, dtype=complex)\n', (1758, 
1777), True, 'import numpy as np\n'), ((1804, 1894), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {'dtype': 'complex'}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n dtype=complex)\n', (1812, 1894), True, 'import numpy as np\n'), ((1945, 1997), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['long_array', 'short_array'], {}), '(long_array, short_array)\n', (1972, 1997), False, 'import nussl\n'), ((2070, 2122), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['short_array', 'long_array'], {}), '(short_array, long_array)\n', (2097, 2122), False, 'import nussl\n'), ((2257, 2324), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['long_array', 'short_array'], {'truncate': '(True)'}), '(long_array, short_array, truncate=True)\n', (2284, 2324), False, 'import nussl\n'), ((2397, 2464), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['short_array', 'long_array'], {'truncate': '(True)'}), '(short_array, long_array, truncate=True)\n', (2424, 2464), False, 'import nussl\n'), ((2592, 2606), 'numpy.ones', 'np.ones', (['(15,)'], {}), '((15,))\n', (2599, 2606), True, 'import numpy as np\n'), ((2667, 2720), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['short_array', 'short_array'], {}), '(short_array, short_array)\n', (2694, 2720), False, 'import nussl\n'), ((2793, 2861), 'nussl.add_mismatched_arrays', 'nussl.add_mismatched_arrays', (['short_array', 'short_array'], {'truncate': '(True)'}), '(short_array, short_array, truncate=True)\n', (2820, 2861), False, 'import nussl\n'), ((295, 328), 'nussl.find_peak_indices', 'nussl.find_peak_indices', (['array', '(1)'], {}), '(array, 1)\n', (318, 328), False, 'import nussl\n'), ((605, 637), 'nussl.find_peak_values', 'nussl.find_peak_values', (['array', '(1)'], {}), '(array, 1)\n', (627, 637), False, 'import nussl\n'), ((1193, 1226), 'numpy.equal', 'np.equal', (['result', 'expected_result'], 
{}), '(result, expected_result)\n', (1201, 1226), True, 'import numpy as np\n'), ((1318, 1351), 'numpy.equal', 'np.equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (1326, 1351), True, 'import numpy as np\n'), ((1520, 1553), 'numpy.equal', 'np.equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (1528, 1553), True, 'import numpy as np\n'), ((1660, 1693), 'numpy.equal', 'np.equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (1668, 1693), True, 'import numpy as np\n'), ((2017, 2050), 'numpy.equal', 'np.equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (2025, 2050), True, 'import numpy as np\n'), ((2142, 2175), 'numpy.equal', 'np.equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (2150, 2175), True, 'import numpy as np\n'), ((2344, 2377), 'numpy.equal', 'np.equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (2352, 2377), True, 'import numpy as np\n'), ((2484, 2517), 'numpy.equal', 'np.equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (2492, 2517), True, 'import numpy as np\n'), ((2740, 2773), 'numpy.equal', 'np.equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (2748, 2773), True, 'import numpy as np\n'), ((2881, 2914), 'numpy.equal', 'np.equal', (['result', 'expected_result'], {}), '(result, expected_result)\n', (2889, 2914), True, 'import numpy as np\n'), ((375, 392), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (384, 392), True, 'import numpy as np\n'), ((684, 701), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (693, 701), True, 'import numpy as np\n')] |
# python clone of tagtime http://messymatters.com/tagtime/
# set cron job with $crontab -e
# * * * * * DISPLAY=:1 python3 /path/to/prompt.py 2> /tmp/err
# this fires once a minute
# set debugging = True first to make sure cron job fires
# check /tmp/err for problems if it doesn't fire
# or use a cron alternative as desired.
# just needs to fire every minute or so.
import pymsgbox
import datetime
import numpy
import time
# Set True to make the script prompt on every run (handy to verify the
# cron job fires); False for normal exponentially-spaced pings.
debugging = False

avg_delay = 30  # minutes; mean of the exponential gap between pings

time_path = "/home/zephyr/workspace/biohacking/flow/time.txt"
log_path = "/home/zephyr/workspace/biohacking/flow/log.txt"

prompt = 'What are you doing right at this moment?'

# Bug fix: curr_time is needed below to schedule the next ping even in
# debugging mode, so compute it unconditionally (previously it was only
# assigned inside the non-debugging branch, giving a NameError when
# debugging was True).
curr_time = int(time.time())

if not debugging:
    # time.txt stores the unix timestamp of the next scheduled prompt;
    # bail out quietly if we are not due yet.
    with open(time_path, "r") as f:
        data = int(float(f.read().split("\n")[0]))
    if data > curr_time:
        exit()

# prompt user
response = pymsgbox.prompt(prompt)

# Bug fix: pymsgbox.prompt returns None when the dialog is cancelled or
# closed; skip logging in that case instead of crashing on the string
# concatenation below (which also prevented the next ping being scheduled).
if response is not None:
    # record response as follows:
    # Sep 07 18:10 | response
    ts = datetime.datetime.now().strftime("%b %d %H:%M")
    with open(log_path, "a") as myfile:
        myfile.write(ts + " | " + response + "\n")

# Draw the delay until the next prompt from an exponential distribution
# (memoryless, as in the original TagTime design) and persist it.
next_delay = numpy.random.exponential(avg_delay) * 60
next_time = str(curr_time + next_delay)
with open(time_path, "w") as f:
    f.write(next_time)
| [
"datetime.datetime.now",
"time.time",
"numpy.random.exponential",
"pymsgbox.prompt"
] | [((877, 900), 'pymsgbox.prompt', 'pymsgbox.prompt', (['prompt'], {}), '(prompt)\n', (892, 900), False, 'import pymsgbox\n'), ((1155, 1190), 'numpy.random.exponential', 'numpy.random.exponential', (['avg_delay'], {}), '(avg_delay)\n', (1179, 1190), False, 'import numpy\n'), ((726, 737), 'time.time', 'time.time', ([], {}), '()\n', (735, 737), False, 'import time\n'), ((963, 986), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (984, 986), False, 'import datetime\n')] |
from matplotlib import pyplot as plt # Pyplot for nice graphs
import numpy as np # NumPy
from numpy import linalg as LA
from Functions import ImportSystem
from progress.bar import Bar
# Retrieve unit cell
xyz, shiftx, shifty, filename = ImportSystem(1)
repx = int(input('Repetition in x? '))
repy = int(input('Repetition in y? '))
xyztemp = xyz
for i in range(repx):
shiftarr = xyz + np.array([shiftx*(i+1), 0, 0])
xyztemp = np.append(xyz, shiftarr, axis=0)
print(xyz.shape)
xyz = xyztemp
xyztemp = xyz
for i in range(repy):
shiftarr = xyz + np.array([0, shifty*(i+1), 0])
xyztemp = np.append(xyz, shiftarr, axis=0)
print(xyz.shape)
xyz = xyztemp
xlin = np.array([[0, 0]])
ylin = np.array([[0, 0]])
zlin = np.array([[0, 0]])
# bar = Bar('Gathering connections ', max=xyz.shape[0]+xyz.shape[0])
for i in range(xyz.shape[0]):
for j in range(xyz.shape[0]):
if LA.norm(np.subtract(xyz[i], xyz[j])) < 1.6:
TmpArr = np.array([[xyz[i, 0], xyz[j, 0]]])
xlin = np.append(xlin, TmpArr, axis=0)
TmpArr = np.array([[xyz[i, 1], xyz[j, 1]]])
ylin = np.append(ylin, TmpArr, axis=0)
TmpArr = np.array([[xyz[i, 2], xyz[j, 2]]])
zlin = np.append(zlin, TmpArr, axis=0)
# bar.next()
# bar.finish()
fig = plt.figure(figsize=(15,15))
for i in range(xlin.shape[0]):
plt.plot(xlin[i], ylin[i])
plt.scatter(xyz[:, 0], xyz[:, 1], s=300)
plt.gca().set_aspect('equal', adjustable='box')
plt.ylabel('[Å]')
plt.xlabel('[Å]')
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"Functions.ImportSystem",
"numpy.subtract",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.show"
] | [((263, 278), 'Functions.ImportSystem', 'ImportSystem', (['(1)'], {}), '(1)\n', (275, 278), False, 'from Functions import ImportSystem\n'), ((705, 723), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (713, 723), True, 'import numpy as np\n'), ((731, 749), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (739, 749), True, 'import numpy as np\n'), ((757, 775), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (765, 775), True, 'import numpy as np\n'), ((1335, 1363), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (1345, 1363), True, 'from matplotlib import pyplot as plt\n'), ((1425, 1465), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xyz[:, 0]', 'xyz[:, 1]'], {'s': '(300)'}), '(xyz[:, 0], xyz[:, 1], s=300)\n', (1436, 1465), True, 'from matplotlib import pyplot as plt\n'), ((1514, 1531), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""[Å]"""'], {}), "('[Å]')\n", (1524, 1531), True, 'from matplotlib import pyplot as plt\n'), ((1532, 1549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""[Å]"""'], {}), "('[Å]')\n", (1542, 1549), True, 'from matplotlib import pyplot as plt\n'), ((1550, 1560), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1558, 1560), True, 'from matplotlib import pyplot as plt\n'), ((460, 492), 'numpy.append', 'np.append', (['xyz', 'shiftarr'], {'axis': '(0)'}), '(xyz, shiftarr, axis=0)\n', (469, 492), True, 'import numpy as np\n'), ((630, 662), 'numpy.append', 'np.append', (['xyz', 'shiftarr'], {'axis': '(0)'}), '(xyz, shiftarr, axis=0)\n', (639, 662), True, 'import numpy as np\n'), ((1398, 1424), 'matplotlib.pyplot.plot', 'plt.plot', (['xlin[i]', 'ylin[i]'], {}), '(xlin[i], ylin[i])\n', (1406, 1424), True, 'from matplotlib import pyplot as plt\n'), ((415, 449), 'numpy.array', 'np.array', (['[shiftx * (i + 1), 0, 0]'], {}), '([shiftx * (i + 1), 0, 0])\n', (423, 449), True, 'import numpy as np\n'), ((585, 619), 'numpy.array', 'np.array', (['[0, 
shifty * (i + 1), 0]'], {}), '([0, shifty * (i + 1), 0])\n', (593, 619), True, 'import numpy as np\n'), ((1466, 1475), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1473, 1475), True, 'from matplotlib import pyplot as plt\n'), ((989, 1023), 'numpy.array', 'np.array', (['[[xyz[i, 0], xyz[j, 0]]]'], {}), '([[xyz[i, 0], xyz[j, 0]]])\n', (997, 1023), True, 'import numpy as np\n'), ((1043, 1074), 'numpy.append', 'np.append', (['xlin', 'TmpArr'], {'axis': '(0)'}), '(xlin, TmpArr, axis=0)\n', (1052, 1074), True, 'import numpy as np\n'), ((1096, 1130), 'numpy.array', 'np.array', (['[[xyz[i, 1], xyz[j, 1]]]'], {}), '([[xyz[i, 1], xyz[j, 1]]])\n', (1104, 1130), True, 'import numpy as np\n'), ((1150, 1181), 'numpy.append', 'np.append', (['ylin', 'TmpArr'], {'axis': '(0)'}), '(ylin, TmpArr, axis=0)\n', (1159, 1181), True, 'import numpy as np\n'), ((1203, 1237), 'numpy.array', 'np.array', (['[[xyz[i, 2], xyz[j, 2]]]'], {}), '([[xyz[i, 2], xyz[j, 2]]])\n', (1211, 1237), True, 'import numpy as np\n'), ((1257, 1288), 'numpy.append', 'np.append', (['zlin', 'TmpArr'], {'axis': '(0)'}), '(zlin, TmpArr, axis=0)\n', (1266, 1288), True, 'import numpy as np\n'), ((932, 959), 'numpy.subtract', 'np.subtract', (['xyz[i]', 'xyz[j]'], {}), '(xyz[i], xyz[j])\n', (943, 959), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from visdom import Visdom
import numpy as np
import math
import os.path
import getpass
from sys import platform as _platform
from six.moves import urllib
# Connect to a locally running Visdom server on the 'main' environment.
viz = Visdom(port=8098,env='main')
# Fail fast if the server is not reachable.
assert viz.check_connection()
# Remove any windows left over from a previous run.
viz.close()

# Create a line-plot window seeded with an initial trace named '1'.
win = viz.line(
    X = np.array([0,1]),
    Y = np.array([0,1]),
    opts = dict(
        # xtickmin = -2,
        # xtickmax = 2,
        # xtickstep = 1,
        # ytickmin = -3,
        # ytickmax = 5,
        # ytickstep = 1,
        markersysmbol = 'dot',  # NOTE(review): likely a typo of 'markersymbol' — unknown opts are ignored; confirm
        markersize = 5,
        showlegend = False,
    ),
    name = '1'
)
# Add a second trace named '2' to the same window.
viz.line(
    X = np.array([0,1]),
    Y = np.array([1,2]),
    opts = dict(markercolor = np.array([50]),markersysmbol = 'dot',),
    win = win,
    update = 'new',
)

    name = '2',
)
# Stream points to both traces: trace '1' follows y = 2x,
# trace '2' follows y = 10x.
for i in range(10000):
    viz.line(
        X = np.array([i]),
        Y = np.array([i * 2]),
        win = win,
        name = '1',
        update='append'
    )
    viz.line(
        X = np.array([i]),
        Y = np.array([i*10]),
        win = win,
        name = '2',
        update='append'
    )
| [
"numpy.array",
"visdom.Visdom"
] | [((324, 353), 'visdom.Visdom', 'Visdom', ([], {'port': '(8098)', 'env': '"""main"""'}), "(port=8098, env='main')\n", (330, 353), False, 'from visdom import Visdom\n'), ((426, 442), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (434, 442), True, 'import numpy as np\n'), ((449, 465), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (457, 465), True, 'import numpy as np\n'), ((708, 724), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (716, 724), True, 'import numpy as np\n'), ((731, 747), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (739, 747), True, 'import numpy as np\n'), ((908, 921), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (916, 921), True, 'import numpy as np\n'), ((930, 947), 'numpy.array', 'np.array', (['[i * 2]'], {}), '([i * 2])\n', (938, 947), True, 'import numpy as np\n'), ((1021, 1034), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (1029, 1034), True, 'import numpy as np\n'), ((1043, 1061), 'numpy.array', 'np.array', (['[i * 10]'], {}), '([i * 10])\n', (1051, 1061), True, 'import numpy as np\n'), ((776, 790), 'numpy.array', 'np.array', (['[50]'], {}), '([50])\n', (784, 790), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import numpy as np
RICO_LABELS_LOWER = [
'text',
'image',
'icon',
'list item',
'text button',
'toolbar',
'web view',
'input',
'card',
'advertisement',
'background image',
'drawer',
'radio button',
'checkbox',
'multi-tab',
'pager indicator',
'modal',
'on/off switch',
'slider',
'map view',
'button bar',
'video',
'bottom navigation',
'number stepper',
'date picker'
]
class LayoutDataset(Dataset):
def __init__(self, cfg, dataset, real=False):
self.dataset = dataset
self.max_ele_num = cfg.max_ele_num
self.max_label_num = cfg.max_label_num
self.real = real
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
fn = self.dataset[idx]["fn"]
if self.real:
label = self.dataset[idx]["label"]
for i in range(len(label)):
if label[i]>self.max_label_num:
label[i] = self.max_label_num
if label[i]<1:
label[i] = 1
box = self.dataset[idx]["box"]
else:
label = self.dataset[idx]["pred_label"]
for i in range(len(label)):
if label[i]>self.max_label_num:
label[i] = self.max_label_num
if label[i]<1:
label[i] = 1
box = self.dataset[idx]["pred_box"]
pad_label, pad_box, label_mask = self.padding_data(label,box)
sample = {'fn':fn, 'label':pad_label, 'box':pad_box, 'label_mask':label_mask}
return sample
def padding_data(self, label, box):
pad_label = np.zeros([self.max_ele_num+2], dtype='int64')
pad_box = np.zeros([self.max_ele_num+2, 4], dtype='float32')
label_mask = np.zeros([self.max_ele_num+2], dtype='int64')
for i in range(len(label)+2):
label_mask[i] = 1
# sos label is the max label index +1
# eos label is the max label index +2
pad_label[0] = self.max_label_num + 1
pad_label[1:len(label)+1] = np.array(label)
pad_label[len(label)+1] = self.max_label_num + 2
pad_box[1:len(box)+1] = np.array(box)
return pad_label, pad_box, label_mask
class TransformerWithToken(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward, num_layers):
super().__init__()
self.token = nn.Parameter(torch.randn(1, 1, d_model))
token_mask = torch.zeros(1, 1, dtype=torch.bool)
self.register_buffer('token_mask', token_mask)
self.core = nn.TransformerEncoder(
nn.TransformerEncoderLayer(
d_model=d_model, nhead=nhead,
dim_feedforward=dim_feedforward,
), num_layers=num_layers)
def forward(self, x, src_key_padding_mask):
# x: [N, B, E]
# padding_mask: [B, N]
# `False` for valid values
# `True` for padded values
B = x.size(1)
token = self.token.expand(-1, B, -1)
x = torch.cat([token, x], dim=0)
token_mask = self.token_mask.expand(B, -1)
padding_mask = torch.cat([token_mask, src_key_padding_mask], dim=1)
x = self.core(x, src_key_padding_mask=padding_mask)
return x
class FidNet(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
d_model = cfg.d_model #256
nhead = cfg.n_heads #4
num_layers = cfg.n_layers_encoder #4
max_bbox = cfg.max_ele_num + 2 #20
num_label = cfg.max_label_num + 3 #25
# encoder
self.emb_label = nn.Embedding(num_label, d_model)
self.fc_bbox = nn.Linear(4, d_model)
self.enc_fc_in = nn.Linear(d_model * 2, d_model)
self.enc_transformer = TransformerWithToken(d_model=d_model,
dim_feedforward=d_model // 2,
nhead=nhead, num_layers=num_layers)
self.fc_out_disc = nn.Linear(d_model, 1)
# decoder
self.pos_token = nn.Parameter(torch.rand(max_bbox, 1, d_model))
self.dec_fc_in = nn.Linear(d_model * 2, d_model)
te = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead,
dim_feedforward=d_model // 2)
self.dec_transformer = nn.TransformerEncoder(te, num_layers=num_layers)
self.fc_out_cls = nn.Linear(d_model, num_label)
self.fc_out_bbox = nn.Linear(d_model, 4)
def extract_features(self, bbox, label, padding_mask):
b = self.fc_bbox(bbox)
l = self.emb_label(label)
x = self.enc_fc_in(torch.cat([b, l], dim=-1))
x = torch.relu(x).permute(1, 0, 2)
x = self.enc_transformer(x, padding_mask)
return x[0]
def forward(self, bbox, label, padding_mask):
B, N, _ = bbox.size()
x = self.extract_features(bbox, label, padding_mask)
logit_disc = self.fc_out_disc(x).squeeze(-1)
x = x.unsqueeze(0).expand(N, -1, -1)
t = self.pos_token[:N].expand(-1, B, -1)
x = torch.cat([x, t], dim=-1)
x = torch.relu(self.dec_fc_in(x))
x = self.dec_transformer(x, src_key_padding_mask=padding_mask)
x = x.permute(1, 0, 2)
# logit_cls: [M, L] bbox_pred: [M, 4]
logit_cls = self.fc_out_cls(x)
bbox_pred = torch.sigmoid(self.fc_out_bbox(x))
return logit_disc, logit_cls, bbox_pred
def parsing_text_to_label_set(text):
text = text.replace('.', '')
text = text.replace(',', '')
label_set = []
label_with_number = []
word_list = text.split(' ')
word_list_len = len(word_list)
num_flag = False
number = 0
i = 0
while i < word_list_len:
if word_list[i].isdigit():
num_flag = True
number = int(word_list[i])
i += 1
continue
if num_flag == True:
# for label with two words
if i < (word_list_len - 1):
words = word_list[i].lower() + ' ' + word_list[i+1].lower()
if words in RICO_LABELS_LOWER:
index = RICO_LABELS_LOWER.index(words) + 1
if index not in label_set:
label_set.append(index)
for j in range(number):
label_with_number.append(index)
num_flag = False
i += 2
continue
if word_list[i].lower() in RICO_LABELS_LOWER:
index = RICO_LABELS_LOWER.index(word_list[i].lower()) + 1
if index not in label_set:
label_set.append(index)
for j in range(number):
label_with_number.append(index)
num_flag = False
i += 1
continue
i += 1
return label_set, label_with_number
def _test_parsing_text_to_label_set():
with open('../data/layout_nl10.txt','r', encoding='utf-8') as f:
nl_list = f.read().rstrip('\n').split('\n')
for item in nl_list:
item = item.split('\t')
fn = item[0]
number = item[2]
text = item[1]
_, label_with_number = parsing_text_to_label_set(text)
if len(label_with_number) != int(number):
print(fn,label_with_number)
input()
import pickle
from copy import deepcopy
def SOA(text, label):
'''
input sample:
text: "A page with 1 Background Image, 1 Text, 1 Text Button, 1 Image, 1 Text Button, 1 Icon and 2 Text Button. "
label: [11, 1, 5, 2, 5, 3, 5, 5]
'''
matched_number = 0
_, label_with_number = parsing_text_to_label_set(text)
nl_labels = deepcopy(label_with_number)
ui_labels = deepcopy(label)
for i in range(len(ui_labels)):
for j in ui_labels:
if j in nl_labels:
ui_labels.remove(j)
nl_labels.remove(j)
matched_number += 1
return matched_number
def _check_nl_layout_semantic_align():
with open('fid_data/RICO_10_filtered.pkl','rb')as f:
layout = pickle.load(f)
with open('../data/dataset/layout_nl_10.txt','r', encoding='utf-8') as f:
text = f.read().rstrip('\n').split('\n')
text_dataset = {}
text_file_list = []
for item in text:
fn = item.split('\t')[0]
text_dataset[fn] = [str(item.split('\t')[1])]
text_file_list.append(fn)
output = []
for item in layout:
if str(item['fn']) in text_file_list:
item['text'] = text_dataset[str(item['fn'])]
output.append(item)
total_layout = len(output)
correct_layout = 0
total_ele = 0
correct_ele = 0
wrong = 0
for l in output:
text = l['text'][0]
label = l['label']
matched_number = SOA(text, label)
# print(label,ori_label,matched_number)
# input()
if matched_number == len(label):
correct_layout += 1
else:
wrong += 1
print(l['fn'], text, label)
input()
total_ele += len(label)
correct_ele += matched_number
SOA_layout = correct_layout / total_layout
SOA_ele = correct_ele / total_ele
return SOA_layout, SOA_ele, wrong
# SOA_layout, SOA_ele, wrong = _check_nl_layout_semantic_align()
# print(SOA_layout, SOA_ele, wrong) | [
"torch.nn.TransformerEncoder",
"torch.nn.Embedding",
"torch.rand",
"pickle.load",
"torch.relu",
"numpy.array",
"numpy.zeros",
"torch.nn.Linear",
"copy.deepcopy",
"torch.nn.TransformerEncoderLayer",
"torch.zeros",
"torch.cat",
"torch.randn"
] | [((8072, 8099), 'copy.deepcopy', 'deepcopy', (['label_with_number'], {}), '(label_with_number)\n', (8080, 8099), False, 'from copy import deepcopy\n'), ((8116, 8131), 'copy.deepcopy', 'deepcopy', (['label'], {}), '(label)\n', (8124, 8131), False, 'from copy import deepcopy\n'), ((1912, 1959), 'numpy.zeros', 'np.zeros', (['[self.max_ele_num + 2]'], {'dtype': '"""int64"""'}), "([self.max_ele_num + 2], dtype='int64')\n", (1920, 1959), True, 'import numpy as np\n'), ((1976, 2028), 'numpy.zeros', 'np.zeros', (['[self.max_ele_num + 2, 4]'], {'dtype': '"""float32"""'}), "([self.max_ele_num + 2, 4], dtype='float32')\n", (1984, 2028), True, 'import numpy as np\n'), ((2048, 2095), 'numpy.zeros', 'np.zeros', (['[self.max_ele_num + 2]'], {'dtype': '"""int64"""'}), "([self.max_ele_num + 2], dtype='int64')\n", (2056, 2095), True, 'import numpy as np\n'), ((2346, 2361), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2354, 2361), True, 'import numpy as np\n'), ((2452, 2465), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (2460, 2465), True, 'import numpy as np\n'), ((2734, 2769), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {'dtype': 'torch.bool'}), '(1, 1, dtype=torch.bool)\n', (2745, 2769), False, 'import torch\n'), ((3300, 3328), 'torch.cat', 'torch.cat', (['[token, x]'], {'dim': '(0)'}), '([token, x], dim=0)\n', (3309, 3328), False, 'import torch\n'), ((3404, 3456), 'torch.cat', 'torch.cat', (['[token_mask, src_key_padding_mask]'], {'dim': '(1)'}), '([token_mask, src_key_padding_mask], dim=1)\n', (3413, 3456), False, 'import torch\n'), ((3888, 3920), 'torch.nn.Embedding', 'nn.Embedding', (['num_label', 'd_model'], {}), '(num_label, d_model)\n', (3900, 3920), True, 'import torch.nn as nn\n'), ((3944, 3965), 'torch.nn.Linear', 'nn.Linear', (['(4)', 'd_model'], {}), '(4, d_model)\n', (3953, 3965), True, 'import torch.nn as nn\n'), ((3991, 4022), 'torch.nn.Linear', 'nn.Linear', (['(d_model * 2)', 'd_model'], {}), '(d_model * 2, d_model)\n', (4000, 4022), 
True, 'import torch.nn as nn\n'), ((4291, 4312), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(1)'], {}), '(d_model, 1)\n', (4300, 4312), True, 'import torch.nn as nn\n'), ((4429, 4460), 'torch.nn.Linear', 'nn.Linear', (['(d_model * 2)', 'd_model'], {}), '(d_model * 2, d_model)\n', (4438, 4460), True, 'import torch.nn as nn\n'), ((4475, 4566), 'torch.nn.TransformerEncoderLayer', 'nn.TransformerEncoderLayer', ([], {'d_model': 'd_model', 'nhead': 'nhead', 'dim_feedforward': '(d_model // 2)'}), '(d_model=d_model, nhead=nhead, dim_feedforward=\n d_model // 2)\n', (4501, 4566), True, 'import torch.nn as nn\n'), ((4633, 4681), 'torch.nn.TransformerEncoder', 'nn.TransformerEncoder', (['te'], {'num_layers': 'num_layers'}), '(te, num_layers=num_layers)\n', (4654, 4681), True, 'import torch.nn as nn\n'), ((4709, 4738), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'num_label'], {}), '(d_model, num_label)\n', (4718, 4738), True, 'import torch.nn as nn\n'), ((4766, 4787), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(4)'], {}), '(d_model, 4)\n', (4775, 4787), True, 'import torch.nn as nn\n'), ((5392, 5417), 'torch.cat', 'torch.cat', (['[x, t]'], {'dim': '(-1)'}), '([x, t], dim=-1)\n', (5401, 5417), False, 'import torch\n'), ((8478, 8492), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8489, 8492), False, 'import pickle\n'), ((2685, 2711), 'torch.randn', 'torch.randn', (['(1)', '(1)', 'd_model'], {}), '(1, 1, d_model)\n', (2696, 2711), False, 'import torch\n'), ((2881, 2975), 'torch.nn.TransformerEncoderLayer', 'nn.TransformerEncoderLayer', ([], {'d_model': 'd_model', 'nhead': 'nhead', 'dim_feedforward': 'dim_feedforward'}), '(d_model=d_model, nhead=nhead, dim_feedforward=\n dim_feedforward)\n', (2907, 2975), True, 'import torch.nn as nn\n'), ((4370, 4402), 'torch.rand', 'torch.rand', (['max_bbox', '(1)', 'd_model'], {}), '(max_bbox, 1, d_model)\n', (4380, 4402), False, 'import torch\n'), ((4940, 4965), 'torch.cat', 'torch.cat', (['[b, l]'], {'dim': '(-1)'}), '([b, l], 
dim=-1)\n', (4949, 4965), False, 'import torch\n'), ((4979, 4992), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (4989, 4992), False, 'import torch\n')] |
"""
Octave
======
Module for working with octaves.
The following is an example on how to use :class:`acoustics.octave.Octave`.
.. literalinclude:: ../examples/octave.py
"""
from __future__ import division
import numpy as np
REFERENCE = 1000.0
"""
Reference frequency.
"""
def band_of_frequency(f, order=1, ref=REFERENCE):
"""
Calculate the band ``n`` from a given center frequency.
:param f: Frequency :math:`f`.
:param order: Band order.
:param ref: Reference center frequency :math:`f_0`.
"""
return np.round( ( np.log2(f/ref) - 1.0/order ) * order)
def frequency_of_band(n, order=1, ref=REFERENCE):
"""
Calculate center frequency of band ``n``.
:param n: band ``n``.
:param order: Order of octave.
:param ref: Reference center frequency.
"""
return ref * 10.0**(3.0/order/10.0) * 2.0**(n/order)
def upper_frequency(center, order=1):
"""
Upper frequency of frequency band given a center frequency and order.
:param centr: Center frequencies.
:param order: Fraction of octave.
.. math:: f_u = f_c \cdot 2^{\\frac{+1}{2N}}
"""
return center * 2.0**(+1.0/(2.0*order))
def lower_frequency(center, order=1):
"""
Lower frequency of frequency band given a center frequency and order.
:param center: Center frequencies.
:param order: Fraction of octave.
.. math:: f_l = f_c \cdot 2^{\\frac{-1}{2N}}
"""
return center * 2.0**(-1.0/(2.0*order))
class Octave(object):
"""
Class to calculate octave center frequencies.
"""
def __init__(self, order=1, interval=None, fmin=None, fmax=None, unique=False, reference=REFERENCE):
self.reference = reference
"""
Reference center frequency :math:`f_{c,0}`.
"""
self.order = order
"""
Fraction of octave.
"""
if (interval is not None) and (fmin is not None or fmax is not None):
raise AttributeError
self._interval = interval
"""Interval"""
self._fmin = fmin
"""Minimum frequency of a range."""
self._fmax = fmax
"""Maximum frequency of a range."""
self.unique = unique
"""Whether or not to calculate the requested values for every value of ``interval``."""
@property
def fmin(self):
"""Minimum frequency of an interval."""
if self._fmin is not None:
return self._fmin
elif self._interval is not None:
return self.interval.min()
@fmin.setter
def fmin(self, x):
if self.interval is not None:
pass # Warning, remove interval first.
else:
self._fmin = x
@property
def fmax(self):
"""Maximum frequency of an interval."""
if self._fmax is not None:
return self._fmax
elif self._interval is not None:
return self.interval.max()
@fmax.setter
def fmax(self, x):
if self.interval is not None:
pass
else:
self._fmax = x
@property
def interval(self):
"""Interval."""
return self._interval
@interval.setter
def interval(self, x):
if self._fmin or self._fmax:
pass
else:
self._interval = x if isinstance(x, np.ndarray) else np.array(x)
def _n(self, f):
"""
Calculate the band ``n`` from a given frequency.
:param f: Frequency
See also :func:`band_of_frequency`.
"""
return band_of_frequency(f, order=self.order, ref=self.reference)
def _fc(self, n):
"""
Calculate center frequency of band ``n``.
:param n: band ``n`.
See also :func:`frequency_of_band`.
"""
return frequency_of_band(n, order=self.order, ref=self.reference)
@property
def n(self):
"""
Return band ``n`` for a given frequency.
"""
if self.interval is not None and self.unique:
return self._n(self.interval)
else:
return np.arange(self._n(self.fmin), self._n(self.fmax)+1)
@property
def center(self):
"""
Return center frequencies :math:`f_c`.
.. math:: f_c = f_{ref} \cdot 2^{n/N} \\cdot 10^{\\frac{3}{10N}}
"""
n = self.n
return self._fc(n)
@property
def bandwidth(self):
"""
Bandwidth of bands.
.. math:: B = f_u - f_l
"""
return self.upper - self.lower
@property
def lower(self):
"""
Lower frequency limits of bands.
.. math:: f_l = f_c \cdot 2^{\\frac{-1}{2N}}
See also :func:`lower_frequency`.
"""
return lower_frequency(self.center, self.order)
@property
def upper(self):
"""
Upper frequency limits of bands.
.. math:: f_u = f_c \cdot 2^{\\frac{+1}{2N}}
See also :func:`upper_frequency`.
"""
return upper_frequency(self.center, self.order)
###def center_frequency_octave(frequencies, order=1):
###"""
###Calculate the center frequencies :math:`f_c` of the octaves that (partially) cover ``frequencies``.
###:param frequencies: An iterable containing frequencies.
###:param order: An integer indicating the octave order. E.g., for 1/3-octaves use ``order=3``
###Center frequencies are calculated using:
###.. math:: f_c = 1000 \cdot 2^{n/N} \\cdot 10^{\\frac{3}{10N}}
###"""
###n = lambda fc, N: np.log2(fc/1000.0) - 1.0/N # To calculate the nth octave from a given frequency.
###n_min = np.floor(n(np.min(frequencies), order))
###n_max = np.ceil(n(np.max(frequencies), order))
###n = np.arange(n_min, n_max+1)
###fc = 1000.0 * 10.0**(3.0/order/10.0) * 2.0**(n)
###return fc
| [
"numpy.array",
"numpy.log2"
] | [((550, 566), 'numpy.log2', 'np.log2', (['(f / ref)'], {}), '(f / ref)\n', (557, 566), True, 'import numpy as np\n'), ((3467, 3478), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (3475, 3478), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 15:36:31 2019
@author: gaetandissez
Important note:
We initialize factor matrices once and for all so that each new model uses the same ones as the previous ones.
It makes the results more stable because they depend on the initialization.
"""
import numpy as np
import sklearn.metrics as metrics
from spherecluster import SphericalKMeans
from scipy import sparse
class NMTF1:
#First load and convert to numpy arrays the data
R12 = sparse.load_npz('./tmp/R12.npz').toarray()
eps = 1e-8
n1, n2 = R12.shape
def update(self, A, num, den):
return A*(num / (den + NMTF1.eps))**0.5
vupdate = np.vectorize(update)
def __init__(self, parameters, mask):
self.K = parameters
self.M = mask
self.iter = 0
def initialize(self):
self.R12_train = np.multiply(NMTF1.R12, self.M)
"""spherical k-means"""
skm1 = SphericalKMeans(n_clusters=self.K[0])
skm1.fit(self.R12_train.transpose())
skm2 = SphericalKMeans(n_clusters=self.K[1])
skm2.fit(self.R12_train)
self.G1 = skm1.cluster_centers_.transpose()
self.G2 = skm2.cluster_centers_.transpose()
self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
#Save the factor matrices for the mext models
NMTF1.G1 = self.G1
NMTF1.G2 = self.G2
def iterate(self):
Gt2G2 = np.dot(self.G2.transpose(), self.G2)
G2Gt2 = np.dot(self.G2, self.G2.transpose())
R12G2 = np.dot(self.R12_train, self.G2)
R12G2St12 = np.dot(R12G2, self.S12.transpose())
G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
self.G1 = NMTF1.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
self.G2 = NMTF1.vupdate(self, self.G2, Rt12G1S12, G2Gt2Rt12G1S12)
self.S12 = NMTF1.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
self.iter += 1
def validate(self, metric='aps'):
n, m = NMTF1.R12.shape
R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
R12_2 = []
R12_found_2 = []
for i in range(n):
for j in range(m):
if self.M[i, j] == 0:
R12_2.append(NMTF1.R12[i, j])
R12_found_2.append(R12_found[i, j])
if metric == 'auroc':
fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
return metrics.auc(fpr, tpr)
if metric == 'aps':
return metrics.average_precision_score(R12_2, R12_found_2)
def loss(self):
J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
return J
def __repr__(self):
return 'Model NMTF with (k1, k2) = ({}, {})'.format(self.K[0], self.K[1])
class NMTF2:
#First load and convert to numpy arrays the data
R12 = sparse.load_npz('./tmp/R12.npz').toarray()
R23 = sparse.load_npz('./tmp/R23.npz').toarray()
eps = 1e-8
n1, n2 = R12.shape
_, n3 = R23.shape
def update(self, A, num, den):
return A*(num / (den + NMTF2.eps))**0.5
vupdate = np.vectorize(update)
def __init__(self, parameters, mask):
self.K = parameters
self.M = mask
self.iter = 0
def initialize(self):
self.R12_train = np.multiply(NMTF2.R12, self.M)
"""spherical k-means"""
skm3 = SphericalKMeans(n_clusters=self.K[2])
skm3.fit(NMTF2.R23)
#Reload matrices that have already been used before
self.G1 = NMTF1.G1
self.G2 = NMTF1.G2
self.G3 = skm3.cluster_centers_.transpose()
self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
self.S23 = np.linalg.multi_dot([self.G2.transpose(), NMTF2.R23, self.G3])
#Save G3 for the next models
NMTF2.G3 = self.G3
def iterate(self):
Gt2G2 = np.dot(self.G2.transpose(), self.G2)
G2Gt2 = np.dot(self.G2, self.G2.transpose())
G3Gt3 = np.dot(self.G3, self.G3.transpose())
Gt3G3 = np.dot(self.G3.transpose(), self.G3)
R12G2 = np.dot(self.R12_train, self.G2)
R23G3 = np.dot(NMTF2.R23, self.G3)
R12G2St12 = np.dot(R12G2, self.S12.transpose())
G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
R23G3St23 = np.dot(R23G3, self.S23.transpose())
G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
Rt23G2S23 = np.linalg.multi_dot([NMTF2.R23.transpose(),self.G2, self.S23])
G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
self.G1 = NMTF2.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
self.G2 = NMTF2.vupdate(self, self.G2, Rt12G1S12 + R23G3St23, G2Gt2Rt12G1S12 + G2Gt2R23G3St23)
self.G3 = NMTF2.vupdate(self, self.G3, Rt23G2S23, G3Gt3Rt23G2S23)
self.S12 = NMTF2.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
self.S23 = NMTF2.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
self.iter += 1
def validate(self, metric='aps'):
n, m = NMTF2.R12.shape
R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
R12_2 = []
R12_found_2 = []
for i in range(n):
for j in range(m):
if self.M[i, j] == 0:
R12_2.append(NMTF2.R12[i, j])
R12_found_2.append(R12_found[i, j])
if metric == 'auroc':
fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
return metrics.auc(fpr, tpr)
if metric == 'aps':
return metrics.average_precision_score(R12_2, R12_found_2)
def loss(self):
J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF2.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
return J
def __repr__(self):
return 'Model NMTF with (k1, k2, k3) = ({}, {}, {})'.format(self.K[0], self.K[1], self.K[2])
class NMTF3:
#First load and convert to numpy arrays the data
R12 = sparse.load_npz('./tmp/R12.npz').toarray()
R23 = sparse.load_npz('./tmp/R23.npz').toarray()
R34 = sparse.load_npz('./tmp/R34.npz').toarray()
eps = 1e-8
n1, n2 = R12.shape
n3, n4 = R34.shape
def update(self, A, num, den):
return A*(num / (den + NMTF3.eps))**0.5
vupdate = np.vectorize(update)
def __init__(self, parameters, mask):
self.K = parameters
self.M = mask
self.iter = 0
def initialize(self):
self.R12_train = np.multiply(NMTF3.R12, self.M)
"""spherical k-means"""
skm4 = SphericalKMeans(n_clusters=self.K[3])
skm4.fit(NMTF3.R34)
self.G4 = skm4.cluster_centers_.transpose()
#Use the same matrices as those precedently computed
self.G1 = NMTF1.G1
self.G2 = NMTF1.G2
self.G3 = NMTF2.G3
self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
self.S23 = np.linalg.multi_dot([self.G2.transpose(), NMTF3.R23, self.G3])
self.S34 = np.linalg.multi_dot([self.G3.transpose(), NMTF3.R34, self.G4])
#Save G4 for next models
NMTF3.G4 = self.G4
def iterate(self):
Gt2G2 = np.dot(self.G2.transpose(), self.G2)
G2Gt2 = np.dot(self.G2, self.G2.transpose())
G3Gt3 = np.dot(self.G3, self.G3.transpose())
Gt3G3 = np.dot(self.G3.transpose(), self.G3)
G4Gt4 = np.dot(self.G4, self.G4.transpose())
R12G2 = np.dot(self.R12_train, self.G2)
R23G3 = np.dot(NMTF3.R23, self.G3)
R34G4 = np.dot(NMTF3.R34, self.G4)
R12G2St12 = np.dot(R12G2, self.S12.transpose())
G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
R23G3St23 = np.dot(R23G3, self.S23.transpose())
G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
Rt23G2S23 = np.linalg.multi_dot([NMTF3.R23.transpose(),self.G2, self.S23])
G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
R34G4St34 = np.dot(R34G4, self.S34.transpose())
G3Gt3R34G4St34 = np.dot(G3Gt3,R34G4St34)
Rt34G3S34 = np.linalg.multi_dot([NMTF3.R34.transpose(),self.G3, self.S34])
G4Gt4Rt34G3S34 = np.dot(G4Gt4,Rt34G3S34)
Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
Gt3R34G4 = np.dot(self.G3.transpose(),R34G4)
Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
Gt3G3S34Gt4G4 = np.linalg.multi_dot([Gt3G3, self.S34, self.G4.transpose(), self.G4])
self.G1 = NMTF3.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
self.G2 = NMTF3.vupdate(self, self.G2, Rt12G1S12 + R23G3St23, G2Gt2Rt12G1S12 + G2Gt2R23G3St23)
self.G3 = NMTF3.vupdate(self, self.G3, Rt23G2S23 + R34G4St34, G3Gt3Rt23G2S23 + G3Gt3R34G4St34)
self.G4 = NMTF3.vupdate(self, self.G4, Rt34G3S34, G4Gt4Rt34G3S34)
self.S12 = NMTF3.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
self.S23 = NMTF3.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
self.S34 = NMTF3.vupdate(self, self.S34, Gt3R34G4, Gt3G3S34Gt4G4)
self.iter += 1
def validate(self, metric='aps'):
n, m = NMTF3.R12.shape
R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
R12_2 = []
R12_found_2 = []
for i in range(n):
for j in range(m):
if self.M[i, j] == 0:
R12_2.append(NMTF3.R12[i, j])
R12_found_2.append(R12_found[i, j])
if metric == 'auroc':
fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
return metrics.auc(fpr, tpr)
if metric == 'aps':
return metrics.average_precision_score(R12_2, R12_found_2)
def loss(self):
J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF3.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF3.R34 - np.linalg.multi_dot([self.G3, self.S34, self.G4.transpose()]), ord='fro')**2
return J
def __repr__(self):
return 'Model NMTF with (k1, k2, k3, k4) = ({}, {}, {}, {})'.format(self.K[0], self.K[1], self.K[2], self.K[3])
class NMTF4:
#First load and convert to numpy arrays the data
R12 = sparse.load_npz('./tmp/R12.npz').toarray()
R23 = sparse.load_npz('./tmp/R23.npz').toarray()
R34 = sparse.load_npz('./tmp/R34.npz').toarray()
W3 = sparse.load_npz('./tmp/W3.npz').toarray()
W4 = sparse.load_npz('./tmp/W4.npz').toarray()
L3 = sparse.load_npz('./tmp/L3.npz').toarray()
L4 = sparse.load_npz('./tmp/L4.npz').toarray()
D3 = L3 + W3
D4 = L4 + W4
eps = 1e-8
n1, n2 = R12.shape
n3, n4 = R34.shape
def update(self, A, num, den):
return A*(num / (den + NMTF4.eps))**0.5
vupdate = np.vectorize(update)
def __init__(self, parameters, mask):
self.K = parameters
self.M = mask
self.iter = 0
def initialize(self):
self.R12_train = np.multiply(NMTF4.R12, self.M)
"""spherical k-means"""
#Only use the initial factors of the former model
self.G1 = NMTF1.G1
self.G2 = NMTF1.G2
self.G3 = NMTF2.G3
self.G4 = NMTF3.G4
self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
self.S23 = np.linalg.multi_dot([self.G2.transpose(), NMTF4.R23, self.G3])
self.S34 = np.linalg.multi_dot([self.G3.transpose(), NMTF4.R34, self.G4])
def iterate(self):
Gt2G2 = np.dot(self.G2.transpose(), self.G2)
G2Gt2 = np.dot(self.G2, self.G2.transpose())
G3Gt3 = np.dot(self.G3, self.G3.transpose())
Gt3G3 = np.dot(self.G3.transpose(), self.G3)
G4Gt4 = np.dot(self.G4, self.G4.transpose())
R12G2 = np.dot(self.R12_train, self.G2)
R23G3 = np.dot(NMTF4.R23, self.G3)
R34G4 = np.dot(NMTF4.R34, self.G4)
W3G3 = np.dot(NMTF4.W3, self.G3)
W4G4 = np.dot(NMTF4.W4, self.G4)
D3G3 = np.dot(NMTF4.D3, self.G3)
D4G4 = np.dot(NMTF4.D4, self.G4)
G3Gt3D3G3 = np.dot(G3Gt3, D3G3)
G4Gt4D4G4 = np.dot(G4Gt4, D4G4)
G3Gt3W3G3 = np.dot(G3Gt3, W3G3)
G4Gt4W4G4 = np.dot(G4Gt4, W4G4)
R12G2St12 = np.dot(R12G2, self.S12.transpose())
G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
R23G3St23 = np.dot(R23G3, self.S23.transpose())
G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
Rt23G2S23 = np.linalg.multi_dot([NMTF4.R23.transpose(),self.G2, self.S23])
G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
R34G4St34 = np.dot(R34G4, self.S34.transpose())
G3Gt3R34G4St34 = np.dot(G3Gt3,R34G4St34)
Rt34G3S34 = np.linalg.multi_dot([NMTF4.R34.transpose(),self.G3, self.S34])
G4Gt4Rt34G3S34 = np.dot(G4Gt4,Rt34G3S34)
Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
Gt3R34G4 = np.dot(self.G3.transpose(),R34G4)
Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
Gt3G3S34Gt4G4 = np.linalg.multi_dot([Gt3G3, self.S34, self.G4.transpose(), self.G4])
self.G1 = NMTF4.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
self.G2 = NMTF4.vupdate(self, self.G2, Rt12G1S12 + R23G3St23, G2Gt2Rt12G1S12 + G2Gt2R23G3St23)
self.G3 = NMTF4.vupdate(self, self.G3, Rt23G2S23 + R34G4St34 + W3G3 + G3Gt3D3G3, G3Gt3Rt23G2S23 + G3Gt3R34G4St34 + G3Gt3W3G3 + D3G3)
self.G4 = NMTF4.vupdate(self, self.G4, Rt34G3S34 + W4G4 + G4Gt4D4G4, G4Gt4Rt34G3S34 + G4Gt4W4G4 + D4G4)
self.S12 = NMTF4.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
self.S23 = NMTF4.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
self.S34 = NMTF4.vupdate(self, self.S34, Gt3R34G4, Gt3G3S34Gt4G4)
self.iter += 1
def validate(self, metric='aps'):
n, m = NMTF4.R12.shape
R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
R12_2 = []
R12_found_2 = []
for i in range(n):
for j in range(m):
if self.M[i, j] == 0:
R12_2.append(NMTF4.R12[i, j])
R12_found_2.append(R12_found[i, j])
if metric == 'auroc':
fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
return metrics.auc(fpr, tpr)
if metric == 'aps':
return metrics.average_precision_score(R12_2, R12_found_2)
def loss(self):
Gt3L3G3 = np.linalg.multi_dot([self.G3.transpose(), NMTF4.L3, self.G3])
Gt4L4G4 = np.linalg.multi_dot([self.G4.transpose(), NMTF4.L4, self.G4])
J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF4.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF4.R34 - np.linalg.multi_dot([self.G3, self.S34, self.G4.transpose()]), ord='fro')**2
J += np.trace(Gt3L3G3) + np.trace(Gt4L4G4)
return J
def __repr__(self):
return 'Model NMTF with (k1, k2, k3, k4) = ({}, {}, {}, {})'.format(self.K[0], self.K[1], self.K[2], self.K[3])
class NMTF5:
    """Five-entity non-negative matrix tri-factorization (NMTF) model.

    Factorizes the relation matrices R12, R23, R34 and R25 as
    Gi @ Sij @ Gj^T with non-negative factors, regularized by the graph
    Laplacians L3/L4 (with adjacencies W3/W4).  Factors G1..G4 are seeded
    from the previously fitted NMTF1/NMTF2/NMTF3 models, so those classes
    must have been initialized first.  All data matrices are loaded once,
    at class-definition time, from ./tmp/*.npz.
    """
    #First load and convert to numpy arrays the data
    R12 = sparse.load_npz('./tmp/R12.npz').toarray()
    R23 = sparse.load_npz('./tmp/R23.npz').toarray()
    R34 = sparse.load_npz('./tmp/R34.npz').toarray()
    R25 = sparse.load_npz('./tmp/R25.npz').toarray()
    W3 = sparse.load_npz('./tmp/W3.npz').toarray()
    W4 = sparse.load_npz('./tmp/W4.npz').toarray()
    L3 = sparse.load_npz('./tmp/L3.npz').toarray()
    L4 = sparse.load_npz('./tmp/L4.npz').toarray()
    # Degree matrices of the side-information graphs (D = L + W).
    D3 = L3 + W3
    D4 = L4 + W4
    # Small constant added to denominators to avoid division by zero.
    eps = 1e-8
    # Entity counts inferred from the relation matrices.
    n1, n2 = R12.shape
    n3, n4 = R34.shape
    n5 = R25.shape[1]
    def update(self, A, num, den):
        # Multiplicative update rule A <- A * sqrt(num / den); keeps A
        # non-negative as long as A, num and den are non-negative.
        return A*(num / (den + NMTF5.eps))**0.5
    # Element-wise (np.vectorize-broadcast) version of the update rule.
    vupdate = np.vectorize(update)
    def __init__(self, parameters, mask):
        # parameters: sequence of cluster counts (k1, k2, k3, k4, k5).
        # mask: binary matrix over R12; entries with mask == 1 are used for
        # training, entries with mask == 0 are held out for validation.
        self.K = parameters
        self.M = mask
        self.iter = 0
    def initialize(self):
        """Seed the factor matrices and compute the initial core matrices.

        G1/G2 come from NMTF1, G3 from NMTF2, G4 from NMTF3, and G5 from
        spherical k-means clustering of R25; the Sij cores are the projections
        of the corresponding relation matrices onto the seeded factors.
        """
        # Mask out held-out entries of R12 for training.
        self.R12_train = np.multiply(NMTF5.R12, self.M)
        """spherical k-means"""
        skm5 = SphericalKMeans(n_clusters=self.K[4])
        skm5.fit(NMTF5.R25)
        self.G1 = NMTF1.G1
        self.G2 = NMTF1.G2
        self.G3 = NMTF2.G3
        self.G4 = NMTF3.G4
        # cluster_centers_ is (k5, n5); transpose to get G5 of shape (n5, k5).
        self.G5 = skm5.cluster_centers_.transpose()
        self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
        self.S23 = np.linalg.multi_dot([self.G2.transpose(), NMTF5.R23, self.G3])
        self.S34 = np.linalg.multi_dot([self.G3.transpose(), NMTF5.R34, self.G4])
        self.S25 = np.linalg.multi_dot([self.G2.transpose(), NMTF5.R25, self.G5])
    def iterate(self):
        """Perform one multiplicative-update pass over all factors and cores.

        All numerator/denominator products are precomputed from the current
        factor values, then every factor is updated with the vectorized rule
        A <- A * sqrt(num / den).  Statement order matters: the products must
        be formed before any factor is overwritten.
        """
        # Gram matrices of the current factors.
        Gt2G2 = np.dot(self.G2.transpose(), self.G2)
        G2Gt2 = np.dot(self.G2, self.G2.transpose())
        G3Gt3 = np.dot(self.G3, self.G3.transpose())
        Gt3G3 = np.dot(self.G3.transpose(), self.G3)
        G4Gt4 = np.dot(self.G4, self.G4.transpose())
        # Relation matrices projected onto the factors.
        R12G2 = np.dot(self.R12_train, self.G2)
        R23G3 = np.dot(NMTF5.R23, self.G3)
        R34G4 = np.dot(NMTF5.R34, self.G4)
        R25G5 = np.dot(NMTF5.R25, self.G5)
        # Graph-regularization products (adjacency W and degree D terms).
        W3G3 = np.dot(NMTF5.W3, self.G3)
        W4G4 = np.dot(NMTF5.W4, self.G4)
        D3G3 = np.dot(NMTF5.D3, self.G3)
        D4G4 = np.dot(NMTF5.D4, self.G4)
        G3Gt3D3G3 = np.dot(G3Gt3, D3G3)
        G4Gt4D4G4 = np.dot(G4Gt4, D4G4)
        G3Gt3W3G3 = np.dot(G3Gt3, W3G3)
        G4Gt4W4G4 = np.dot(G4Gt4, W4G4)
        # Numerators and denominators of the factor updates.
        R12G2St12 = np.dot(R12G2, self.S12.transpose())
        G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
        Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
        G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
        R23G3St23 = np.dot(R23G3, self.S23.transpose())
        G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
        Rt23G2S23 = np.linalg.multi_dot([NMTF5.R23.transpose(),self.G2, self.S23])
        G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
        R34G4St34 = np.dot(R34G4, self.S34.transpose())
        G3Gt3R34G4St34 = np.dot(G3Gt3,R34G4St34)
        Rt34G3S34 = np.linalg.multi_dot([NMTF5.R34.transpose(),self.G3, self.S34])
        G4Gt4Rt34G3S34 = np.dot(G4Gt4,Rt34G3S34)
        Rt25G2S25 = np.linalg.multi_dot([NMTF5.R25.transpose(), self.G2, self.S25])
        G5G5tRt25G2S25 = np.linalg.multi_dot([self.G5, self.G5.transpose(), Rt25G2S25])
        R25G5St25 = np.dot(R25G5, self.S25.transpose())
        G2Gt2R25G5St25 = np.dot(G2Gt2, R25G5St25)
        # Numerators and denominators of the core-matrix updates.
        Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
        Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
        Gt3R34G4 = np.dot(self.G3.transpose(),R34G4)
        Gt2R25G5 = np.dot(self.G2.transpose(), R25G5)
        Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
        Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
        Gt3G3S34Gt4G4 = np.linalg.multi_dot([Gt3G3, self.S34, self.G4.transpose(), self.G4])
        Gt2G2S25Gt5G5 = np.linalg.multi_dot([Gt2G2, self.S25, self.G5.transpose(), self.G5])
        # Apply the multiplicative updates.
        self.G1 = NMTF5.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
        self.G2 = NMTF5.vupdate(self, self.G2, Rt12G1S12 + R23G3St23 + R25G5St25, G2Gt2Rt12G1S12 + G2Gt2R23G3St23 + G2Gt2R25G5St25)
        self.G3 = NMTF5.vupdate(self, self.G3, Rt23G2S23 + R34G4St34 + W3G3 + G3Gt3D3G3, G3Gt3Rt23G2S23 + G3Gt3R34G4St34 + G3Gt3W3G3 + D3G3)
        self.G4 = NMTF5.vupdate(self, self.G4, Rt34G3S34 + W4G4 + G4Gt4D4G4, G4Gt4Rt34G3S34 + G4Gt4W4G4 + D4G4)
        self.G5 = NMTF5.vupdate(self, self.G5, Rt25G2S25, G5G5tRt25G2S25)
        self.S12 = NMTF5.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
        self.S23 = NMTF5.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
        self.S34 = NMTF5.vupdate(self, self.S34, Gt3R34G4, Gt3G3S34Gt4G4)
        self.S25 = NMTF5.vupdate(self, self.S25, Gt2R25G5, Gt2G2S25Gt5G5)
        self.iter += 1
    def validate(self, metric='aps'):
        """Score the reconstruction of the held-out (mask == 0) entries of R12.

        metric : {'aps', 'auroc'} -- average precision or area under the ROC
        curve of the reconstructed scores against the true R12 values.
        NOTE(review): an unrecognized metric falls through and returns None.
        """
        n, m = NMTF5.R12.shape
        R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
        R12_2 = []
        R12_found_2 = []
        # Collect only the held-out entries for evaluation.
        for i in range(n):
            for j in range(m):
                if self.M[i, j] == 0:
                    R12_2.append(NMTF5.R12[i, j])
                    R12_found_2.append(R12_found[i, j])
        if metric == 'auroc':
            fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
            return metrics.auc(fpr, tpr)
        if metric == 'aps':
            return metrics.average_precision_score(R12_2, R12_found_2)
    def loss(self):
        """Return the objective value: squared Frobenius reconstruction
        errors of R12 (train), R23, R34 and R25, plus the Laplacian
        regularization traces for G3 and G4."""
        Gt3L3G3 = np.linalg.multi_dot([self.G3.transpose(), NMTF5.L3, self.G3])
        Gt4L4G4 = np.linalg.multi_dot([self.G4.transpose(), NMTF5.L4, self.G4])
        J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF5.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF5.R34 - np.linalg.multi_dot([self.G3, self.S34, self.G4.transpose()]), ord='fro')**2
        J += np.linalg.norm(NMTF5.R25 - np.linalg.multi_dot([self.G2, self.S25, self.G5.transpose()]), ord='fro')**2
        J += np.trace(Gt3L3G3) + np.trace(Gt4L4G4)
        return J
    def __repr__(self):
        # Summarize the model by its five cluster counts.
        return 'Model NMTF with (k1, k2, k3, k4, k5) = ({}, {}, {}, {}, {})'.format(self.K[0], self.K[1], self.K[2], self.K[3], self.K[4])
| [
"numpy.trace",
"numpy.multiply",
"numpy.linalg.multi_dot",
"sklearn.metrics.average_precision_score",
"scipy.sparse.load_npz",
"sklearn.metrics.auc",
"spherecluster.SphericalKMeans",
"numpy.dot",
"sklearn.metrics.roc_curve",
"numpy.vectorize"
] | [((709, 729), 'numpy.vectorize', 'np.vectorize', (['update'], {}), '(update)\n', (721, 729), True, 'import numpy as np\n'), ((3664, 3684), 'numpy.vectorize', 'np.vectorize', (['update'], {}), '(update)\n', (3676, 3684), True, 'import numpy as np\n'), ((7504, 7524), 'numpy.vectorize', 'np.vectorize', (['update'], {}), '(update)\n', (7516, 7524), True, 'import numpy as np\n'), ((12512, 12532), 'numpy.vectorize', 'np.vectorize', (['update'], {}), '(update)\n', (12524, 12532), True, 'import numpy as np\n'), ((18017, 18037), 'numpy.vectorize', 'np.vectorize', (['update'], {}), '(update)\n', (18029, 18037), True, 'import numpy as np\n'), ((915, 945), 'numpy.multiply', 'np.multiply', (['NMTF1.R12', 'self.M'], {}), '(NMTF1.R12, self.M)\n', (926, 945), True, 'import numpy as np\n'), ((1002, 1039), 'spherecluster.SphericalKMeans', 'SphericalKMeans', ([], {'n_clusters': 'self.K[0]'}), '(n_clusters=self.K[0])\n', (1017, 1039), False, 'from spherecluster import SphericalKMeans\n'), ((1100, 1137), 'spherecluster.SphericalKMeans', 'SphericalKMeans', ([], {'n_clusters': 'self.K[1]'}), '(n_clusters=self.K[1])\n', (1115, 1137), False, 'from spherecluster import SphericalKMeans\n'), ((1649, 1680), 'numpy.dot', 'np.dot', (['self.R12_train', 'self.G2'], {}), '(self.R12_train, self.G2)\n', (1655, 1680), True, 'import numpy as np\n'), ((1947, 1971), 'numpy.dot', 'np.dot', (['G2Gt2', 'Rt12G1S12'], {}), '(G2Gt2, Rt12G1S12)\n', (1953, 1971), True, 'import numpy as np\n'), ((3874, 3904), 'numpy.multiply', 'np.multiply', (['NMTF2.R12', 'self.M'], {}), '(NMTF2.R12, self.M)\n', (3885, 3904), True, 'import numpy as np\n'), ((3957, 3994), 'spherecluster.SphericalKMeans', 'SphericalKMeans', ([], {'n_clusters': 'self.K[2]'}), '(n_clusters=self.K[2])\n', (3972, 3994), False, 'from spherecluster import SphericalKMeans\n'), ((4717, 4748), 'numpy.dot', 'np.dot', (['self.R12_train', 'self.G2'], {}), '(self.R12_train, self.G2)\n', (4723, 4748), True, 'import numpy as np\n'), ((4765, 4791), 
'numpy.dot', 'np.dot', (['NMTF2.R23', 'self.G3'], {}), '(NMTF2.R23, self.G3)\n', (4771, 4791), True, 'import numpy as np\n'), ((5059, 5083), 'numpy.dot', 'np.dot', (['G2Gt2', 'Rt12G1S12'], {}), '(G2Gt2, Rt12G1S12)\n', (5065, 5083), True, 'import numpy as np\n'), ((5165, 5189), 'numpy.dot', 'np.dot', (['G2Gt2', 'R23G3St23'], {}), '(G2Gt2, R23G3St23)\n', (5171, 5189), True, 'import numpy as np\n'), ((5298, 5322), 'numpy.dot', 'np.dot', (['G3Gt3', 'Rt23G2S23'], {}), '(G3Gt3, Rt23G2S23)\n', (5304, 5322), True, 'import numpy as np\n'), ((5562, 5607), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[Gt2G2, self.S23, Gt3G3]'], {}), '([Gt2G2, self.S23, Gt3G3])\n', (5581, 5607), True, 'import numpy as np\n'), ((7709, 7739), 'numpy.multiply', 'np.multiply', (['NMTF3.R12', 'self.M'], {}), '(NMTF3.R12, self.M)\n', (7720, 7739), True, 'import numpy as np\n'), ((7796, 7833), 'spherecluster.SphericalKMeans', 'SphericalKMeans', ([], {'n_clusters': 'self.K[3]'}), '(n_clusters=self.K[3])\n', (7811, 7833), False, 'from spherecluster import SphericalKMeans\n'), ((8725, 8756), 'numpy.dot', 'np.dot', (['self.R12_train', 'self.G2'], {}), '(self.R12_train, self.G2)\n', (8731, 8756), True, 'import numpy as np\n'), ((8773, 8799), 'numpy.dot', 'np.dot', (['NMTF3.R23', 'self.G3'], {}), '(NMTF3.R23, self.G3)\n', (8779, 8799), True, 'import numpy as np\n'), ((8816, 8842), 'numpy.dot', 'np.dot', (['NMTF3.R34', 'self.G4'], {}), '(NMTF3.R34, self.G4)\n', (8822, 8842), True, 'import numpy as np\n'), ((9102, 9126), 'numpy.dot', 'np.dot', (['G2Gt2', 'Rt12G1S12'], {}), '(G2Gt2, Rt12G1S12)\n', (9108, 9126), True, 'import numpy as np\n'), ((9208, 9232), 'numpy.dot', 'np.dot', (['G2Gt2', 'R23G3St23'], {}), '(G2Gt2, R23G3St23)\n', (9214, 9232), True, 'import numpy as np\n'), ((9341, 9365), 'numpy.dot', 'np.dot', (['G3Gt3', 'Rt23G2S23'], {}), '(G3Gt3, Rt23G2S23)\n', (9347, 9365), True, 'import numpy as np\n'), ((9446, 9470), 'numpy.dot', 'np.dot', (['G3Gt3', 'R34G4St34'], {}), '(G3Gt3, R34G4St34)\n', 
(9452, 9470), True, 'import numpy as np\n'), ((9578, 9602), 'numpy.dot', 'np.dot', (['G4Gt4', 'Rt34G3S34'], {}), '(G4Gt4, Rt34G3S34)\n', (9584, 9602), True, 'import numpy as np\n'), ((9887, 9932), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[Gt2G2, self.S23, Gt3G3]'], {}), '([Gt2G2, self.S23, Gt3G3])\n', (9906, 9932), True, 'import numpy as np\n'), ((12722, 12752), 'numpy.multiply', 'np.multiply', (['NMTF4.R12', 'self.M'], {}), '(NMTF4.R12, self.M)\n', (12733, 12752), True, 'import numpy as np\n'), ((13526, 13557), 'numpy.dot', 'np.dot', (['self.R12_train', 'self.G2'], {}), '(self.R12_train, self.G2)\n', (13532, 13557), True, 'import numpy as np\n'), ((13574, 13600), 'numpy.dot', 'np.dot', (['NMTF4.R23', 'self.G3'], {}), '(NMTF4.R23, self.G3)\n', (13580, 13600), True, 'import numpy as np\n'), ((13617, 13643), 'numpy.dot', 'np.dot', (['NMTF4.R34', 'self.G4'], {}), '(NMTF4.R34, self.G4)\n', (13623, 13643), True, 'import numpy as np\n'), ((13661, 13686), 'numpy.dot', 'np.dot', (['NMTF4.W3', 'self.G3'], {}), '(NMTF4.W3, self.G3)\n', (13667, 13686), True, 'import numpy as np\n'), ((13702, 13727), 'numpy.dot', 'np.dot', (['NMTF4.W4', 'self.G4'], {}), '(NMTF4.W4, self.G4)\n', (13708, 13727), True, 'import numpy as np\n'), ((13743, 13768), 'numpy.dot', 'np.dot', (['NMTF4.D3', 'self.G3'], {}), '(NMTF4.D3, self.G3)\n', (13749, 13768), True, 'import numpy as np\n'), ((13784, 13809), 'numpy.dot', 'np.dot', (['NMTF4.D4', 'self.G4'], {}), '(NMTF4.D4, self.G4)\n', (13790, 13809), True, 'import numpy as np\n'), ((13830, 13849), 'numpy.dot', 'np.dot', (['G3Gt3', 'D3G3'], {}), '(G3Gt3, D3G3)\n', (13836, 13849), True, 'import numpy as np\n'), ((13870, 13889), 'numpy.dot', 'np.dot', (['G4Gt4', 'D4G4'], {}), '(G4Gt4, D4G4)\n', (13876, 13889), True, 'import numpy as np\n'), ((13910, 13929), 'numpy.dot', 'np.dot', (['G3Gt3', 'W3G3'], {}), '(G3Gt3, W3G3)\n', (13916, 13929), True, 'import numpy as np\n'), ((13950, 13969), 'numpy.dot', 'np.dot', (['G4Gt4', 'W4G4'], {}), '(G4Gt4, 
W4G4)\n', (13956, 13969), True, 'import numpy as np\n'), ((14237, 14261), 'numpy.dot', 'np.dot', (['G2Gt2', 'Rt12G1S12'], {}), '(G2Gt2, Rt12G1S12)\n', (14243, 14261), True, 'import numpy as np\n'), ((14343, 14367), 'numpy.dot', 'np.dot', (['G2Gt2', 'R23G3St23'], {}), '(G2Gt2, R23G3St23)\n', (14349, 14367), True, 'import numpy as np\n'), ((14476, 14500), 'numpy.dot', 'np.dot', (['G3Gt3', 'Rt23G2S23'], {}), '(G3Gt3, Rt23G2S23)\n', (14482, 14500), True, 'import numpy as np\n'), ((14581, 14605), 'numpy.dot', 'np.dot', (['G3Gt3', 'R34G4St34'], {}), '(G3Gt3, R34G4St34)\n', (14587, 14605), True, 'import numpy as np\n'), ((14713, 14737), 'numpy.dot', 'np.dot', (['G4Gt4', 'Rt34G3S34'], {}), '(G4Gt4, Rt34G3S34)\n', (14719, 14737), True, 'import numpy as np\n'), ((15022, 15067), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[Gt2G2, self.S23, Gt3G3]'], {}), '([Gt2G2, self.S23, Gt3G3])\n', (15041, 15067), True, 'import numpy as np\n'), ((18227, 18257), 'numpy.multiply', 'np.multiply', (['NMTF5.R12', 'self.M'], {}), '(NMTF5.R12, self.M)\n', (18238, 18257), True, 'import numpy as np\n'), ((18323, 18360), 'spherecluster.SphericalKMeans', 'SphericalKMeans', ([], {'n_clusters': 'self.K[4]'}), '(n_clusters=self.K[4])\n', (18338, 18360), False, 'from spherecluster import SphericalKMeans\n'), ((19234, 19265), 'numpy.dot', 'np.dot', (['self.R12_train', 'self.G2'], {}), '(self.R12_train, self.G2)\n', (19240, 19265), True, 'import numpy as np\n'), ((19282, 19308), 'numpy.dot', 'np.dot', (['NMTF5.R23', 'self.G3'], {}), '(NMTF5.R23, self.G3)\n', (19288, 19308), True, 'import numpy as np\n'), ((19325, 19351), 'numpy.dot', 'np.dot', (['NMTF5.R34', 'self.G4'], {}), '(NMTF5.R34, self.G4)\n', (19331, 19351), True, 'import numpy as np\n'), ((19368, 19394), 'numpy.dot', 'np.dot', (['NMTF5.R25', 'self.G5'], {}), '(NMTF5.R25, self.G5)\n', (19374, 19394), True, 'import numpy as np\n'), ((19419, 19444), 'numpy.dot', 'np.dot', (['NMTF5.W3', 'self.G3'], {}), '(NMTF5.W3, self.G3)\n', (19425, 
19444), True, 'import numpy as np\n'), ((19460, 19485), 'numpy.dot', 'np.dot', (['NMTF5.W4', 'self.G4'], {}), '(NMTF5.W4, self.G4)\n', (19466, 19485), True, 'import numpy as np\n'), ((19501, 19526), 'numpy.dot', 'np.dot', (['NMTF5.D3', 'self.G3'], {}), '(NMTF5.D3, self.G3)\n', (19507, 19526), True, 'import numpy as np\n'), ((19542, 19567), 'numpy.dot', 'np.dot', (['NMTF5.D4', 'self.G4'], {}), '(NMTF5.D4, self.G4)\n', (19548, 19567), True, 'import numpy as np\n'), ((19588, 19607), 'numpy.dot', 'np.dot', (['G3Gt3', 'D3G3'], {}), '(G3Gt3, D3G3)\n', (19594, 19607), True, 'import numpy as np\n'), ((19628, 19647), 'numpy.dot', 'np.dot', (['G4Gt4', 'D4G4'], {}), '(G4Gt4, D4G4)\n', (19634, 19647), True, 'import numpy as np\n'), ((19668, 19687), 'numpy.dot', 'np.dot', (['G3Gt3', 'W3G3'], {}), '(G3Gt3, W3G3)\n', (19674, 19687), True, 'import numpy as np\n'), ((19708, 19727), 'numpy.dot', 'np.dot', (['G4Gt4', 'W4G4'], {}), '(G4Gt4, W4G4)\n', (19714, 19727), True, 'import numpy as np\n'), ((19995, 20019), 'numpy.dot', 'np.dot', (['G2Gt2', 'Rt12G1S12'], {}), '(G2Gt2, Rt12G1S12)\n', (20001, 20019), True, 'import numpy as np\n'), ((20101, 20125), 'numpy.dot', 'np.dot', (['G2Gt2', 'R23G3St23'], {}), '(G2Gt2, R23G3St23)\n', (20107, 20125), True, 'import numpy as np\n'), ((20234, 20258), 'numpy.dot', 'np.dot', (['G3Gt3', 'Rt23G2S23'], {}), '(G3Gt3, Rt23G2S23)\n', (20240, 20258), True, 'import numpy as np\n'), ((20339, 20363), 'numpy.dot', 'np.dot', (['G3Gt3', 'R34G4St34'], {}), '(G3Gt3, R34G4St34)\n', (20345, 20363), True, 'import numpy as np\n'), ((20471, 20495), 'numpy.dot', 'np.dot', (['G4Gt4', 'Rt34G3S34'], {}), '(G4Gt4, Rt34G3S34)\n', (20477, 20495), True, 'import numpy as np\n'), ((20748, 20772), 'numpy.dot', 'np.dot', (['G2Gt2', 'R25G5St25'], {}), '(G2Gt2, R25G5St25)\n', (20754, 20772), True, 'import numpy as np\n'), ((21112, 21157), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[Gt2G2, self.S23, Gt3G3]'], {}), '([Gt2G2, self.S23, Gt3G3])\n', (21131, 21157), True, 
'import numpy as np\n'), ((525, 557), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R12.npz"""'], {}), "('./tmp/R12.npz')\n", (540, 557), False, 'from scipy import sparse\n'), ((2857, 2894), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (2874, 2894), True, 'import sklearn.metrics as metrics\n'), ((2914, 2935), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (2925, 2935), True, 'import sklearn.metrics as metrics\n'), ((2983, 3034), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (3014, 3034), True, 'import sklearn.metrics as metrics\n'), ((3391, 3423), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R12.npz"""'], {}), "('./tmp/R12.npz')\n", (3406, 3423), False, 'from scipy import sparse\n'), ((3444, 3476), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R23.npz"""'], {}), "('./tmp/R23.npz')\n", (3459, 3476), False, 'from scipy import sparse\n'), ((6516, 6553), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (6533, 6553), True, 'import sklearn.metrics as metrics\n'), ((6573, 6594), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (6584, 6594), True, 'import sklearn.metrics as metrics\n'), ((6642, 6693), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (6673, 6693), True, 'import sklearn.metrics as metrics\n'), ((7186, 7218), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R12.npz"""'], {}), "('./tmp/R12.npz')\n", (7201, 7218), False, 'from scipy import sparse\n'), ((7239, 7271), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R23.npz"""'], {}), "('./tmp/R23.npz')\n", (7254, 7271), False, 'from scipy import sparse\n'), ((7292, 7324), 'scipy.sparse.load_npz', 'sparse.load_npz', 
(['"""./tmp/R34.npz"""'], {}), "('./tmp/R34.npz')\n", (7307, 7324), False, 'from scipy import sparse\n'), ((11137, 11174), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (11154, 11174), True, 'import sklearn.metrics as metrics\n'), ((11194, 11215), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (11205, 11215), True, 'import sklearn.metrics as metrics\n'), ((11263, 11314), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (11294, 11314), True, 'import sklearn.metrics as metrics\n'), ((11943, 11975), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R12.npz"""'], {}), "('./tmp/R12.npz')\n", (11958, 11975), False, 'from scipy import sparse\n'), ((11996, 12028), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R23.npz"""'], {}), "('./tmp/R23.npz')\n", (12011, 12028), False, 'from scipy import sparse\n'), ((12049, 12081), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R34.npz"""'], {}), "('./tmp/R34.npz')\n", (12064, 12081), False, 'from scipy import sparse\n'), ((12105, 12136), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/W3.npz"""'], {}), "('./tmp/W3.npz')\n", (12120, 12136), False, 'from scipy import sparse\n'), ((12156, 12187), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/W4.npz"""'], {}), "('./tmp/W4.npz')\n", (12171, 12187), False, 'from scipy import sparse\n'), ((12207, 12238), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/L3.npz"""'], {}), "('./tmp/L3.npz')\n", (12222, 12238), False, 'from scipy import sparse\n'), ((12258, 12289), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/L4.npz"""'], {}), "('./tmp/L4.npz')\n", (12273, 12289), False, 'from scipy import sparse\n'), ((16348, 16385), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (16365, 16385), True, 'import 
sklearn.metrics as metrics\n'), ((16405, 16426), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (16416, 16426), True, 'import sklearn.metrics as metrics\n'), ((16474, 16525), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (16505, 16525), True, 'import sklearn.metrics as metrics\n'), ((17092, 17109), 'numpy.trace', 'np.trace', (['Gt3L3G3'], {}), '(Gt3L3G3)\n', (17100, 17109), True, 'import numpy as np\n'), ((17112, 17129), 'numpy.trace', 'np.trace', (['Gt4L4G4'], {}), '(Gt4L4G4)\n', (17120, 17129), True, 'import numpy as np\n'), ((17375, 17407), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R12.npz"""'], {}), "('./tmp/R12.npz')\n", (17390, 17407), False, 'from scipy import sparse\n'), ((17428, 17460), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R23.npz"""'], {}), "('./tmp/R23.npz')\n", (17443, 17460), False, 'from scipy import sparse\n'), ((17481, 17513), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R34.npz"""'], {}), "('./tmp/R34.npz')\n", (17496, 17513), False, 'from scipy import sparse\n'), ((17534, 17566), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/R25.npz"""'], {}), "('./tmp/R25.npz')\n", (17549, 17566), False, 'from scipy import sparse\n'), ((17586, 17617), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/W3.npz"""'], {}), "('./tmp/W3.npz')\n", (17601, 17617), False, 'from scipy import sparse\n'), ((17637, 17668), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/W4.npz"""'], {}), "('./tmp/W4.npz')\n", (17652, 17668), False, 'from scipy import sparse\n'), ((17688, 17719), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/L3.npz"""'], {}), "('./tmp/L3.npz')\n", (17703, 17719), False, 'from scipy import sparse\n'), ((17739, 17770), 'scipy.sparse.load_npz', 'sparse.load_npz', (['"""./tmp/L4.npz"""'], {}), "('./tmp/L4.npz')\n", (17754, 17770), False, 'from scipy import sparse\n'), 
((22709, 22746), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (22726, 22746), True, 'import sklearn.metrics as metrics\n'), ((22766, 22787), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (22777, 22787), True, 'import sklearn.metrics as metrics\n'), ((22835, 22886), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['R12_2', 'R12_found_2'], {}), '(R12_2, R12_found_2)\n', (22866, 22886), True, 'import sklearn.metrics as metrics\n'), ((23579, 23596), 'numpy.trace', 'np.trace', (['Gt3L3G3'], {}), '(Gt3L3G3)\n', (23587, 23596), True, 'import numpy as np\n'), ((23599, 23616), 'numpy.trace', 'np.trace', (['Gt4L4G4'], {}), '(Gt4L4G4)\n', (23607, 23616), True, 'import numpy as np\n')] |
"""edited from https://github.com/mightydeveloper/Deep-Compression-PyTorch"""
import torch
import numpy as np
from sklearn.cluster import KMeans, MiniBatchKMeans, AffinityPropagation, DBSCAN
# from sklearn.cluster import OPTICS
from scipy.sparse import csc_matrix, csr_matrix
def apply_weight_sharing(model, bits=10, c_name='kmeans'):
    """
    Apply weight sharing (clustered quantization) to the given model in place.

    Each eligible weight matrix is clustered into (at most) 2**bits shared
    values and every entry is replaced by its cluster's representative value.

    Parameters
    ----------
    model : torch.nn.Module
        Model whose parameters are quantized in place.
    bits : int, optional
        The codebook holds 2**bits shared values. Defaults to 10.
    c_name : str, optional
        Clustering strategy: 'kmeans', 'digitize', 'dbscan', or any name
        containing 'affinity'. Any other value leaves the weights unchanged.

    Returns
    -------
    torch.nn.Module
        The same model object, with quantized weights.
    """
    for name, param in model.named_parameters():
        lowered = name.lower()
        # Only quantize >=2-D weight matrices of the selected sub-module
        # ('corenet.3'), skipping layer norms, word embeddings and attention.
        if 'weight' in lowered and 'layernorm' not in lowered \
                and 'word_emb' not in lowered \
                and 'att' not in lowered \
                and 'corenet.3' in lowered \
                and len(param.data.size()) != 1:
            dev = param.device
            weight = param.data.cpu().numpy()
            shape = weight.shape
            # Sparse view keeps only the (structurally) non-zero entries.
            # NOTE(review): weights that are exactly 0.0 are dropped by the
            # sparse conversion, which would make the later reshape to
            # param.size() fail -- rare for trained float weights, but worth
            # confirming for pruned models.
            mat = csr_matrix(weight) if shape[0] < shape[1] else csc_matrix(weight)
            min_ = min(mat.data)
            max_ = max(mat.data)
            # Evenly spaced initial codebook spanning the weight range.
            space = np.linspace(min_, max_, num=2**bits)
            print(name)
            if c_name == 'kmeans':
                if len(mat.data) < 10e5:  # 10e5 == 1e6 entries
                    # NOTE(review): precompute_distances / algorithm="full"
                    # were removed in scikit-learn >= 1.0; confirm the pinned
                    # sklearn version supports them.
                    kmeans = KMeans(n_clusters=len(space), init=space.reshape(-1, 1), n_init=100,
                                    precompute_distances=True, algorithm="full")
                else:
                    kmeans = MiniBatchKMeans(n_clusters=len(space), init=space.reshape(-1, 1), n_init=100)
                kmeans.fit(mat.data.reshape(-1, 1))
                new_weight = kmeans.cluster_centers_[kmeans.labels_].reshape(-1)
                new_weight = new_weight.reshape(param.size())
            elif c_name == 'digitize':
                # Snap each weight to its codebook bin edge.
                new_weight = space[np.digitize(mat.data.reshape(-1, 1), bins=space, right=True)]\
                    .reshape(param.size()).astype('float32')
            elif c_name == 'dbscan':
                # BUGFIX: the original constructed DBSCAN with KMeans-only
                # kwargs (TypeError), had a no-op `dbscan` statement, and
                # never assigned new_weight (NameError downstream). Replace
                # each weight with its DBSCAN cluster's mean; noise points
                # (label -1) keep their original value.
                labels = DBSCAN().fit_predict(mat.data.reshape(-1, 1))
                quantized = mat.data.copy()
                for lab in np.unique(labels):
                    if lab != -1:
                        members = labels == lab
                        quantized[members] = quantized[members].mean()
                new_weight = quantized.reshape(param.size()).astype('float32')
            elif 'affinity' in c_name:
                # BUGFIX: AffinityPropagation does not accept the KMeans
                # kwargs, and the original indexed with kmeans.labels_
                # (undefined in this branch) instead of ap.labels_.
                ap = AffinityPropagation()
                ap.fit(mat.data.reshape(-1, 1))
                new_weight = ap.cluster_centers_[ap.labels_].reshape(-1)
                new_weight = new_weight.reshape(param.size())
            else:
                # Unknown strategy: leave the weights unchanged.
                new_weight = param.data.cpu().numpy()
            param.data = torch.from_numpy(new_weight).to(dev)
    return model
| [
"scipy.sparse.csr_matrix",
"numpy.linspace",
"scipy.sparse.csc_matrix",
"torch.from_numpy"
] | [((1000, 1038), 'numpy.linspace', 'np.linspace', (['min_', 'max_'], {'num': '(2 ** bits)'}), '(min_, max_, num=2 ** bits)\n', (1011, 1038), True, 'import numpy as np\n'), ((848, 866), 'scipy.sparse.csr_matrix', 'csr_matrix', (['weight'], {}), '(weight)\n', (858, 866), False, 'from scipy.sparse import csc_matrix, csr_matrix\n'), ((895, 913), 'scipy.sparse.csc_matrix', 'csc_matrix', (['weight'], {}), '(weight)\n', (905, 913), False, 'from scipy.sparse import csc_matrix, csr_matrix\n'), ((2450, 2478), 'torch.from_numpy', 'torch.from_numpy', (['new_weight'], {}), '(new_weight)\n', (2466, 2478), False, 'import torch\n')] |
# Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>
# All rights reserved.
# This code is licensed under BSD 3-Clause License.
import sys
import os
import numpy as np
if __name__ == '__main__':
    # Convert every '*_predict_3.xyz' point file in the given directory into
    # a Wavefront OBJ by prepending 'v' tags and appending the shared face
    # list of the template mesh.
    # Usage: python <script> <directory-with-xyz-files>
    xyz_list_path = sys.argv[1]
    xyzs = [xyz for xyz in os.listdir(xyz_list_path) if xyz.endswith('_predict_3.xyz')]
    # One OBJ 'v' tag per vertex row of the template mesh (2466 vertices).
    v = np.full([2466, 1], 'v')
    # PERF: the face list is identical for every mesh, so load it once
    # outside the loop instead of re-reading the file per input.
    face = np.loadtxt('/home/wc/workspace/P2MPP/data/face3.obj', dtype='|S32')
    for xyz in xyzs:
        print(xyz)
        obj_path = xyz.replace('.xyz', '.obj')
        xyzf = np.loadtxt(os.path.join(xyz_list_path, xyz))
        # Stack 'v x y z' vertex lines on top of the face records.
        out = np.vstack((np.hstack((v, xyzf)), face))
        np.savetxt(os.path.join(xyz_list_path, obj_path), out, fmt='%s', delimiter=' ')
| [
"os.listdir",
"numpy.hstack",
"os.path.join",
"numpy.full",
"numpy.loadtxt"
] | [((325, 348), 'numpy.full', 'np.full', (['[2466, 1]', '"""v"""'], {}), "([2466, 1], 'v')\n", (332, 348), True, 'import numpy as np\n'), ((511, 578), 'numpy.loadtxt', 'np.loadtxt', (['"""/home/wc/workspace/P2MPP/data/face3.obj"""'], {'dtype': '"""|S32"""'}), "('/home/wc/workspace/P2MPP/data/face3.obj', dtype='|S32')\n", (521, 578), True, 'import numpy as np\n'), ((256, 281), 'os.listdir', 'os.listdir', (['xyz_list_path'], {}), '(xyz_list_path)\n', (266, 281), False, 'import os\n'), ((462, 494), 'os.path.join', 'os.path.join', (['xyz_list_path', 'xyz'], {}), '(xyz_list_path, xyz)\n', (474, 494), False, 'import os\n'), ((652, 689), 'os.path.join', 'os.path.join', (['xyz_list_path', 'obj_path'], {}), '(xyz_list_path, obj_path)\n', (664, 689), False, 'import os\n'), ((604, 624), 'numpy.hstack', 'np.hstack', (['(v, xyzf)'], {}), '((v, xyzf))\n', (613, 624), True, 'import numpy as np\n')] |
"""
spectral.py
Frequency domain analysis of neural signals: creating PSD, fitting 1/f, spectral histograms
"""
import numpy as np
from scipy import signal
import matplotlib.pylab as plt
from sklearn import linear_model
def psd(x, Fs, method='mean', window='hann', nperseg=None, noverlap=None, filtlen=1.):
"""
Estimating the power spectral density (PSD) of a time series from short-time Fourier
Transform (mean, median), or the entire signal's FFT smoothed (medfilt)
Parameters
-----------
x : array_like 1d
Time series of measurement values
Fs : float, Hz
Sampling frequency of the x time series.
method : { 'mean', 'median', 'medfilt'}, optional
Methods to calculate the PSD. 'mean' is the same as Welch's method (mean of STFT), 'median' uses median of STFT instead of mean to minimize outlier effect. 'medfilt' filters the entire signals raw FFT with a median filter to smooth. Defaults to 'mean'.
The next 3 parameters are only relevant for method = {'mean', 'median'}
window : str or tuple or array_like, optional
Desired window to use. See scipy.signal.get_window for a list of windows and required parameters. If window is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or tuple, is set to 1 second of data, and if window is array_like, is set to the length of the window.
noverlap : int, optional
Number of points to overlap between segments. If None, noverlap = nperseg // 2. Defaults to None.
filten : float, Hz, optional
(For medfilt method) Length of median filter in Hz.
Returns
-------
freq : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density of x.
References
----------
Mostly relies on scipy.signal.spectrogram and numpy.fft
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spectrogram.html
"""
if method in ('mean', 'median'):
# welch-style spectrum (mean/median of STFT)
if nperseg is None:
if type(window) in (str, tuple):
# window is a string or tuple, defaults to 1 second of data
nperseg = int(Fs)
else:
# window is an array, defaults to window length
nperseg = len(window)
else:
nperseg = int(nperseg)
if noverlap is not None:
noverlap = int(noverlap)
# call signal.spectrogram function in scipy to compute STFT
freq, t, spg = signal.spectrogram(x, Fs, window, nperseg, noverlap)
if method is 'mean':
Pxx = np.mean(spg, axis=-1)
elif method is 'median':
Pxx = np.median(spg, axis=-1)
elif method is 'medfilt':
# median filtered FFT spectrum
# take the positive half of the spectrum since it's symmetrical
FT = np.fft.fft(x)[:int(np.ceil(len(x) / 2.))]
freq = np.fft.fftfreq(len(x), 1. / Fs)[:int(np.ceil(len(x) / 2.))] # get freq axis
# convert median filter length from Hz to samples
filtlen_samp = int(int(filtlen / (freq[1] - freq[0])) / 2 * 2 + 1)
Pxx = signal.medfilt(np.abs(FT)**2. / (Fs * len(x)), filtlen_samp)
else:
raise ValueError('Unknown PSD method: %s' % method)
return freq, Pxx
def scv(x, Fs, window='hann', nperseg=None, noverlap=0, outlierpct=None):
    """
    Compute the spectral coefficient of variation (SCV) at each frequency.
    White noise should have an SCV of 1 at all frequencies.

    Parameters
    -----------
    x : array_like 1d
        Time series of measurement values
    Fs : float, Hz
        Sampling frequency of the x time series.
    window : str or tuple or array_like, optional
        Desired window to use. See scipy.signal.get_window for a list of
        windows and required parameters. If window is array_like it will be
        used directly as the window and its length must be nperseg.
        Defaults to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or
        tuple, is set to 1 second of data, and if window is array_like, is
        set to the length of the window.
    noverlap : int, optional
        Number of points to overlap between segments.
        Defaults to 0 for independence.
    outlierpct : float, percent, optional
        Discard this percentage of the windows with the lowest and highest
        total log power before computing the SCV.

    Returns
    -------
    freq : ndarray
        Array of sample frequencies.
    spectcv : ndarray
        Spectral coefficient of variation (std / mean over windows).
    """
    if nperseg is None:
        if type(window) in (str, tuple):
            # window is a string or tuple, defaults to 1 second of data
            nperseg = int(Fs)
        else:
            # window is an array, defaults to window length
            nperseg = len(window)
    else:
        nperseg = int(nperseg)
    if noverlap is not None:
        noverlap = int(noverlap)
    freq, t, spg = signal.spectrogram(x, Fs, window, nperseg, noverlap)
    if outlierpct is not None:
        # discard time windows with very low and very high powers
        discard = int(spg.shape[1] / 100. * outlierpct)
        # BUGFIX: when discard == 0 the original slice [0:-0] selected ZERO
        # windows, making the SCV all-NaN; only trim when there is something
        # to trim.
        if discard > 0:
            outlieridx = np.argsort(np.mean(np.log10(spg), axis=0))[discard:-discard]
            spg = spg[:, outlieridx]
    spectcv = np.std(spg, axis=-1) / np.mean(spg, axis=-1)
    return freq, spectcv
def scv_rs(x, Fs, window='hann', nperseg=None, noverlap=0, method='bootstrap', rs_params=None):
    """
    Resampled version of scv: instead of a single estimate of mean and standard deviation, the spectrogram is resampled, either randomly (bootstrap) or time-stepped (rolling).
    Parameters
    -----------
    x : array_like 1d
        Time series of measurement values
    Fs : float, Hz
        Sampling frequency of the x time series.
    window : str or tuple or array_like, optional
        Desired window to use. See scipy.signal.get_window for a list of windows and required parameters. If window is array_like it will be used directly as the window and its length must be nperseg. Defaults to a Hann window.
    nperseg : int, optional
        Length of each segment. Defaults to None, but if window is str or tuple, is set to 1 second of data, and if window is array_like, is set to the length of the window.
    noverlap : int, optional
        Number of points to overlap between segments. Defaults to 0 for independence.
    method : {'bootstrap', 'rolling'}, optional
        Method of resampling: bootstrap randomly samples a subset of the spectrogram repeatedly, while rolling takes the rolling window scv.
        Defaults to 'bootstrap'.
    rs_params : tuple, (int, int), optional
        Parameters for resampling algorithm.
        If 'bootstrap', rs_params = (nslices, ndraws), representing number of slices per draw, and number of random draws, defaults to (10% of slices, 100 draws).
        If 'rolling', rs_params = (nslices, nsteps), representing number of slices per draw, and number of slices to step forward, defaults to (10, 5).
    Returns
    -------
    freq : ndarray
        Array of sample frequencies.
    T : ndarray
        Array of time indices, for 'rolling' resampling. If 'bootstrap', T = None.
    spectcv_rs : ndarray
        Resampled spectral coefficient of variation.
    """
    if nperseg is None:
        if type(window) in (str, tuple):
            # window is a string or tuple, defaults to 1 second of data
            nperseg = int(Fs)
        else:
            # window is an array, defaults to window length
            nperseg = len(window)
    else:
        nperseg = int(nperseg)
    if noverlap is not None:
        noverlap = int(noverlap)
    # compute spectrogram
    freq, t, spg = signal.spectrogram(x, Fs, window, nperseg, noverlap)
    # BUGFIX: compare strings with ==, not `is` (identity) — `is` only happens
    # to work for interned literals and silently rejects equal runtime strings.
    if method == 'bootstrap':
        # params are number of slices of STFT to compute SCV over, and number of draws
        if rs_params is None:
            # defaults to draw 1/10 of STFT slices, 100 draws
            rs_params = (int(spg.shape[1] / 10.), 100)
        nslices, ndraws = rs_params
        spectcv_rs = np.zeros((len(freq), ndraws))
        for draw in range(ndraws):
            # subsample slices without replacement within a draw; draws are independent
            idx = np.random.choice(spg.shape[1], size=nslices, replace=False)
            spectcv_rs[:, draw] = np.std(spg[:, idx], axis=-1) / np.mean(spg[:, idx], axis=-1)
        T = None  # no time component, return nothing
    elif method == 'rolling':
        # params are number of slices of STFT to compute SCV over, and number of slices to roll forward
        if rs_params is None:
            # defaults to 10 STFT slices, move forward by 5 slices
            rs_params = (10, 5)
        nslices, nsteps = rs_params
        outlen = int(np.ceil((spg.shape[1] - nslices) / float(nsteps))) + 1
        spectcv_rs = np.zeros((len(freq), outlen))
        for ind in range(outlen):
            curblock = spg[:, nsteps * ind:nslices + nsteps * ind]
            spectcv_rs[:, ind] = np.std(curblock, axis=-1) / np.mean(curblock, axis=-1)
        T = t[0::nsteps]  # grab the time indices from the spectrogram
    else:
        raise ValueError('Unknown resampling method: %s' % method)
    return freq, T, spectcv_rs
def spectral_hist(x, Fs, window='hann', nperseg=None, noverlap=None, nbins=50, flim=(0., 100.), cutpct=(0., 100.)):
    """
    Build, per frequency, the histogram of log10 spectrogram power.
    A single set of bin edges — evenly spaced between the global power
    percentiles given by `cutpct` — is shared by all frequencies, so the
    columns of the result are directly comparable.
    Parameters
    -----------
    x : array_like 1d
        Time series of measurement values
    Fs : float, Hz
        Sampling frequency of the x time series.
    window : str or tuple or array_like, optional
        Window passed to scipy.signal.spectrogram; defaults to a Hann window.
    nperseg : int, optional
        Segment length. If None: one second of data for a str/tuple window,
        otherwise the length of the supplied window array.
    noverlap : int, optional
        Number of points to overlap between segments.
    nbins : int, optional
        Number of histogram bins, default 50.
    flim : tuple, (start, end) in Hz, optional
        Frequency range kept (start inclusive, end exclusive). Default (0., 100.)
    cutpct : tuple, (low, high) percentiles, optional
        Power percentiles defining the lowest and highest bin edge. Default (0., 100.)
    Returns
    -------
    freq : ndarray
        Frequencies retained after applying `flim`.
    power_bins : ndarray
        The nbins+1 shared bin edges (log10 power).
    spect_hist : ndarray (2D)
        nbins x nfreq matrix of per-frequency probabilities, flipped so the
        highest-power bin is the first row (for plotting).
    """
    # resolve the segment length exactly as the sibling spectral helpers do
    if nperseg is not None:
        nperseg = int(nperseg)
    elif type(window) in (str, tuple):
        # named window: default to one second of data
        nperseg = int(Fs)
    else:
        # explicit window array: use its length
        nperseg = len(window)
    if noverlap is not None:
        noverlap = int(noverlap)
    freq, _, spg = signal.spectrogram(x, Fs, window, nperseg, noverlap, return_onesided=True)
    # time-by-frequency matrix of log10 power
    logpower = np.log10(spg).T
    # restrict both the matrix and the frequency axis to the band of interest
    keep = np.logical_and(freq >= flim[0], freq < flim[1])
    logpower = logpower[:, keep]
    freq = freq[keep]
    # shared bin edges, bounded by the requested global power percentiles
    lo, hi = np.percentile(logpower.ravel(), cutpct)
    power_bins = np.linspace(lo, hi, nbins + 1)
    # one normalized histogram per frequency column
    n_freqs = logpower.shape[1]
    spect_hist = np.zeros((n_freqs, nbins))
    for f_idx in range(n_freqs):
        counts, _ = np.histogram(logpower[:, f_idx], power_bins)
        spect_hist[f_idx] = counts / counts.sum()
    # orient as (power bins, frequencies) with high power in the top row
    spect_hist = np.flipud(spect_hist.T)
    return freq, power_bins, spect_hist
def plot_spectral_hist(freq, power_bins, spect_hist, psd_freq=None, psd=None):
    """
    Render a spectral histogram as an image, optionally overlaying a PSD.
    Parameters
    ----------
    freq : array_like, 1d
        Frequencies over which the histogram is calculated.
    power_bins : array_like, 1d
        Power bins within which histogram is aggregated.
    spect_hist : ndarray, 2d
        Spectral histogram to be plotted.
    psd_freq : array_like, 1d, optional
        Frequency axis of the PSD to be plotted.
    psd : array_like, 1d, optional
        PSD to be plotted over the histograms.
    """
    # figure height tracks the bin/frequency aspect so pixels stay roughly square
    fig_height = 12 * len(power_bins) / len(freq)
    plt.figure(figsize=(8, fig_height))
    # image spans the full frequency and power-bin ranges
    extent = [freq[0], freq[-1], power_bins[0], power_bins[-1]]
    plt.imshow(spect_hist, extent=extent, aspect='auto')
    plt.xlabel('Frequency (Hz)', fontsize=15)
    plt.ylabel('Log10 Power', fontsize=15)
    plt.colorbar(label='Probability')
    if psd is not None:
        # overlay the PSD, clipped to the histogram's frequency range
        in_range = np.logical_and(psd_freq >= freq[0], psd_freq <= freq[-1])
        plt.plot(psd_freq[in_range], np.log10(psd[in_range]), color='w', alpha=0.8)
def fit_slope(freq, psd, fit_frange, fit_excl=None, method='ols', plot_fit=False):
    """
    Fit PSD with straight line in log-log domain over the specified frequency range.
    Parameters
    ----------
    freq : array_like, 1d
        Frequency axis of PSD
    psd : array_like, 1d
        PSD to be fit over
    fit_frange : tuple, (start, end), Hz
        Frequency range to be fit over, in Hz, inclusive on both ends.
    fit_excl : list of tuples, [(start, end), (start, end), ...], Hz, optional
        Frequency ranges to be excluded from fit. Each element in list describes
        the start and end of an exclusion zone.
    method : str, {'ols', 'RANSAC'}, optional
        Line fitting method. Defaults to 'ols'
        'ols' is ordinary least squares fit with polyfit.
        'RANSAC' is iterative robust fit discarding outliers.
    plot_fit : bool, optional
        If True, the PSD is plotted, along with the actual fitted PSD (excluding exclusion freqs),
        as well as the fitted line itself. Defaults to False.
    Returns
    -------
    slope : float
        Slope of loglog fitted line, m in y = mx+b
    offset : float
        Offset of loglog fitted line, b in y = mx+b
    """
    # make a mask for included and excluded frequency regions
    fmask = np.zeros_like(freq)
    # get freq indices within the fit frequencies
    fmask[np.logical_and(freq >= fit_frange[0], freq <= fit_frange[1])] = 1
    # discard freq indices within the exclusion frequencies
    if fit_excl is not None:
        # if a tuple is given, convert it to a list
        if type(fit_excl) is tuple:
            fit_excl = [fit_excl]
        for exc_frange in fit_excl:
            fmask[np.logical_and(freq >= exc_frange[0], freq <= exc_frange[1])] = 0
    # grab the psd and freqs to be fit over
    logf = np.log10(freq[fmask == 1])
    logpsd = np.log10(psd[fmask == 1])
    # fit line
    # BUGFIX: compare method names with ==, not `is` — identity comparison only
    # happens to work for interned literals and rejects equal runtime strings.
    if method == 'ols':
        # solve regular least square
        slope, offset = np.polyfit(logf, logpsd, deg=1)
    elif method == 'RANSAC':
        # robust fit: iteratively discard outlier points
        lm = linear_model.RANSACRegressor(random_state=42)
        lm.fit(logf.reshape(-1, 1), logpsd.reshape(-1, 1))
        offset = lm.predict(0.)[0][0]
        slope = lm.estimator_.coef_[0][0]
    else:
        raise ValueError('Unknown PSD fitting method: %s' % method)
    if plot_fit:
        plt.figure(figsize=(5, 5))
        plt.plot(np.log10(freq), np.log10(psd), label='Whole PSD')
        plt.plot(logf, logpsd, '-o', label='Fitted PSD', alpha=0.4)
        plt.plot(logf, logf * slope + offset, '-k', label='Fit Line', lw=3)
        plt.legend()
        plt.xlabel('Log10 Frequency (Hz)', fontsize=15)
        plt.ylabel('Log10 Power (V^2/Hz)', fontsize=15)
    return slope, offset
| [
"numpy.log10",
"numpy.polyfit",
"scipy.signal.spectrogram",
"matplotlib.pylab.imshow",
"numpy.mean",
"numpy.histogram",
"matplotlib.pylab.figure",
"matplotlib.pylab.legend",
"numpy.fft.fft",
"numpy.linspace",
"matplotlib.pylab.plot",
"numpy.abs",
"numpy.flipud",
"numpy.random.choice",
"m... | [((5201, 5253), 'scipy.signal.spectrogram', 'signal.spectrogram', (['x', 'Fs', 'window', 'nperseg', 'noverlap'], {}), '(x, Fs, window, nperseg, noverlap)\n', (5219, 5253), False, 'from scipy import signal\n'), ((8015, 8067), 'scipy.signal.spectrogram', 'signal.spectrogram', (['x', 'Fs', 'window', 'nperseg', 'noverlap'], {}), '(x, Fs, window, nperseg, noverlap)\n', (8033, 8067), False, 'from scipy import signal\n'), ((11814, 11888), 'scipy.signal.spectrogram', 'signal.spectrogram', (['x', 'Fs', 'window', 'nperseg', 'noverlap'], {'return_onesided': '(True)'}), '(x, Fs, window, nperseg, noverlap, return_onesided=True)\n', (11832, 11888), False, 'from scipy import signal\n'), ((12323, 12367), 'numpy.linspace', 'np.linspace', (['power_min', 'power_max', '(nbins + 1)'], {}), '(power_min, power_max, nbins + 1)\n', (12334, 12367), True, 'import numpy as np\n'), ((12687, 12711), 'numpy.transpose', 'np.transpose', (['spect_hist'], {}), '(spect_hist)\n', (12699, 12711), True, 'import numpy as np\n'), ((12729, 12750), 'numpy.flipud', 'np.flipud', (['spect_hist'], {}), '(spect_hist)\n', (12738, 12750), True, 'import numpy as np\n'), ((13598, 13699), 'matplotlib.pylab.imshow', 'plt.imshow', (['spect_hist'], {'extent': '[freq[0], freq[-1], power_bins[0], power_bins[-1]]', 'aspect': '"""auto"""'}), "(spect_hist, extent=[freq[0], freq[-1], power_bins[0], power_bins\n [-1]], aspect='auto')\n", (13608, 13699), True, 'import matplotlib.pylab as plt\n'), ((13699, 13740), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {'fontsize': '(15)'}), "('Frequency (Hz)', fontsize=15)\n", (13709, 13740), True, 'import matplotlib.pylab as plt\n'), ((13745, 13783), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Log10 Power"""'], {'fontsize': '(15)'}), "('Log10 Power', fontsize=15)\n", (13755, 13783), True, 'import matplotlib.pylab as plt\n'), ((13788, 13821), 'matplotlib.pylab.colorbar', 'plt.colorbar', ([], {'label': '"""Probability"""'}), "(label='Probability')\n", 
(13800, 13821), True, 'import matplotlib.pylab as plt\n'), ((15616, 15635), 'numpy.zeros_like', 'np.zeros_like', (['freq'], {}), '(freq)\n', (15629, 15635), True, 'import numpy as np\n'), ((16150, 16176), 'numpy.log10', 'np.log10', (['freq[fmask == 1]'], {}), '(freq[fmask == 1])\n', (16158, 16176), True, 'import numpy as np\n'), ((16190, 16215), 'numpy.log10', 'np.log10', (['psd[fmask == 1]'], {}), '(psd[fmask == 1])\n', (16198, 16215), True, 'import numpy as np\n'), ((2722, 2774), 'scipy.signal.spectrogram', 'signal.spectrogram', (['x', 'Fs', 'window', 'nperseg', 'noverlap'], {}), '(x, Fs, window, nperseg, noverlap)\n', (2740, 2774), False, 'from scipy import signal\n'), ((5537, 5557), 'numpy.std', 'np.std', (['spg'], {'axis': '(-1)'}), '(spg, axis=-1)\n', (5543, 5557), True, 'import numpy as np\n'), ((5560, 5581), 'numpy.mean', 'np.mean', (['spg'], {'axis': '(-1)'}), '(spg, axis=-1)\n', (5567, 5581), True, 'import numpy as np\n'), ((11949, 11962), 'numpy.log10', 'np.log10', (['spg'], {}), '(spg)\n', (11957, 11962), True, 'import numpy as np\n'), ((12095, 12142), 'numpy.logical_and', 'np.logical_and', (['(freq >= flim[0])', '(freq < flim[1])'], {}), '(freq >= flim[0], freq < flim[1])\n', (12109, 12142), True, 'import numpy as np\n'), ((12274, 12296), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['ps'], {}), '(ps)\n', (12292, 12296), True, 'import numpy as np\n'), ((12527, 12561), 'numpy.histogram', 'np.histogram', (['ps[:, i]', 'power_bins'], {}), '(ps[:, i], power_bins)\n', (12539, 12561), True, 'import numpy as np\n'), ((15696, 15756), 'numpy.logical_and', 'np.logical_and', (['(freq >= fit_frange[0])', '(freq <= fit_frange[1])'], {}), '(freq >= fit_frange[0], freq <= fit_frange[1])\n', (15710, 15756), True, 'import numpy as np\n'), ((16317, 16348), 'numpy.polyfit', 'np.polyfit', (['logf', 'logpsd'], {'deg': '(1)'}), '(logf, logpsd, deg=1)\n', (16327, 16348), True, 'import numpy as np\n'), ((16682, 16708), 'matplotlib.pylab.figure', 'plt.figure', ([], 
{'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (16692, 16708), True, 'import matplotlib.pylab as plt\n'), ((16784, 16843), 'matplotlib.pylab.plot', 'plt.plot', (['logf', 'logpsd', '"""-o"""'], {'label': '"""Fitted PSD"""', 'alpha': '(0.4)'}), "(logf, logpsd, '-o', label='Fitted PSD', alpha=0.4)\n", (16792, 16843), True, 'import matplotlib.pylab as plt\n'), ((16852, 16919), 'matplotlib.pylab.plot', 'plt.plot', (['logf', '(logf * slope + offset)', '"""-k"""'], {'label': '"""Fit Line"""', 'lw': '(3)'}), "(logf, logf * slope + offset, '-k', label='Fit Line', lw=3)\n", (16860, 16919), True, 'import matplotlib.pylab as plt\n'), ((16928, 16940), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (16938, 16940), True, 'import matplotlib.pylab as plt\n'), ((16949, 16996), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Log10 Frequency (Hz)"""'], {'fontsize': '(15)'}), "('Log10 Frequency (Hz)', fontsize=15)\n", (16959, 16996), True, 'import matplotlib.pylab as plt\n'), ((17005, 17052), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Log10 Power (V^2/Hz)"""'], {'fontsize': '(15)'}), "('Log10 Power (V^2/Hz)', fontsize=15)\n", (17015, 17052), True, 'import matplotlib.pylab as plt\n'), ((2822, 2843), 'numpy.mean', 'np.mean', (['spg'], {'axis': '(-1)'}), '(spg, axis=-1)\n', (2829, 2843), True, 'import numpy as np\n'), ((8565, 8624), 'numpy.random.choice', 'np.random.choice', (['spg.shape[1]'], {'size': 'nslices', 'replace': '(False)'}), '(spg.shape[1], size=nslices, replace=False)\n', (8581, 8624), True, 'import numpy as np\n'), ((12030, 12077), 'numpy.logical_and', 'np.logical_and', (['(freq >= flim[0])', '(freq < flim[1])'], {}), '(freq >= flim[0], freq < flim[1])\n', (12044, 12077), True, 'import numpy as np\n'), ((16392, 16437), 'sklearn.linear_model.RANSACRegressor', 'linear_model.RANSACRegressor', ([], {'random_state': '(42)'}), '(random_state=42)\n', (16420, 16437), False, 'from sklearn import linear_model\n'), ((16726, 16740), 'numpy.log10', 'np.log10', (['freq'], 
{}), '(freq)\n', (16734, 16740), True, 'import numpy as np\n'), ((16742, 16755), 'numpy.log10', 'np.log10', (['psd'], {}), '(psd)\n', (16750, 16755), True, 'import numpy as np\n'), ((2895, 2918), 'numpy.median', 'np.median', (['spg'], {'axis': '(-1)'}), '(spg, axis=-1)\n', (2904, 2918), True, 'import numpy as np\n'), ((3074, 3087), 'numpy.fft.fft', 'np.fft.fft', (['x'], {}), '(x)\n', (3084, 3087), True, 'import numpy as np\n'), ((8659, 8687), 'numpy.std', 'np.std', (['spg[:, idx]'], {'axis': '(-1)'}), '(spg[:, idx], axis=-1)\n', (8665, 8687), True, 'import numpy as np\n'), ((8690, 8719), 'numpy.mean', 'np.mean', (['spg[:, idx]'], {'axis': '(-1)'}), '(spg[:, idx], axis=-1)\n', (8697, 8719), True, 'import numpy as np\n'), ((13934, 13991), 'numpy.logical_and', 'np.logical_and', (['(psd_freq >= freq[0])', '(psd_freq <= freq[-1])'], {}), '(psd_freq >= freq[0], psd_freq <= freq[-1])\n', (13948, 13991), True, 'import numpy as np\n'), ((16028, 16088), 'numpy.logical_and', 'np.logical_and', (['(freq >= exc_frange[0])', '(freq <= exc_frange[1])'], {}), '(freq >= exc_frange[0], freq <= exc_frange[1])\n', (16042, 16088), True, 'import numpy as np\n'), ((5447, 5460), 'numpy.log10', 'np.log10', (['spg'], {}), '(spg)\n', (5455, 5460), True, 'import numpy as np\n'), ((9337, 9362), 'numpy.std', 'np.std', (['curblock'], {'axis': '(-1)'}), '(curblock, axis=-1)\n', (9343, 9362), True, 'import numpy as np\n'), ((9365, 9391), 'numpy.mean', 'np.mean', (['curblock'], {'axis': '(-1)'}), '(curblock, axis=-1)\n', (9372, 9391), True, 'import numpy as np\n'), ((14020, 14077), 'numpy.logical_and', 'np.logical_and', (['(psd_freq >= freq[0])', '(psd_freq <= freq[-1])'], {}), '(psd_freq >= freq[0], psd_freq <= freq[-1])\n', (14034, 14077), True, 'import numpy as np\n'), ((3370, 3380), 'numpy.abs', 'np.abs', (['FT'], {}), '(FT)\n', (3376, 3380), True, 'import numpy as np\n')] |
import time
import numpy as np
import tensorflow as tf
from dater import reader
from modeler.multirnnmodel import MultiRNNModel
from trainer.tftrainer import TFTrainer
class MutiRNNTrainer(TFTrainer):
    """Trainer for a multi-layer RNN PTB language model using the TF1 graph/session API."""
    def __init__(self):
        # One config for training and a copy for evaluation; evaluation is run
        # one token at a time (batch_size = num_steps = 1).
        self.config = SmallConfig()
        self.eval_config = SmallConfig()
        self.eval_config.batch_size = 1
        self.eval_config.num_steps = 1
        pass
    def get_model(self):
        """Store the model class; it is instantiated later inside train()."""
        self.multiRNNModel = MultiRNNModel
        pass
    def get_data(self):
        """Load the PTB train/valid/test splits via the project reader."""
        raw_data = reader.ptb_raw_data('data/simple-examples.tar/data/')
        self.train_data, self.valid_data, self.test_data, _ = raw_data
        pass
    def train(self):
        """Build train/valid/test graphs sharing one variable scope and run the epoch loop."""
        with tf.Graph().as_default():
            initializer = tf.random_uniform_initializer(-self.config.init_scale,
                                                          self.config.init_scale)
            with tf.name_scope("Train"):
                train_input = PTBInput(config=self.config, data=self.train_data, name="TrainInput")
                with tf.variable_scope("Model", reuse=None, initializer=initializer):
                    m = self.multiRNNModel(is_training=True, config=self.config, input_=train_input)
            with tf.name_scope("Valid"):
                valid_input = PTBInput(config=self.config, data=self.valid_data, name="ValidInput")
                # reuse=True: valid/test models share the trained variables
                with tf.variable_scope("Model", reuse=True, initializer=initializer):
                    mvalid = self.multiRNNModel(is_training=False, config=self.config, input_=valid_input)
            with tf.name_scope("Test"):
                test_input = PTBInput(config=self.eval_config, data=self.test_data, name="TestInput")
                with tf.variable_scope("Model", reuse=True, initializer=initializer):
                    mtest = self.multiRNNModel(is_training=False, config=self.eval_config,
                                               input_=test_input)
            sv = tf.train.Supervisor()
            with sv.managed_session() as session:
                for i in range(self.config.max_max_epoch):
                    # exponential LR decay kicks in after max_epoch warm epochs
                    lr_decay = self.config.lr_decay ** max(i + 1 - self.config.max_epoch, 0.0)
                    m.assign_lr(session, self.config.learning_rate * lr_decay)
                    print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
                    train_perplexity = self.run_epoch(session, m, eval_op=m.train_op,
                                                      verbose=True)
                    print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
                    valid_perplexity = self.run_epoch(session, mvalid)
                    print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
                # final test evaluation after all training epochs
                test_perplexity = self.run_epoch(session, mtest)
                print("Test Perplexity: %.3f" % test_perplexity)
        pass
    def run_epoch(self, session, model, eval_op=None, verbose=False):
        """Runs the model on the given data.

        Carries the recurrent state forward between steps and returns the
        epoch perplexity, exp(total_cost / total_steps).
        """
        costs = 0.0
        iters = 0
        state = session.run(model.initial_state)
        start_time = time.time()
        fetches = {
            "cost": model.cost,
            "final_state": model.final_state,
        }
        if eval_op is not None:
            # only run the training op when one is supplied (training mode)
            fetches["eval_op"] = eval_op
        for step in range(model.input.epoch_size):
            feed_dict = {}
            # feed the previous (c, h) state back in, layer by layer
            for i, (c, h) in enumerate(model.initial_state):
                feed_dict[c] = state[i].c
                feed_dict[h] = state[i].h
            vals = session.run(fetches, feed_dict)
            cost = vals["cost"]
            state = vals["final_state"]
            costs += cost
            iters += model.input.num_steps
            if verbose and step % (model.input.epoch_size // 10) == 10:
                # progress report roughly every 10% of the epoch
                print("%.3f perplexity: %.3f speed: %.0f wps" %
                      (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
                       iters * model.input.batch_size / (time.time() - start_time)))
        return np.exp(costs / iters)
class SmallConfig(object):
    """Small config.

    Hyperparameters for the PTB language model; read by MutiRNNTrainer and
    presumably by MultiRNNModel (the model class is defined elsewhere).
    """
    init_scale = 0.1      # uniform-init range used by the trainer's initializer
    learning_rate = 1.0   # initial learning rate, decayed after max_epoch
    max_grad_norm = 5     # presumably gradient clipping norm — used by the model
    num_layers = 2        # presumably number of stacked RNN layers
    num_steps = 20        # unrolled time steps per batch (see PTBInput)
    hidden_size = 200     # presumably RNN hidden units
    max_epoch = 4         # epochs at full learning rate before decay starts
    max_max_epoch = 13    # total number of training epochs
    keep_prob = 1.0       # presumably dropout keep probability (1.0 = none)
    lr_decay = 0.5        # multiplicative LR decay per epoch after max_epoch
    batch_size = 20       # sequences per batch (eval copy overrides to 1)
    vocab_size = 10000    # vocabulary size of the PTB corpus
class PTBInput(object):
    """The input data.

    Wraps reader.ptb_producer to expose batched input/target tensors plus the
    batch geometry (batch_size, num_steps, epoch_size) taken from `config`.
    """
    def __init__(self, config, data, name=None):
        # Mirror batch geometry from the config for convenient access.
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        # Number of full (batch_size x num_steps) blocks available per epoch.
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        # Tensors produced by the project reader; exact semantics are defined
        # by reader.ptb_producer (not visible here).
        self.input_data, self.targets = reader.ptb_producer(
            data, batch_size, num_steps, name=name)
| [
"tensorflow.Graph",
"tensorflow.variable_scope",
"numpy.exp",
"dater.reader.ptb_raw_data",
"tensorflow.name_scope",
"tensorflow.train.Supervisor",
"dater.reader.ptb_producer",
"tensorflow.random_uniform_initializer",
"time.time"
] | [((524, 577), 'dater.reader.ptb_raw_data', 'reader.ptb_raw_data', (['"""data/simple-examples.tar/data/"""'], {}), "('data/simple-examples.tar/data/')\n", (543, 577), False, 'from dater import reader\n'), ((3122, 3133), 'time.time', 'time.time', ([], {}), '()\n', (3131, 3133), False, 'import time\n'), ((4054, 4075), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (4060, 4075), True, 'import numpy as np\n'), ((4703, 4762), 'dater.reader.ptb_producer', 'reader.ptb_producer', (['data', 'batch_size', 'num_steps'], {'name': 'name'}), '(data, batch_size, num_steps, name=name)\n', (4722, 4762), False, 'from dater import reader\n'), ((748, 826), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-self.config.init_scale)', 'self.config.init_scale'], {}), '(-self.config.init_scale, self.config.init_scale)\n', (777, 826), True, 'import tensorflow as tf\n'), ((1951, 1972), 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {}), '()\n', (1970, 1972), True, 'import tensorflow as tf\n'), ((901, 923), 'tensorflow.name_scope', 'tf.name_scope', (['"""Train"""'], {}), "('Train')\n", (914, 923), True, 'import tensorflow as tf\n'), ((1230, 1252), 'tensorflow.name_scope', 'tf.name_scope', (['"""Valid"""'], {}), "('Valid')\n", (1243, 1252), True, 'import tensorflow as tf\n'), ((1565, 1586), 'tensorflow.name_scope', 'tf.name_scope', (['"""Test"""'], {}), "('Test')\n", (1578, 1586), True, 'import tensorflow as tf\n'), ((697, 707), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (705, 707), True, 'import tensorflow as tf\n'), ((1046, 1109), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': 'None', 'initializer': 'initializer'}), "('Model', reuse=None, initializer=initializer)\n", (1063, 1109), True, 'import tensorflow as tf\n'), ((1375, 1438), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('Model', reuse=True, 
initializer=initializer)\n", (1392, 1438), True, 'import tensorflow as tf\n'), ((1711, 1774), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Model"""'], {'reuse': '(True)', 'initializer': 'initializer'}), "('Model', reuse=True, initializer=initializer)\n", (1728, 1774), True, 'import tensorflow as tf\n'), ((3930, 3951), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (3936, 3951), True, 'import numpy as np\n'), ((4010, 4021), 'time.time', 'time.time', ([], {}), '()\n', (4019, 4021), False, 'import time\n')] |
import numpy as np
from ..utils import slice_row_sparse
from .metrics import NDCG
def evaluate(model, Xtr, Xts, target_users,
             topk=100, metrics=(NDCG,), min_targets=1000,
             from_to=('user', 'item')):
    """
    Score `model` on the held-out interactions in `Xts`.

    Parameters
    ----------
    model : object exposing `predict(user, Xtr, from_to, topk)` returning an
        array of ranked item indices.
    Xtr : sparse matrix of training interactions, passed through to the model.
    Xts : sparse matrix of held-out interactions (rows indexed by user).
    target_users : iterable of user indices, or None to sample `min_targets`
        users uniformly without replacement.
    topk : int, cutoff for the ranked predictions and the metrics.
    metrics : iterable of metric classes accepting `topk=` and exposing
        `compute(trues, preds, true_rels=...)`. Defaults to (NDCG,);
        a tuple is used instead of a list to avoid a mutable default argument.
    min_targets : int, number of users sampled when `target_users` is None.
    from_to : tuple passed through to `model.predict`.

    Returns
    -------
    dict mapping str(metric_instance) to its computed score.
    """
    if target_users is None:
        # sample evaluation users without replacement from the test matrix
        target_users = np.random.choice(Xts.shape[0], min_targets, False)
    trues, true_rels, preds = [], [], []
    for u in target_users:
        true, rel = slice_row_sparse(Xts, u)
        if len(true) == 0:
            # user has no held-out interactions: nothing to score, skip
            continue
        true_rels.append(rel)
        trues.append(true)
        pred = model.predict(u, Xtr, from_to, topk)
        preds.append(pred.astype(np.int32))
    # instantiate each metric at the requested cutoff and aggregate results
    metric_objs = [Metric(topk=topk) for Metric in metrics]
    result = {
        str(metric): metric.compute(trues, preds, true_rels=true_rels)
        for metric in metric_objs
    }
    return result
| [
"numpy.random.choice"
] | [((294, 344), 'numpy.random.choice', 'np.random.choice', (['Xts.shape[0]', 'min_targets', '(False)'], {}), '(Xts.shape[0], min_targets, False)\n', (310, 344), True, 'import numpy as np\n')] |
import os
import sys
from openbabel import openbabel as ob
from openbabel import pybel as pb
import gen3D
import numpy as np
from statistics import mean
import props
# Module-level element property table (provides vdw_radius lookups, see
# check_overlap_mm_region_v2); built once at import from the project `props` module.
ELEMENT_TABLE = props.ElementData()
class FILTER(object):
def __init__(self, reactant_file, cluster_bond_file = None, fixed_atoms = None):
self.reactant_file = reactant_file
self.cluster_bond_file = cluster_bond_file
self.fixed_atoms = fixed_atoms
if self.fixed_atoms:
with open(self.fixed_atoms, 'r') as f:
lines = f.read()
self.fixed_atoms = eval(lines)
def initialization(self):
mol = next(pb.readfile('xyz', self.reactant_file))
if self.cluster_bond_file:
m = pb.ob.OBMol()
m.BeginModify()
for atom in mol:
coords = [coord for coord in atom.coords]
atomno = atom.atomicnum
obatom = ob.OBAtom()
obatom.thisown = 0
obatom.SetAtomicNum(atomno)
obatom.SetVector(*coords)
m.AddAtom(obatom)
del obatom
with open(self.cluster_bond_file, 'r') as f:
lines = f.read()
cluster_bond = eval(lines)
bonds = [(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx(), bond.GetBondOrder())
for bond in pb.ob.OBMolBondIter(mol.OBMol)]
bonds.extend(cluster_bond)
for bond in bonds:
m.AddBond(bond[0], bond[1], bond[2])
# m.ConnectTheDots()
m.PerceiveBondOrders()
# m.SetTotalSpinMultiplicity(1)
m.SetTotalCharge(int(mol.charge))
m.Center()
m.EndModify()
self.mol = gen3D.Molecule(m)
else:
self.mol = gen3D.Molecule(mol.OBMol)
self.atoms = tuple(atom.atomicnum for atom in self.mol)
for frag in self.mol.write('can').split()[0].split('.'):
if '[OH]' in frag and 'Sn' not in frag:
return 'job_fail', 'non-bonded OH group'
return 'pass', 'pass'
def check_feasible_rxn(self, check_mm_overlap = True, qmmm = None, qm_atoms = 23, threshold_ratio = 0.6):
status, msg = self.initialization()
if status == 'job_fail':
return 'job_fail', msg
if check_mm_overlap:
status, msg = self.check_overlap_mm_region_v2(qmmm = qmmm, qm_atoms = qm_atoms, threshold_ratio = threshold_ratio)
else:
status = True
if status:
status, msg = self.check_reactant_bonds()
if status:
status, msg = self.check_unreasonable_connection()
if status:
return 'job_success', msg
else:
return 'job_fail', msg
else:
return 'job_fail', msg
else:
return 'job_fail', msg
def check_reactant_bonds(self):
# extract reactant bonds
reactant_bonds = [tuple(sorted((bond.GetBeginAtomIdx() - 1, bond.GetEndAtomIdx() - 1)) + [bond.GetBondOrder()])
for bond in pb.ob.OBMolBondIter(self.mol.OBMol)]
self.reactant_bonds = tuple(sorted(reactant_bonds))
# check the bond order and save in a dict
bond_type = {}
for i in range(len(self.atoms)):
num = 0
for j in reactant_bonds:
if j[0] == i or j[1] == i:
num += j[2]
bond_type[i] = num
if 0 in bond_type.values(): # The dissociated atom
return False, 'Have dissociated atom.'
else:
for idx, i in enumerate(self.atoms):
# use != or > need test
if i == 6 and idx not in self.fixed_atoms:
if bond_type[idx] < 3 or bond_type[idx] > 4: # remove only C=O
return False, 'reactant carbon bond type is invalid.({})'.format(bond_type[idx])
# use != or > need test
elif i == 8 and bond_type[idx] > 2 and idx not in self.fixed_atoms: # Here we can't use !=2 because some time reactant O don't detect bind on Sn
return False, 'reactant oxygen bond type is invalid.({})'.format(bond_type[idx])
# use != or > need test
elif i == 14 and bond_type[idx] != 4 and idx not in self.fixed_atoms:
return False, 'reactant silicon bond type is invalid.({})'.format(bond_type[idx])
# While the bronsted acid already have proton on the active site, then aborted.
elif i == 8 and bond_type[idx] > 3:
return False, 'oxygen have more than 4 connection.({})'.format(bond_type[idx])
return True, 'bond type check is pass.'
def check_unreasonable_connection(self):
# Use generator is more efficient
reactant_carbon = [idx for idx, reactant_atoms in enumerate(self.atoms) if idx not in self.fixed_atoms and reactant_atoms == 6]
reactant_oxygen = [idx for idx, reactant_atoms in enumerate(self.atoms) if idx not in self.fixed_atoms and reactant_atoms == 8]
active_site_oxygen = [active_site_atom for active_site_atom in self.fixed_atoms if self.atoms[active_site_atom] == 8]
active_site_silicon = [active_site_atom for active_site_atom in self.fixed_atoms if self.atoms[active_site_atom] == 14]
active_site_metal = [active_site_atom for active_site_atom in self.fixed_atoms if self.atoms[active_site_atom] in [42, 50, 74]]
hcap = [active_site_atom for active_site_atom in self.fixed_atoms if self.atoms[active_site_atom] == 1]
for bond in self.reactant_bonds:
if (bond[0] in reactant_oxygen and bond[1] in active_site_silicon) or (bond[1] in reactant_oxygen and bond[0] in active_site_silicon):
return False, 'reactant oxygen have connection with active site silicon.'
elif (bond[0] not in self.fixed_atoms and bond[1] in hcap) or (bond[1] not in self.fixed_atoms and bond[0] in hcap):
return False, 'reactant have connection with hcap.'
elif (bond[0] in reactant_carbon and bond[1] in active_site_oxygen) or (bond[1] in reactant_carbon and bond[0] in active_site_oxygen):
return False, 'reactant carbon have connection with active site oxygen.'
elif (bond[0] in reactant_oxygen and bond[1] in active_site_oxygen) or (bond[1] in reactant_oxygen and bond[0] in active_site_oxygen):
return False, 'reactant oxygen have connection with active site oxygen.'
return True, 'check_unreasonable_connection is pass.'
def check_overlap_mm_region(self, qm_silicon = [], threshold = 5.4):
# mm silicon index start from 0
# Choose the silicon in cluster model which is in mm region
self.mol.gen3D(self.fixed_atoms, make3D=False)
if len(self.mol.mols) == 1:
return True, 'pass'
else:
nodes_1 = [mol.toNode() for mol in self.mol.mols]
fd1 = [node.getCentroid() for node in nodes_1]
tmps, dist2 = [], []
for idx, i in enumerate(self.mol.mols):
if all(idx2 not in self.fixed_atoms for idx2 in i.mols_indices[idx]):
if any(self.atoms[idx2] == 6 or self.atoms[idx2] == 8 for idx2 in i.mols_indices[idx]):
tmps.append(idx)
for tmp in tmps:
for qm_si in qm_silicon:
diff2 = self.mol[qm_si].coords - fd1[tmp]
dist2.append(np.linalg.norm(diff2))
# print(max(dist2))
# print(mean(dist2))
if max(dist2) > 6.5 and mean(dist2) > 5.4:
# print(max(dist2))
# print(mean(dist2))
return False, 'Overlap with the mm region'
else:
return True, 'pass'
    def check_overlap_mm_region_v2(self, qmmm = None, qm_atoms = 23, threshold_ratio = 0.6):
        """
        Flag QM/MM steric overlap: every QM atom must be farther than
        ``threshold_ratio`` times its van der Waals radius from every MM atom.

        Args:
            qmmm: iterable of atoms for the full QM/MM system; atoms with
                index < ``qm_atoms`` are treated as QM, the rest as MM.
            qm_atoms: number of leading atoms considered QM (in both
                ``self.mol`` and ``qmmm``).
            threshold_ratio: fraction of the vdW radius used as the minimum
                allowed QM-MM distance.

        Returns:
            (bool, str): (True, 'pass') when no pair is too close, otherwise
            (False, 'Maybe overlap with the mm region').
        """
        dist2 = []
        for idx1, qm_atom in enumerate(self.mol):
            # Only the first `qm_atoms` atoms of self.mol are QM atoms.
            if idx1 >= qm_atoms:
                continue
            for idx2, mm_atom in enumerate(qmmm):
                # Skip the QM part of the combined system: compare QM vs MM only.
                if idx2 < qm_atoms:
                    continue
                diff2 = np.array(qm_atom.coords) - np.array(mm_atom.coords)
                dist = np.linalg.norm(diff2)
                # vdW radius of the QM atom's element (project-level table).
                element = ELEMENT_TABLE.from_atomic_number(qm_atom.OBAtom.GetAtomicNum())
                vdw_rad = element.vdw_radius
                if dist < vdw_rad * threshold_ratio:
                    dist2.append(dist)
        # Any recorded distance means at least one QM-MM pair is too close.
        if dist2:
            return False, 'Maybe overlap with the mm region'
        else:
            return True, 'pass'
# cluster_bond = '/mnt/d/Lab/QMproject/AutomaticReactionDiscovery/script/bonds.txt'
# fixed_atoms = '/mnt/d/Lab/QMproject/AutomaticReactionDiscovery/script/fixed_atoms.txt'
# qmmm = '/mnt/d/Lab/QMproject/AutomaticReactionDiscovery/code/ard/qmmm.xyz'
# qmmm_mol = next(pb.readfile('xyz', qmmm))
# # reactant_file = os.path.join('/mnt/d/Lab/QMproject/AutomaticReactionDiscovery/code/ard/reactions', 'UYFCJUSUJARWAH-UHFFFAOYSA-N_9/product.xyz')
# # f = FILTER(reactant_file=reactant_file, cluster_bond_file=cluster_bond, fixed_atoms = fixed_atoms)
# # msg = f.check_overlap_mm_region_2(qmmm = qmmm_mol, qm_atoms = 23)
# a = os.listdir('/mnt/d/Lab/QMproject/AutomaticReactionDiscovery/code/ard/reactions')
# for i in a:
# print('---------')
# print(i)
# b = os.path.join('/mnt/d/Lab/QMproject/AutomaticReactionDiscovery/code/ard/reactions', i)
# reactant_file = os.path.join(b, 'reactant.xyz')
# f = FILTER(reactant_file, cluster_bond_file=cluster_bond, fixed_atoms = fixed_atoms)
# status, msg = f.check_feasible_rxn(check_mm_overlap = True, qmmm = qmmm_mol, qm_atoms = 23, threshold_ratio = 0.6)
# # state, msg = f.check_feasible_rxn(check_mm_overlap=True)
# # print(state)
# # print(msg) | [
"statistics.mean",
"openbabel.openbabel.OBAtom",
"openbabel.pybel.ob.OBMolBondIter",
"gen3D.Molecule",
"openbabel.pybel.ob.OBMol",
"numpy.array",
"numpy.linalg.norm",
"props.ElementData",
"openbabel.pybel.readfile"
] | [((192, 211), 'props.ElementData', 'props.ElementData', ([], {}), '()\n', (209, 211), False, 'import props\n'), ((680, 718), 'openbabel.pybel.readfile', 'pb.readfile', (['"""xyz"""', 'self.reactant_file'], {}), "('xyz', self.reactant_file)\n", (691, 718), True, 'from openbabel import pybel as pb\n'), ((773, 786), 'openbabel.pybel.ob.OBMol', 'pb.ob.OBMol', ([], {}), '()\n', (784, 786), True, 'from openbabel import pybel as pb\n'), ((1823, 1840), 'gen3D.Molecule', 'gen3D.Molecule', (['m'], {}), '(m)\n', (1837, 1840), False, 'import gen3D\n'), ((1880, 1905), 'gen3D.Molecule', 'gen3D.Molecule', (['mol.OBMol'], {}), '(mol.OBMol)\n', (1894, 1905), False, 'import gen3D\n'), ((972, 983), 'openbabel.openbabel.OBAtom', 'ob.OBAtom', ([], {}), '()\n', (981, 983), True, 'from openbabel import openbabel as ob\n'), ((3279, 3314), 'openbabel.pybel.ob.OBMolBondIter', 'pb.ob.OBMolBondIter', (['self.mol.OBMol'], {}), '(self.mol.OBMol)\n', (3298, 3314), True, 'from openbabel import pybel as pb\n'), ((8725, 8746), 'numpy.linalg.norm', 'np.linalg.norm', (['diff2'], {}), '(diff2)\n', (8739, 8746), True, 'import numpy as np\n'), ((1428, 1458), 'openbabel.pybel.ob.OBMolBondIter', 'pb.ob.OBMolBondIter', (['mol.OBMol'], {}), '(mol.OBMol)\n', (1447, 1458), True, 'from openbabel import pybel as pb\n'), ((7955, 7966), 'statistics.mean', 'mean', (['dist2'], {}), '(dist2)\n', (7959, 7966), False, 'from statistics import mean\n'), ((8649, 8673), 'numpy.array', 'np.array', (['qm_atom.coords'], {}), '(qm_atom.coords)\n', (8657, 8673), True, 'import numpy as np\n'), ((8676, 8700), 'numpy.array', 'np.array', (['mm_atom.coords'], {}), '(mm_atom.coords)\n', (8684, 8700), True, 'import numpy as np\n'), ((7828, 7849), 'numpy.linalg.norm', 'np.linalg.norm', (['diff2'], {}), '(diff2)\n', (7842, 7849), True, 'import numpy as np\n')] |
import sys
import cv2
import math
import os
import json
import pandas as pd
import numpy as np
import json
# Per-stroke metadata loaded at import time from result.json.
# NOTE(review): the initial placeholder dict below is immediately replaced
# by the contents of result.json a few lines down.
stroke_data = {
    "data" : []
}
stroke_data_ = open('result.json', 'r')
stroke_data = json.loads(stroke_data_.read())
print(stroke_data['data'])
stroke_data_.close()
# Filled later by get_speed(); maps stroke number -> speed (km/h).
speed_for_each_stroke = {}
def read_coordinates():
    """Read table corner coordinates from ``coordinates.txt``.

    The file contains space-separated points, each point being
    comma-separated integers, e.g. ``"12,34 56,78"``.

    Returns:
        list[list[int]]: one ``[x, y]`` list per point, in file order.
    """
    # Context manager closes the handle even on error; the original
    # implementation leaked the file descriptor.
    with open("coordinates.txt", 'r') as f:
        data = f.read().strip()
    str_table_coordinates = data.split(' ')
    # Parse each "x,y" token into a list of ints.
    return [list(map(int, token.split(','))) for token in str_table_coordinates]
def find_net(table_coordinates):
    """Locate the net's x coordinate.

    Averages the x values of corner points 1 and 4 (the mid points of the
    table's top and bottom edges) and truncates to an int.
    """
    x_top = table_coordinates[1][0]
    x_bottom = table_coordinates[4][0]
    return int((x_top + x_bottom) / 2)
def get_coordinate_of_net_cross(x_coords, midpoint_x=270):
    """Return the index of the first x coordinate at or beyond the net.

    Returns None when the trajectory never reaches ``midpoint_x``.
    """
    print(list(x_coords), midpoint_x)
    for index, x_value in enumerate(list(x_coords)):
        if x_value >= midpoint_x:
            return index
    return None
# 1 2 3 4 5 6 7 8 9 9 10
def euclidian_distance(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2)."""
    delta_x = x1 - x2
    delta_y = y1 - y2
    return math.sqrt(delta_x ** 2 + delta_y ** 2)
def get_speed(buffer=3, fps = 20):
    """Estimate ball speed (km/h) for each stroke in final_result.csv.

    For every stroke, takes ``2*buffer + 1`` trajectory points centred on
    the frame where the ball crosses the net, sums the pixel distances
    between consecutive points, converts to metres using the known table
    length (2.74 m) and to km/h via ``fps``.

    Args:
        buffer: number of trajectory points kept on each side of the
            net-crossing frame.
        fps: capture frame rate used to convert distance to speed.

    Returns:
        dict: stroke number (1-based) -> speed in km/h.
    """
    speed_for_each_stroke = {}
    table_coordinates = read_coordinates()
    midpoint_x = find_net(table_coordinates)
    coordinates_df = pd.read_csv('final_result.csv')
    stroke_number = coordinates_df['stroke_number']
    x_coords = coordinates_df['x']
    y_coords = coordinates_df['y']
    x_coords = list(x_coords)
    y_coords = list(y_coords)
    strokes_x_y = []
    prev_stroke_number = list(stroke_number)[0]
    stroke = []
    # separate strokes into array: split rows whenever stroke_number changes
    for i in coordinates_df.iterrows():
        # print(i[1]['stroke_number'])
        if prev_stroke_number != i[1]['stroke_number']:
            prev_stroke_number = i[1]['stroke_number']
            strokes_x_y.append(np.asarray(stroke))
            stroke = []
        stroke.append([i[1]['x'], i[1]['y']])
    # Flush the final stroke accumulated after the loop.
    strokes_x_y.append(np.asarray(stroke))
    # import sys
    # sys.exit(1)
    # Midpoints of the table's left and right edges (corners 0/5 and 2/3).
    left_midpoint_coordinate = [
        ((table_coordinates[0][0] + table_coordinates[5][0]) / 2),
        ((table_coordinates[0][1] + table_coordinates[5][1]) / 2)]
    right_midpoint_coordinate = [
        ((table_coordinates[2][0] + table_coordinates[3][0]) / 2),
        ((table_coordinates[2][1] + table_coordinates[3][1]) / 2)]
    strokes_x_y = np.asarray(strokes_x_y)
    # Table length in pixels; used as the pixels-to-metres scale below.
    table_distance = euclidian_distance(
        left_midpoint_coordinate[0],
        left_midpoint_coordinate[1],
        right_midpoint_coordinate[0],
        right_midpoint_coordinate[1])
    # print(stroke)
    import sys
    print(len(strokes_x_y))
    for pos, stroke in enumerate(strokes_x_y):
        print('ffs', pos)
        x_coords = stroke[:, 0]
        y_coords = stroke[:, 1]
        speed_list = []
        position_of_net_cross = get_coordinate_of_net_cross(x_coords, midpoint_x)
        distances = []
        print(position_of_net_cross, 'pos', midpoint_x)
        # sys.exit(1)
        print(len(x_coords), len(y_coords))
        # Collect up to 2*buffer+1 points around the net crossing.
        # NOTE(review): bare except silently truncates the window when the
        # crossing is near either end of the stroke (or was not found, in
        # which case position_of_net_cross is None and this skips entirely).
        try:
            for point in range(position_of_net_cross-buffer, position_of_net_cross+buffer+1):
                speed_list.append([x_coords[point], y_coords[point]])
        except:
            pass
        for i in range(len(speed_list)-1):
            distances.append(euclidian_distance(
                speed_list[i][0], speed_list[i][1], speed_list[i+1][0], speed_list[i+1][1]
            ))
        # Average pixel speed over the window (pixels per second).
        speed = sum(distances) / (1 / fps * ((len(distances)+1)))
        # Pixels -> metres: a table is 2.74 m long.
        speed = (speed * 2.74) / table_distance
        # find speed for each stroke
        # m/s -> km/h.
        speed *= 18 / 5
        speed_for_each_stroke[pos+1] = speed
    return speed_for_each_stroke
def get_zone(ball_coordinates, no_columns=6, no_rows=3):
tt_table = np.zeros((3, 6), np.int32)
table_map = {
"03" : 1,
"04" : 2,
"05" : 3,
"13" : 4,
"14" : 5,
"15" : 6,
"23" : 7,
"24" : 8,
"25" : 9
}
col_seg = 1920 / no_columns
row_seg = 1080 / no_rows
bounce_col = 0
bounce_row = 0
for i in range(1, no_columns+1):
if ball_coordinates[0] < col_seg * i:
bounce_col = i
break
for i in range(1, no_rows+1):
if ball_coordinates[1] < row_seg * i:
bounce_row = i
break
# tt_table[bounce_row-1][bounce_col-1] += 1
valid_check = str(bounce_row) + str(bounce_col)
if valid_check in table_map:
return str(table_map[valid_check])
else:
print('wrong coords')
return str(3)
def warp_coordinates(x, y, matrix):
    """Apply a 3x3 perspective (homography) ``matrix`` to point (x, y).

    Returns the warped point as ``[int(px), int(py)]`` (truncated).
    """
    print('warping')
    print(x, y, 'pints')
    # Standard projective transform: divide by the homogeneous coordinate.
    denominator = matrix[2][0] * x + matrix[2][1] * y + matrix[2][2]
    warped_x = (matrix[0][0] * x + matrix[0][1] * y + matrix[0][2]) / denominator
    warped_y = (matrix[1][0] * x + matrix[1][1] * y + matrix[1][2]) / denominator
    print(warped_x, warped_y, ' warped pints')
    return [int(warped_x), int(warped_y)]
def get_transposed_coordinates(x, y):
    """Warp a frame coordinate into the top-down table view and zone it.

    Builds a perspective transform from the four outer table corners
    (read from coordinates.txt) onto a 1920x1080 rectangle, warps (x, y)
    through it, and returns the zone label from :func:`get_zone`.

    Returns:
        str: zone label ('1'-'9', or '3' for invalid positions).
    """
    table_coordinates = read_coordinates()
    # Corners 0, 2, 3, 5 are the four outer table corners; corners 1 and 4
    # are the net midpoints and are excluded here.
    table_coordinates_corners = np.array([
        table_coordinates[0],
        table_coordinates[2],
        table_coordinates[3],
        table_coordinates[5]
    ], np.float32)
    print(table_coordinates, 'table coords')
    # Destination rectangle: full-frame top-down view of the table.
    warped_dimensions = np.array([[0, 0], [1920, 0], [0, 1080], [1920, 1080]], np.float32)
    matrix = cv2.getPerspectiveTransform(table_coordinates_corners,
                                         warped_dimensions)
    x, y = warp_coordinates(x, y, matrix)
    position = get_zone([x,y])
    return position
def caluclate_speed_and_placements():
    """Combine per-stroke speed and placement into stroke_speed_result.json.

    Reads the ball trajectory from final_result.csv, computes the speed of
    every stroke via :func:`get_speed`, the landing zone of the last bounce
    via :func:`get_transposed_coordinates`, and rewrites the module-level
    ``stroke_data`` entries before dumping them to disk.

    NOTE: the misspelled name is kept for backward compatibility with
    existing callers.
    """
    global stroke_data
    result = pd.read_csv('final_result.csv')
    # First/last trajectory point of each stroke.
    left_bounces = result.groupby("stroke_number").first().reset_index()
    right_bounces = result.groupby("stroke_number").last().reset_index()
    speed_for_each_stroke = get_speed()
    print(speed_for_each_stroke, 'speed', len(left_bounces))
    # Walk the strokes pairwise (first point, last point) and rebuild the
    # per-stroke record in place.
    for bc, bounces in enumerate(zip(left_bounces.iterrows(),
                                     right_bounces.iterrows())):
        print(bc, 'bc')
        left, right = bounces[0][1], bounces[1][1]
        print(left['x'], right['x'], 'lr')
        data_ = {
            "stroke_name": stroke_data['data'][bc]['stroke_name'],
            "stroke_number": left['stroke_number'],
            "position": get_transposed_coordinates(right['x'], right['y']),
            "speed": speed_for_each_stroke[left['stroke_number']]
        }
        stroke_data['data'][bc] = data_
    # Context manager guarantees the JSON output is flushed and closed;
    # the original never closed the handle.
    with open('stroke_speed_result.json', 'w') as file_:
        json.dump(stroke_data, file_)
# Script entry point: run the full speed/placement pipeline.
if __name__ == "__main__":
    caluclate_speed_and_placements()
| [
"pandas.read_csv",
"cv2.getPerspectiveTransform",
"numpy.asarray",
"math.sqrt",
"numpy.array",
"numpy.zeros",
"json.dump"
] | [((1182, 1224), 'math.sqrt', 'math.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (1191, 1224), False, 'import math\n'), ((1422, 1453), 'pandas.read_csv', 'pd.read_csv', (['"""final_result.csv"""'], {}), "('final_result.csv')\n", (1433, 1453), True, 'import pandas as pd\n'), ((2484, 2507), 'numpy.asarray', 'np.asarray', (['strokes_x_y'], {}), '(strokes_x_y)\n', (2494, 2507), True, 'import numpy as np\n'), ((3950, 3976), 'numpy.zeros', 'np.zeros', (['(3, 6)', 'np.int32'], {}), '((3, 6), np.int32)\n', (3958, 3976), True, 'import numpy as np\n'), ((5323, 5437), 'numpy.array', 'np.array', (['[table_coordinates[0], table_coordinates[2], table_coordinates[3],\n table_coordinates[5]]', 'np.float32'], {}), '([table_coordinates[0], table_coordinates[2], table_coordinates[3],\n table_coordinates[5]], np.float32)\n', (5331, 5437), True, 'import numpy as np\n'), ((5551, 5617), 'numpy.array', 'np.array', (['[[0, 0], [1920, 0], [0, 1080], [1920, 1080]]', 'np.float32'], {}), '([[0, 0], [1920, 0], [0, 1080], [1920, 1080]], np.float32)\n', (5559, 5617), True, 'import numpy as np\n'), ((5633, 5706), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['table_coordinates_corners', 'warped_dimensions'], {}), '(table_coordinates_corners, warped_dimensions)\n', (5660, 5706), False, 'import cv2\n'), ((5942, 5973), 'pandas.read_csv', 'pd.read_csv', (['"""final_result.csv"""'], {}), "('final_result.csv')\n", (5953, 5973), True, 'import pandas as pd\n'), ((6876, 6905), 'json.dump', 'json.dump', (['stroke_data', 'file_'], {}), '(stroke_data, file_)\n', (6885, 6905), False, 'import json\n'), ((2090, 2108), 'numpy.asarray', 'np.asarray', (['stroke'], {}), '(stroke)\n', (2100, 2108), True, 'import numpy as np\n'), ((1977, 1995), 'numpy.asarray', 'np.asarray', (['stroke'], {}), '(stroke)\n', (1987, 1995), True, 'import numpy as np\n')] |
import numpy as np
# Valid-projection mask of NYUv2 (480x640 frames): only the central crop
# rows 44..470, cols 40..600 are valid; the border is zeroed out.
PMASK = np.zeros([480, 640], dtype=np.float64)
PMASK[44:471, 40:601] = 1.0
# Metric names in reporting order (keys produced by get_metrics).
METRIC_NAMES = [
    'RMSE',
    'Mean RMSE',
    'Mean Log10',
    'Abs Rel Diff',
    'Squa Rel Diff',
    'delta < 1.25',
    'delta < 1.25^2',
    'delta < 1.25^3',
]
def get_metrics(
        depths, preds, projection_mask=True, masks=None, rmse_only=False):
    '''
    Compute standard monocular-depth evaluation metrics.

    Args:
        depths: a list of ground truth depth maps, of dtype np.float64
            in range (0,1).
        preds: a list of predictions, of dtype np.float64, in range (0,1).
        projection_mask: if True, apply the valid projection mask of NYUv2,
            in which case each map must have shape 480x640.
        masks: optional list of per-image validity masks.
        rmse_only: if True, return only the scalar RMSE.

    Returns:
        The scalar RMSE when ``rmse_only`` is True, otherwise a dictionary
        of different metrics.
    '''
    # Check shape and dtype
    assert len(preds) == len(depths)
    for pred, depth in zip(preds, depths):
        assert pred.dtype == np.float64
        assert depth.dtype == np.float64
        assert pred.shape == depth.shape
    # Values are stored in (0,1); scale by 10 (NYUv2 max depth in metres).
    preds = np.stack(preds, axis=0) * 10.0
    depths = np.stack(depths, axis=0) * 10.0
    results = {}
    # Validity mask: positive ground truth, optionally combined with the
    # user-supplied masks and the NYUv2 projection mask.
    if masks:
        masks = np.stack(masks, axis=0)
        masks = np.float64(depths > 0) * np.float64(masks)
    else:
        masks = np.float64(depths > 0)
    if projection_mask:
        assert masks.shape[1:] == (480, 640)
        masks = masks * PMASK[None]
    # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement.
    masks = masks.astype(bool)
    npixels = np.sum(masks, axis=(1, 2))
    diff = preds - depths
    # MSE, RMSE (computed over all valid pixels, then averaged per image)
    mse = np.sum(diff**2.0 * masks, axis=(1, 2)) / npixels
    mse = np.mean(mse)
    rmse = np.sqrt(mse)
    if rmse_only:
        return rmse
    results['MSE'] = mse
    results['RMSE'] = rmse
    # Threshold accuracy: fraction of pixels whose max depth ratio is
    # below 1.25^k.
    delta = np.maximum(preds / depths, depths / preds)
    delta1 = np.sum(np.float64(delta < 1.25) * masks, axis=(1, 2)) / npixels
    delta2 = np.sum(np.float64(delta < 1.25**2) * masks, axis=(1, 2)) / npixels
    delta3 = np.sum(np.float64(delta < 1.25**3) * masks, axis=(1, 2)) / npixels
    results['delta < 1.25'] = np.mean(delta1)
    results['delta < 1.25^2'] = np.mean(delta2)
    results['delta < 1.25^3'] = np.mean(delta3)
    # Absolute relative difference
    abrdiff = np.abs(diff) * masks / depths
    abrdiff = np.sum(abrdiff, axis=(1, 2)) / npixels
    results['Abs Rel Diff'] = np.mean(abrdiff)
    # Squared relative difference
    sqrdiff = np.square(diff) * masks / depths
    sqrdiff = np.sum(sqrdiff, axis=(1, 2)) / npixels
    results['Squa Rel Diff'] = np.mean(sqrdiff)
    # Mean log10 error
    log10 = np.abs(np.log10(preds) - np.log10(depths))
    log10 = np.sum(log10 * masks, axis=(1, 2)) / npixels
    results['Mean Log10'] = np.mean(log10)
    # Mean RMSE: per-image RMSE averaged over images
    mrmse = np.sum(np.square(diff) * masks, axis=(1, 2)) / npixels
    mrmse = np.mean(np.sqrt(mrmse))
    results['Mean RMSE'] = mrmse
    return results
| [
"numpy.mean",
"numpy.abs",
"numpy.log10",
"numpy.sqrt",
"numpy.float64",
"numpy.square",
"numpy.sum",
"numpy.zeros",
"numpy.stack",
"numpy.maximum"
] | [((55, 93), 'numpy.zeros', 'np.zeros', (['[480, 640]'], {'dtype': 'np.float64'}), '([480, 640], dtype=np.float64)\n', (63, 93), True, 'import numpy as np\n'), ((1437, 1463), 'numpy.sum', 'np.sum', (['masks'], {'axis': '(1, 2)'}), '(masks, axis=(1, 2))\n', (1443, 1463), True, 'import numpy as np\n'), ((1576, 1588), 'numpy.mean', 'np.mean', (['mse'], {}), '(mse)\n', (1583, 1588), True, 'import numpy as np\n'), ((1600, 1612), 'numpy.sqrt', 'np.sqrt', (['mse'], {}), '(mse)\n', (1607, 1612), True, 'import numpy as np\n'), ((1729, 1771), 'numpy.maximum', 'np.maximum', (['(preds / depths)', '(depths / preds)'], {}), '(preds / depths, depths / preds)\n', (1739, 1771), True, 'import numpy as np\n'), ((2039, 2054), 'numpy.mean', 'np.mean', (['delta1'], {}), '(delta1)\n', (2046, 2054), True, 'import numpy as np\n'), ((2087, 2102), 'numpy.mean', 'np.mean', (['delta2'], {}), '(delta2)\n', (2094, 2102), True, 'import numpy as np\n'), ((2135, 2150), 'numpy.mean', 'np.mean', (['delta3'], {}), '(delta3)\n', (2142, 2150), True, 'import numpy as np\n'), ((2314, 2330), 'numpy.mean', 'np.mean', (['abrdiff'], {}), '(abrdiff)\n', (2321, 2330), True, 'import numpy as np\n'), ((2497, 2513), 'numpy.mean', 'np.mean', (['sqrdiff'], {}), '(sqrdiff)\n', (2504, 2513), True, 'import numpy as np\n'), ((2672, 2686), 'numpy.mean', 'np.mean', (['log10'], {}), '(log10)\n', (2679, 2686), True, 'import numpy as np\n'), ((1015, 1038), 'numpy.stack', 'np.stack', (['preds'], {'axis': '(0)'}), '(preds, axis=0)\n', (1023, 1038), True, 'import numpy as np\n'), ((1059, 1083), 'numpy.stack', 'np.stack', (['depths'], {'axis': '(0)'}), '(depths, axis=0)\n', (1067, 1083), True, 'import numpy as np\n'), ((1151, 1174), 'numpy.stack', 'np.stack', (['masks'], {'axis': '(0)'}), '(masks, axis=0)\n', (1159, 1174), True, 'import numpy as np\n'), ((1260, 1282), 'numpy.float64', 'np.float64', (['(depths > 0)'], {}), '(depths > 0)\n', (1270, 1282), True, 'import numpy as np\n'), ((1517, 1557), 'numpy.sum', 'np.sum', 
(['(diff ** 2.0 * masks)'], {'axis': '(1, 2)'}), '(diff ** 2.0 * masks, axis=(1, 2))\n', (1523, 1557), True, 'import numpy as np\n'), ((2245, 2273), 'numpy.sum', 'np.sum', (['abrdiff'], {'axis': '(1, 2)'}), '(abrdiff, axis=(1, 2))\n', (2251, 2273), True, 'import numpy as np\n'), ((2427, 2455), 'numpy.sum', 'np.sum', (['sqrdiff'], {'axis': '(1, 2)'}), '(sqrdiff, axis=(1, 2))\n', (2433, 2455), True, 'import numpy as np\n'), ((2599, 2633), 'numpy.sum', 'np.sum', (['(log10 * masks)'], {'axis': '(1, 2)'}), '(log10 * masks, axis=(1, 2))\n', (2605, 2633), True, 'import numpy as np\n'), ((2791, 2805), 'numpy.sqrt', 'np.sqrt', (['mrmse'], {}), '(mrmse)\n', (2798, 2805), True, 'import numpy as np\n'), ((1191, 1213), 'numpy.float64', 'np.float64', (['(depths > 0)'], {}), '(depths > 0)\n', (1201, 1213), True, 'import numpy as np\n'), ((1216, 1233), 'numpy.float64', 'np.float64', (['masks'], {}), '(masks)\n', (1226, 1233), True, 'import numpy as np\n'), ((2201, 2213), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (2207, 2213), True, 'import numpy as np\n'), ((2380, 2395), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (2389, 2395), True, 'import numpy as np\n'), ((2551, 2566), 'numpy.log10', 'np.log10', (['preds'], {}), '(preds)\n', (2559, 2566), True, 'import numpy as np\n'), ((2569, 2585), 'numpy.log10', 'np.log10', (['depths'], {}), '(depths)\n', (2577, 2585), True, 'import numpy as np\n'), ((1792, 1816), 'numpy.float64', 'np.float64', (['(delta < 1.25)'], {}), '(delta < 1.25)\n', (1802, 1816), True, 'import numpy as np\n'), ((1869, 1898), 'numpy.float64', 'np.float64', (['(delta < 1.25 ** 2)'], {}), '(delta < 1.25 ** 2)\n', (1879, 1898), True, 'import numpy as np\n'), ((1949, 1978), 'numpy.float64', 'np.float64', (['(delta < 1.25 ** 3)'], {}), '(delta < 1.25 ** 3)\n', (1959, 1978), True, 'import numpy as np\n'), ((2723, 2738), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (2732, 2738), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from numpy.testing import assert_almost_equal
from theanolm.commands.score import _merge_subwords
class TestScore(unittest.TestCase):
    """Unit tests for theanolm.commands.score._merge_subwords."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_merge_subwords(self):
        # Plain word vocabulary: tokens and log-probs pass through unchanged.
        tokens = ['<s>', 'aaa', 'bbb', 'ccc', 'ddd', '</s>']
        logprobs = [0.0, 0.1, 0.2, 0.3, 0.4]
        merged, merged_logprobs = _merge_subwords(tokens, logprobs, None)
        self.assertSequenceEqual(merged, tokens)
        assert_almost_equal(merged_logprobs, logprobs)
        # Word-boundary marking, <unk> predicted.
        tokens = ['<s>', '<w>', 'aaa', '<w>', 'bbb', '<unk>', '<w>',
                  'ccc', 'ddd', '<w>', '</s>']
        logprobs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
        merged, merged_logprobs = _merge_subwords(tokens, logprobs,
                                                  "word-boundary")
        expected_words = [['<s>'], ['aaa'], ['<unk>'], ['ccc', 'ddd'], ['</s>']]
        self.assertSequenceEqual(merged, expected_words)
        assert_almost_equal(merged_logprobs,
                            [0.2 + 0.3, 0.4 + 0.5 + 0.6, 0.7 + 0.8 + 0.9, 1.0])
        # Word-boundary marking, <unk> not predicted (log-prob is None).
        logprobs = [0.1, 0.2, 0.3, 0.4, None, 0.6, 0.7, 0.8, 0.9, 1.0]
        merged, merged_logprobs = _merge_subwords(tokens, logprobs,
                                                  "word-boundary")
        self.assertSequenceEqual(merged, expected_words)
        self.assertAlmostEqual(merged_logprobs[0], 0.2 + 0.3)
        self.assertIsNone(merged_logprobs[1])
        self.assertAlmostEqual(merged_logprobs[2], 0.7 + 0.8 + 0.9)
        self.assertAlmostEqual(merged_logprobs[3], 1.0)
        # Prefix/affix marking, <unk> predicted.
        tokens = ['<s>', 'aaa', 'bbb+', '+ccc', '<unk>', '</s>']
        logprobs = [0.1, 0.2, 0.3, 0.4, 0.5]
        merged, merged_logprobs = _merge_subwords(tokens, logprobs,
                                                  "prefix-affix")
        expected_words = [['<s>'], ['aaa'], ['bbb+', '+ccc'], ['<unk>'],
                          ['</s>']]
        self.assertSequenceEqual(merged, expected_words)
        assert_almost_equal(merged_logprobs, [0.1, 0.2 + 0.3, 0.4, 0.5])
        # Prefix/affix marking, <unk> not predicted.
        logprobs = [0.1, 0.2, 0.3, None, 0.5]
        merged, merged_logprobs = _merge_subwords(tokens, logprobs,
                                                  "prefix-affix")
        self.assertSequenceEqual(merged, expected_words)
        self.assertAlmostEqual(merged_logprobs[0], 0.1)
        self.assertAlmostEqual(merged_logprobs[1], 0.2 + 0.3)
        self.assertIsNone(merged_logprobs[2])
        self.assertAlmostEqual(merged_logprobs[3], 0.5)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.testing.assert_almost_equal",
"theanolm.commands.score._merge_subwords"
] | [((2833, 2848), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2846, 2848), False, 'import unittest\n'), ((482, 531), 'theanolm.commands.score._merge_subwords', '_merge_subwords', (['subwords', 'subword_logprobs', 'None'], {}), '(subwords, subword_logprobs, None)\n', (497, 531), False, 'from theanolm.commands.score import _merge_subwords\n'), ((590, 642), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['word_logprobs', 'subword_logprobs'], {}), '(word_logprobs, subword_logprobs)\n', (609, 642), False, 'from numpy.testing import assert_almost_equal\n'), ((924, 984), 'theanolm.commands.score._merge_subwords', '_merge_subwords', (['subwords', 'subword_logprobs', '"""word-boundary"""'], {}), "(subwords, subword_logprobs, 'word-boundary')\n", (939, 984), False, 'from theanolm.commands.score import _merge_subwords\n'), ((1090, 1180), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['word_logprobs', '[0.2 + 0.3, 0.4 + 0.5 + 0.6, 0.7 + 0.8 + 0.9, 1.0]'], {}), '(word_logprobs, [0.2 + 0.3, 0.4 + 0.5 + 0.6, 0.7 + 0.8 +\n 0.9, 1.0])\n', (1109, 1180), False, 'from numpy.testing import assert_almost_equal\n'), ((1363, 1423), 'theanolm.commands.score._merge_subwords', '_merge_subwords', (['subwords', 'subword_logprobs', '"""word-boundary"""'], {}), "(subwords, subword_logprobs, 'word-boundary')\n", (1378, 1423), False, 'from theanolm.commands.score import _merge_subwords\n'), ((1970, 2029), 'theanolm.commands.score._merge_subwords', '_merge_subwords', (['subwords', 'subword_logprobs', '"""prefix-affix"""'], {}), "(subwords, subword_logprobs, 'prefix-affix')\n", (1985, 2029), False, 'from theanolm.commands.score import _merge_subwords\n'), ((2137, 2199), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['word_logprobs', '[0.1, 0.2 + 0.3, 0.4, 0.5]'], {}), '(word_logprobs, [0.1, 0.2 + 0.3, 0.4, 0.5])\n', (2156, 2199), False, 'from numpy.testing import assert_almost_equal\n'), ((2430, 2489), 'theanolm.commands.score._merge_subwords', 
'_merge_subwords', (['subwords', 'subword_logprobs', '"""prefix-affix"""'], {}), "(subwords, subword_logprobs, 'prefix-affix')\n", (2445, 2489), False, 'from theanolm.commands.score import _merge_subwords\n')] |
"""
Twin-delayed DDPG
"""
import numpy as np
import tensorflow as tf
from utils.logx import EpochLogger
from utils.tf_utils import set_tf_allow_growth
# Apply the TF memory setting immediately at import time, before any model
# is built (presumably enables GPU allow_growth -- see utils.tf_utils).
set_tf_allow_growth()
import gym
import time
from tqdm.auto import tqdm
def hard_update(target: tf.keras.Model, source: tf.keras.Model):
    """Copy every weight of ``source`` into ``target`` (hard sync)."""
    source_weights = source.get_weights()
    target.set_weights(source_weights)
def soft_update(target: tf.keras.Model, source: tf.keras.Model, tau):
    """Polyak-average source into target: w_t <- (1 - tau)*w_t + tau*w_s."""
    blended = [
        target_w * (1. - tau) + source_w * tau
        for target_w, source_w in zip(target.get_weights(), source.get_weights())
    ]
    target.set_weights(blended)
def huber_loss(y_true, y_pred, delta=1.0):
    """Huber loss.
    https://en.wikipedia.org/wiki/Huber_loss
    """
    residual = y_true - y_pred
    abs_residual = tf.abs(residual)
    # Quadratic below |residual| < delta, linear above.
    within_delta = abs_residual < delta
    quadratic = 0.5 * tf.square(residual)
    linear = delta * (abs_residual - 0.5 * delta)
    return tf.where(within_delta, quadratic, linear)
return tf.where(cond, squared_loss, linear_loss)
def combined_shape(length, shape=None):
    """Prepend a batch dimension ``length`` to ``shape``.

    Returns ``(length,)`` when shape is None, ``(length, shape)`` for a
    scalar shape, and ``(length, *shape)`` for a sequence shape.
    """
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
class ReplayBuffer:
    """
    A simple FIFO experience replay buffer for SAC agents.
    """
    def __init__(self, obs_dim, act_dim, size):
        # Pre-allocated circular storage for transitions.
        self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
        self.obs2_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr = 0
        self.size = 0
        self.max_size = size
    def store(self, obs, act, rew, next_obs, done):
        """Insert one transition, overwriting the oldest entry when full."""
        write_idx = self.ptr
        self.obs_buf[write_idx] = obs
        self.obs2_buf[write_idx] = next_obs
        self.act_buf[write_idx] = act
        self.rew_buf[write_idx] = rew
        self.done_buf[write_idx] = done
        self.ptr = (write_idx + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
    def sample_batch(self, batch_size=32):
        """Sample ``batch_size`` transitions uniformly (with replacement)."""
        idxs = np.random.randint(0, self.size, size=batch_size)
        return {
            'obs': self.obs_buf[idxs],
            'obs2': self.obs2_buf[idxs],
            'act': self.act_buf[idxs],
            'rew': self.rew_buf[idxs],
            'done': self.done_buf[idxs],
        }
class EnsembleDense(tf.keras.layers.Dense):
    """Dense layer applied independently by ``num_ensembles`` members.

    Inputs have shape (num_ensembles, batch, features); each ensemble
    member owns its own slice of the kernel and bias.
    """
    def __init__(self, num_ensembles, units, **kwargs):
        super(EnsembleDense, self).__init__(units=units, **kwargs)
        self.num_ensembles = num_ensembles
    def build(self, input_shape):
        """Create one kernel/bias per ensemble member.

        Overrides Dense.build to add the leading ensemble dimension.
        """
        last_dim = int(input_shape[-1])
        # Kernel carries an extra leading ensemble axis.
        self.kernel = self.add_weight(
            'kernel',
            shape=[self.num_ensembles, last_dim, self.units],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True)
        if self.use_bias:
            # Middle dim of 1 broadcasts the bias over the batch axis.
            self.bias = self.add_weight(
                'bias',
                shape=[self.num_ensembles, 1, self.units],
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                dtype=self.dtype,
                trainable=True)
        else:
            self.bias = None
        self.built = True
    def call(self, inputs):
        """Batched matmul over the ensemble axis, then bias and activation."""
        outputs = tf.linalg.matmul(inputs, self.kernel)  # (num_ensembles, None, units)
        if self.use_bias:
            outputs = outputs + self.bias
        if self.activation is not None:
            return self.activation(outputs)  # pylint: disable=not-callable
        return outputs
class SqueezeLayer(tf.keras.layers.Layer):
    """Keras layer that removes a single size-1 dimension at ``axis``."""
    def __init__(self, axis=-1):
        super(SqueezeLayer, self).__init__()
        self.axis = axis
    def call(self, inputs, **kwargs):
        # Drop the (size-1) dimension at self.axis.
        return tf.squeeze(inputs, axis=self.axis)
def build_mlp(input_dim, output_dim, mlp_hidden, activation='relu', out_activation=None):
    """Build a 3-layer MLP: two hidden layers of ``mlp_hidden`` units plus
    an output layer with ``out_activation``."""
    layers = [tf.keras.layers.InputLayer(input_shape=(input_dim,))]
    for _ in range(2):
        layers.append(tf.keras.layers.Dense(mlp_hidden, activation=activation))
    layers.append(tf.keras.layers.Dense(output_dim, activation=out_activation))
    return tf.keras.Sequential(layers)
def build_mlp_ensemble(input_dim, output_dim, mlp_hidden, num_ensembles, num_layers=3,
                       activation='relu', out_activation=None, squeeze=True):
    """Build an ensemble MLP of ``num_layers`` EnsembleDense layers.

    A trailing SqueezeLayer drops the size-1 output axis when
    ``output_dim == 1`` and ``squeeze`` is True.
    """
    net = tf.keras.Sequential()
    net.add(tf.keras.layers.InputLayer(batch_input_shape=(num_ensembles, None, input_dim)))
    hidden_layer_count = num_layers - 1
    for _ in range(hidden_layer_count):
        net.add(EnsembleDense(num_ensembles, mlp_hidden, activation=activation))
    net.add(EnsembleDense(num_ensembles, output_dim, activation=out_activation))
    if output_dim == 1 and squeeze is True:
        net.add(SqueezeLayer(axis=-1))
    return net
class EnsembleQNet(tf.keras.Model):
    """Ensemble of Q networks (clipped double-Q style).

    During training returns all ensemble Q values; otherwise returns the
    element-wise minimum over the ensemble.
    """
    def __init__(self, ob_dim, ac_dim, mlp_hidden, num_ensembles=2):
        super(EnsembleQNet, self).__init__()
        self.ob_dim = ob_dim
        self.ac_dim = ac_dim
        self.mlp_hidden = mlp_hidden
        self.num_ensembles = num_ensembles
        # One shared ensemble MLP mapping concat(obs, act) -> scalar Q.
        self.q_net = build_mlp_ensemble(input_dim=self.ob_dim + self.ac_dim,
                                        output_dim=1,
                                        mlp_hidden=self.mlp_hidden,
                                        num_ensembles=self.num_ensembles,
                                        num_layers=3,
                                        squeeze=True)
        self.build(input_shape=[(None, ob_dim), (None, ac_dim)])
    def get_config(self):
        """Serialization config used by tf.keras model saving."""
        config = super(EnsembleQNet, self).get_config()
        config.update({
            'ob_dim': self.ob_dim,
            'ac_dim': self.ac_dim,
            'mlp_hidden': self.mlp_hidden,
            'num_ensembles': self.num_ensembles
        })
        return config
    def call(self, inputs, training=None, mask=None):
        """Compute Q values for an (obs, act) pair.

        Returns (num_ensembles, batch) when training, else the per-sample
        minimum over the ensemble of shape (batch,).
        """
        obs, act = inputs
        inputs = tf.concat((obs, act), axis=-1)
        # Replicate the batch across the ensemble axis.
        inputs = tf.tile(tf.expand_dims(inputs, axis=0), (self.num_ensembles, 1, 1))
        q = self.q_net(inputs)  # (num_ensembles, None)
        if training:
            return q
        else:
            return tf.reduce_min(q, axis=0)
class Actor(tf.keras.Model):
    """Deterministic policy: obs -> tanh-squashed action in [-act_lim, act_lim]."""
    def __init__(self, ob_dim, ac_dim, act_lim, mlp_hidden):
        super(Actor, self).__init__()
        self.net = build_mlp(ob_dim, ac_dim, mlp_hidden)
        # BUGFIX: get_config() reads ob_dim and mlp_hidden, which the
        # original never stored -- serialization raised AttributeError.
        self.ob_dim = ob_dim
        self.ac_dim = ac_dim
        self.act_lim = act_lim
        self.mlp_hidden = mlp_hidden
        self.build(input_shape=[(None, ob_dim)])
    def get_config(self):
        """Serialization config used by tf.keras model saving."""
        config = super(Actor, self).get_config()
        config.update({
            'ob_dim': self.ob_dim,
            'ac_dim': self.ac_dim,
            'mlp_hidden': self.mlp_hidden,
            'act_lim': self.act_lim
        })
        return config
    def call(self, inputs, training=None, mask=None):
        """Map observations to actions scaled into the valid action range."""
        pi_raw = self.net(inputs, training=training)
        pi_final = tf.tanh(pi_raw) * self.act_lim
        return pi_final
class TD3Agent(object):
    """Twin-Delayed DDPG agent.

    Holds a deterministic actor, an ensemble (twin) Q network with a
    target copy, and the update rules: clipped double-Q targets with
    target-policy smoothing, delayed actor updates, and Polyak target
    averaging.  A logger must be attached via set_logger() before
    update() is called.
    """
    def __init__(self,
                 ob_dim,
                 ac_dim,
                 act_lim,
                 mlp_hidden=256,
                 learning_rate=3e-4,
                 tau=5e-3,
                 gamma=0.99,
                 huber_delta=None,
                 actor_noise=0.1,
                 target_noise=0.2,
                 noise_clip=0.5
                 ):
        # ob_dim/ac_dim: observation/action sizes; act_lim bounds actions
        # symmetrically; huber_delta=None selects plain squared TD loss.
        self.ob_dim = ob_dim
        self.ac_dim = ac_dim
        self.act_lim = act_lim
        self.mlp_hidden = mlp_hidden
        self.huber_delta = huber_delta
        self.actor_noise = actor_noise
        self.target_noise = target_noise
        self.noise_clip = noise_clip
        self.policy_net = Actor(ob_dim, ac_dim, act_lim, mlp_hidden)
        self.q_network = EnsembleQNet(ob_dim, ac_dim, mlp_hidden)
        self.target_q_network = EnsembleQNet(ob_dim, ac_dim, mlp_hidden)
        # Start the target network identical to the online network.
        hard_update(self.target_q_network, self.q_network)
        # NOTE(review): `lr=` is a deprecated alias of `learning_rate=` in
        # newer tf.keras optimizers.
        self.policy_optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
        self.q_optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
        self.tau = tau
        self.gamma = gamma
        self.build_tf_function()
    def build_tf_function(self):
        """Wrap the hot methods in tf.function with fixed input signatures
        so each is traced once instead of per distinct batch shape."""
        self.act_batch = tf.function(func=self.act_batch, input_signature=[
            tf.TensorSpec(shape=[None, self.ob_dim], dtype=tf.float32),
            tf.TensorSpec(shape=(), dtype=tf.bool),
        ])
        self._update_nets = tf.function(func=self._update_nets, input_signature=[
            tf.TensorSpec(shape=[None, self.ob_dim], dtype=tf.float32),
            tf.TensorSpec(shape=[None, self.ac_dim], dtype=tf.float32),
            tf.TensorSpec(shape=[None, self.ob_dim], dtype=tf.float32),
            tf.TensorSpec(shape=[None], dtype=tf.float32),
            tf.TensorSpec(shape=[None], dtype=tf.float32),
        ])
        self._update_actor = tf.function(func=self._update_actor, input_signature=[
            tf.TensorSpec(shape=[None, self.ob_dim], dtype=tf.float32)
        ])
    def set_logger(self, logger):
        """Attach the EpochLogger used by update() and log_tabular()."""
        self.logger = logger
    def log_tabular(self):
        """Emit the agent's diagnostics through the attached logger."""
        self.logger.log_tabular('Q1Vals', with_min_and_max=False)
        self.logger.log_tabular('Q2Vals', with_min_and_max=False)
        self.logger.log_tabular('LossPi', average_only=True)
        self.logger.log_tabular('LossQ', average_only=True)
    def update_target(self):
        """Polyak-average the online Q network into the target network."""
        soft_update(self.target_q_network, self.q_network, self.tau)
    def _update_nets(self, obs, actions, next_obs, done, reward):
        """One gradient step on the Q networks; returns diagnostics."""
        # Printed only while tf.function traces the graph (once per signature).
        print(f'Tracing _update_nets with obs={obs}, actions={actions}')
        # compute target q
        next_action = self.policy_net(next_obs)
        # Target policy smoothing: clipped Gaussian noise on the target action.
        epsilon = tf.random.normal(shape=[tf.shape(obs)[0], self.ac_dim]) * self.target_noise
        epsilon = tf.clip_by_value(epsilon, -self.noise_clip, self.noise_clip)
        next_action = next_action + epsilon
        next_action = tf.clip_by_value(next_action, -self.act_lim, self.act_lim)
        # training=False -> min over the twin Qs (clipped double-Q target).
        target_q_values = self.target_q_network((next_obs, next_action), training=False)
        q_target = reward + self.gamma * (1.0 - done) * target_q_values
        # q loss
        with tf.GradientTape() as q_tape:
            q_values = self.q_network((obs, actions), training=True)  # (num_ensembles, None)
            if self.huber_delta is not None:
                q_values_loss = huber_loss(tf.expand_dims(q_target, axis=0), q_values, delta=self.huber_delta)
            else:
                q_values_loss = 0.5 * tf.square(tf.expand_dims(q_target, axis=0) - q_values)
            # (num_ensembles, None)
            # Sum over the ensemble axis, then average over the batch.
            q_values_loss = tf.reduce_sum(q_values_loss, axis=0)  # (None,)
            # apply importance weights
            q_values_loss = tf.reduce_mean(q_values_loss)
        q_gradients = q_tape.gradient(q_values_loss, self.q_network.trainable_variables)
        self.q_optimizer.apply_gradients(zip(q_gradients, self.q_network.trainable_variables))
        info = dict(
            Q1Vals=q_values[0],
            Q2Vals=q_values[1],
            LossQ=q_values_loss,
        )
        return info
    def _update_actor(self, obs):
        """One gradient step on the policy (maximize Q); returns diagnostics."""
        print(f'Tracing _update_actor with obs={obs}')
        # policy loss: ascend Q by descending its negation.
        with tf.GradientTape() as policy_tape:
            a = self.policy_net(obs)
            q = self.q_network((obs, a))
            policy_loss = -tf.reduce_mean(q, axis=0)
        policy_gradients = policy_tape.gradient(policy_loss, self.policy_net.trainable_variables)
        self.policy_optimizer.apply_gradients(zip(policy_gradients, self.policy_net.trainable_variables))
        info = dict(
            LossPi=policy_loss,
        )
        return info
    def update(self, obs, act, obs2, done, rew, update_target=True):
        """Full TD3 update from one replay batch.

        The actor and target networks are only updated when
        ``update_target`` is True (delayed policy updates).  Requires a
        logger attached via set_logger().
        """
        obs = tf.convert_to_tensor(obs, dtype=tf.float32)
        act = tf.convert_to_tensor(act, dtype=tf.float32)
        obs2 = tf.convert_to_tensor(obs2, dtype=tf.float32)
        done = tf.convert_to_tensor(done, dtype=tf.float32)
        rew = tf.convert_to_tensor(rew, dtype=tf.float32)
        info = self._update_nets(obs, act, obs2, done, rew)
        if update_target:
            actor_info = self._update_actor(obs)
            info.update(actor_info)
            self.update_target()
        for key, item in info.items():
            info[key] = item.numpy()
        self.logger.store(**info)
    def act(self, obs, deterministic):
        """Select an action for a single (unbatched) observation."""
        obs = tf.expand_dims(obs, axis=0)
        pi_final = self.act_batch(obs, deterministic)
        return pi_final[0]
    def act_batch(self, obs, deterministic):
        """Select actions for a batch; adds clipped exploration noise
        unless ``deterministic`` is True."""
        print(f'Tracing td3 act_batch with obs {obs}')
        pi_final = self.policy_net(obs)
        if deterministic:
            return pi_final
        else:
            noise = tf.random.normal(shape=[tf.shape(obs)[0], self.ac_dim], dtype=tf.float32) * self.actor_noise
            pi_final = pi_final + noise
            pi_final = tf.clip_by_value(pi_final, -self.act_lim, self.act_lim)
        return pi_final
def td3(env_name,
        env_fn=None,
        max_ep_len=1000,
        steps_per_epoch=5000,
        epochs=200,
        start_steps=10000,
        update_after=1000,
        update_every=50,
        update_per_step=1,
        batch_size=256,
        num_test_episodes=20,
        logger_kwargs=dict(),
        seed=1,
        # agent args
        nn_size=256,
        learning_rate=1e-3,
        actor_noise=0.1,
        target_noise=0.2,
        noise_clip=0.5,
        tau=5e-3,
        gamma=0.99,
        policy_delay=2,
        # replay
        replay_size=int(1e6),
        ):
    """Train a TD3 agent on a Gym environment.

    Runs the standard off-policy loop: random exploration for `start_steps`
    env steps, then policy actions; gradient updates every `update_every`
    steps (after `update_after`); actor/target updates delayed by
    `policy_delay`; a deterministic evaluation over `num_test_episodes`
    vectorized test envs at the end of each epoch.

    Args:
        env_name: Gym environment id (used unless env_fn is given).
        env_fn: optional zero-arg callable returning the training env.
        max_ep_len: episode length cap; "done" at the cap is ignored for
            bootstrapping (time-limit handling).
        steps_per_epoch / epochs: loop sizing; total steps is the product.
        start_steps: steps of uniform-random exploration at the start.
        update_after / update_every / update_per_step: update schedule.
        batch_size: replay minibatch size.
        num_test_episodes: parallel evaluation episodes per epoch.
        logger_kwargs: forwarded to EpochLogger.
        seed: RNG seed for tf, numpy and the env.
        nn_size..policy_delay: TD3Agent hyperparameters.
        replay_size: replay buffer capacity.
    """
    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())
    tf.random.set_seed(seed)
    np.random.seed(seed)
    env = gym.make(env_name) if env_fn is None else env_fn()
    env.seed(seed)
    # Vectorized test env: one sub-env per evaluation episode, stepped in lockstep.
    test_env = gym.vector.make(env_name, num_envs=num_test_episodes, asynchronous=False)
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    # Action limit for clamping: critically, assumes all dimensions share the same bound!
    act_limit = env.action_space.high[0]
    agent = TD3Agent(ob_dim=obs_dim, ac_dim=act_dim, act_lim=act_limit, mlp_hidden=nn_size,
                     learning_rate=learning_rate, tau=tau,
                     gamma=gamma, actor_noise=actor_noise,
                     target_noise=target_noise,
                     noise_clip=noise_clip)
    agent.set_logger(logger)
    replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
    def get_action(o, deterministic=False):
        # Single-observation action helper (numpy in, numpy out).
        return agent.act(tf.convert_to_tensor(o, dtype=tf.float32), tf.convert_to_tensor(deterministic)).numpy()
    def get_action_batch(o, deterministic=False):
        # Batched action helper for the vectorized test env.
        return agent.act_batch(tf.convert_to_tensor(o, dtype=tf.float32), tf.convert_to_tensor(deterministic)).numpy()
    def test_agent():
        # Run all test episodes to completion; returns/lengths are frozen per
        # sub-env once its done flag flips (masking by (1 - d)).
        # NOTE(review): np.bool is deprecated in modern NumPy; use bool there.
        o, d, ep_ret, ep_len = test_env.reset(), np.zeros(shape=num_test_episodes, dtype=np.bool), \
                               np.zeros(shape=num_test_episodes), np.zeros(shape=num_test_episodes, dtype=np.int64)
        t = tqdm(total=1, desc='Testing')
        while not np.all(d):
            a = get_action_batch(o, deterministic=True)
            o, r, d_, _ = test_env.step(a)
            ep_ret = r * (1 - d) + ep_ret
            ep_len = np.ones(shape=num_test_episodes, dtype=np.int64) * (1 - d) + ep_len
            d = np.logical_or(d, d_)
            t.update(1)
        t.close()
        logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
    # Prepare for interaction with environment
    total_steps = steps_per_epoch * epochs
    start_time = time.time()
    o, ep_ret, ep_len = env.reset(), 0, 0
    bar = tqdm(total=steps_per_epoch)
    # Main loop: collect experience in env and update/log each epoch
    for t in range(total_steps):
        # Until start_steps have elapsed, randomly sample actions
        # from a uniform distribution for better exploration. Afterwards,
        # use the learned policy.
        if t > start_steps:
            a = get_action(o)
        else:
            a = env.action_space.sample()
        # Step the env
        o2, r, d, _ = env.step(a)
        ep_ret += r
        ep_len += 1
        # Ignore the "done" signal if it comes from hitting the time
        # horizon (that is, when it's an artificial terminal signal
        # that isn't based on the agent's state)
        d = False if ep_len == max_ep_len else d
        # Store experience to replay buffer
        replay_buffer.store(o, a, r, o2, d)
        # Super critical, easy to overlook step: make sure to update
        # most recent observation!
        o = o2
        # End of trajectory handling
        if d or (ep_len == max_ep_len):
            logger.store(EpRet=ep_ret, EpLen=ep_len)
            o, ep_ret, ep_len = env.reset(), 0, 0
        # Update handling
        if t >= update_after and t % update_every == 0:
            for j in range(update_every * update_per_step):
                batch = replay_buffer.sample_batch(batch_size)
                # Delayed policy/target updates: actor is updated only every
                # policy_delay-th critic update.
                update_target = (j % policy_delay == 0)
                agent.update(**batch, update_target=update_target)
        bar.update(1)
        # End of epoch handling
        if (t + 1) % steps_per_epoch == 0:
            bar.close()
            if t >= update_after:
                epoch = (t + 1) // steps_per_epoch
                # Test the performance of the deterministic version of the agent.
                test_agent()
                # Log info about epoch
                logger.log_tabular('Epoch', epoch)
                logger.log_tabular('EpRet', with_min_and_max=True)
                logger.log_tabular('TestEpRet', with_min_and_max=True)
                logger.log_tabular('EpLen', average_only=True)
                logger.log_tabular('TestEpLen', average_only=True)
                logger.log_tabular('TotalEnvInteracts', t)
                agent.log_tabular()
                logger.log_tabular('Time', time.time() - start_time)
                logger.dump_tabular()
            if t < total_steps:
                bar = tqdm(total=steps_per_epoch)
if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters and launch training.
    import argparse
    import envs
    from utils.run_utils import setup_logger_kwargs
    # Keep the `envs` import (registers custom environments as a side effect)
    # from being flagged/stripped as unused.
    __all__ = ['envs']
    parser = argparse.ArgumentParser()
    parser.add_argument('--env_name', type=str, default='Hopper-v2')
    parser.add_argument('--seed', type=int, default=1)
    # agent arguments
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--tau', type=float, default=5e-3)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--nn_size', '-s', type=int, default=256)
    parser.add_argument('--actor_noise', type=float, default=0.1)
    parser.add_argument('--target_noise', type=float, default=0.2)
    parser.add_argument('--noise_clip', type=float, default=0.5)
    parser.add_argument('--policy_delay', type=int, default=2)
    # training arguments
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--start_steps', type=int, default=10000)
    parser.add_argument('--replay_size', type=int, default=1000000)
    parser.add_argument('--steps_per_epoch', type=int, default=5000)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--num_test_episodes', type=int, default=20)
    parser.add_argument('--max_ep_len', type=int, default=1000)
    parser.add_argument('--update_after', type=int, default=1000)
    parser.add_argument('--update_every', type=int, default=50)
    parser.add_argument('--update_per_step', type=int, default=1)
    # Argument names match td3()'s keyword parameters, so the dict can be
    # splatted straight into the call.
    args = vars(parser.parse_args())
    logger_kwargs = setup_logger_kwargs(exp_name=args['env_name'] + '_td3_test', data_dir='data', seed=args['seed'])
    td3(**args, logger_kwargs=logger_kwargs)
| [
"tensorflow.shape",
"tensorflow.tanh",
"tensorflow.reduce_sum",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.reduce_mean",
"gym.make",
"tensorflow.reduce_min",
"numpy.isscalar",
"argparse.ArgumentParser",
"tensorflow.keras.Sequential",
"tensorflow.concat",
"numpy.r... | [((154, 175), 'utils.tf_utils.set_tf_allow_growth', 'set_tf_allow_growth', ([], {}), '()\n', (173, 175), False, 'from utils.tf_utils import set_tf_allow_growth\n'), ((926, 967), 'tensorflow.where', 'tf.where', (['cond', 'squared_loss', 'linear_loss'], {}), '(cond, squared_loss, linear_loss)\n', (934, 967), True, 'import tensorflow as tf\n'), ((4580, 4601), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (4599, 4601), True, 'import tensorflow as tf\n'), ((13796, 13824), 'utils.logx.EpochLogger', 'EpochLogger', ([], {}), '(**logger_kwargs)\n', (13807, 13824), False, 'from utils.logx import EpochLogger\n'), ((13864, 13888), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (13882, 13888), True, 'import tensorflow as tf\n'), ((13893, 13913), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13907, 13913), True, 'import numpy as np\n'), ((14010, 14083), 'gym.vector.make', 'gym.vector.make', (['env_name'], {'num_envs': 'num_test_episodes', 'asynchronous': '(False)'}), '(env_name, num_envs=num_test_episodes, asynchronous=False)\n', (14025, 14083), False, 'import gym\n'), ((15828, 15839), 'time.time', 'time.time', ([], {}), '()\n', (15837, 15839), False, 'import time\n'), ((15892, 15919), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'steps_per_epoch'}), '(total=steps_per_epoch)\n', (15896, 15919), False, 'from tqdm.auto import tqdm\n'), ((18485, 18510), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (18508, 18510), False, 'import argparse\n'), ((19911, 20012), 'utils.run_utils.setup_logger_kwargs', 'setup_logger_kwargs', ([], {'exp_name': "(args['env_name'] + '_td3_test')", 'data_dir': '"""data"""', 'seed': "args['seed']"}), "(exp_name=args['env_name'] + '_td3_test', data_dir=\n 'data', seed=args['seed'])\n", (19930, 20012), False, 'from utils.run_utils import setup_logger_kwargs\n'), ((793, 806), 'tensorflow.abs', 'tf.abs', (['error'], {}), '(error)\n', (799, 806), 
True, 'import tensorflow as tf\n'), ((841, 857), 'tensorflow.square', 'tf.square', (['error'], {}), '(error)\n', (850, 857), True, 'import tensorflow as tf\n'), ((1087, 1105), 'numpy.isscalar', 'np.isscalar', (['shape'], {}), '(shape)\n', (1098, 1105), True, 'import numpy as np\n'), ((1541, 1573), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1549, 1573), True, 'import numpy as np\n'), ((1598, 1630), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1606, 1630), True, 'import numpy as np\n'), ((2096, 2144), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {'size': 'batch_size'}), '(0, self.size, size=batch_size)\n', (2113, 2144), True, 'import numpy as np\n'), ((3491, 3528), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['inputs', 'self.kernel'], {}), '(inputs, self.kernel)\n', (3507, 3528), True, 'import tensorflow as tf\n'), ((3970, 4004), 'tensorflow.squeeze', 'tf.squeeze', (['inputs'], {'axis': 'self.axis'}), '(inputs, axis=self.axis)\n', (3980, 4004), True, 'import tensorflow as tf\n'), ((4616, 4694), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'batch_input_shape': '(num_ensembles, None, input_dim)'}), '(batch_input_shape=(num_ensembles, None, input_dim))\n', (4642, 4694), True, 'import tensorflow as tf\n'), ((6135, 6165), 'tensorflow.concat', 'tf.concat', (['(obs, act)'], {'axis': '(-1)'}), '((obs, act), axis=-1)\n', (6144, 6165), True, 'import tensorflow as tf\n'), ((8155, 8197), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (8179, 8197), True, 'import tensorflow as tf\n'), ((8225, 8267), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (8249, 8267), True, 'import tensorflow as tf\n'), ((9997, 10057), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['epsilon', 
'(-self.noise_clip)', 'self.noise_clip'], {}), '(epsilon, -self.noise_clip, self.noise_clip)\n', (10013, 10057), True, 'import tensorflow as tf\n'), ((10124, 10182), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['next_action', '(-self.act_lim)', 'self.act_lim'], {}), '(next_action, -self.act_lim, self.act_lim)\n', (10140, 10182), True, 'import tensorflow as tf\n'), ((11967, 12010), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['obs'], {'dtype': 'tf.float32'}), '(obs, dtype=tf.float32)\n', (11987, 12010), True, 'import tensorflow as tf\n'), ((12025, 12068), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['act'], {'dtype': 'tf.float32'}), '(act, dtype=tf.float32)\n', (12045, 12068), True, 'import tensorflow as tf\n'), ((12084, 12128), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['obs2'], {'dtype': 'tf.float32'}), '(obs2, dtype=tf.float32)\n', (12104, 12128), True, 'import tensorflow as tf\n'), ((12144, 12188), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['done'], {'dtype': 'tf.float32'}), '(done, dtype=tf.float32)\n', (12164, 12188), True, 'import tensorflow as tf\n'), ((12203, 12246), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['rew'], {'dtype': 'tf.float32'}), '(rew, dtype=tf.float32)\n', (12223, 12246), True, 'import tensorflow as tf\n'), ((12618, 12645), 'tensorflow.expand_dims', 'tf.expand_dims', (['obs'], {'axis': '(0)'}), '(obs, axis=0)\n', (12632, 12645), True, 'import tensorflow as tf\n'), ((13925, 13943), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (13933, 13943), False, 'import gym\n'), ((15299, 15328), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': '(1)', 'desc': '"""Testing"""'}), "(total=1, desc='Testing')\n", (15303, 15328), False, 'from tqdm.auto import tqdm\n'), ((885, 898), 'tensorflow.abs', 'tf.abs', (['error'], {}), '(error)\n', (891, 898), True, 'import tensorflow as tf\n'), ((4138, 4190), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], 
{'input_shape': '(input_dim,)'}), '(input_shape=(input_dim,))\n', (4164, 4190), True, 'import tensorflow as tf\n'), ((4200, 4256), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['mlp_hidden'], {'activation': 'activation'}), '(mlp_hidden, activation=activation)\n', (4221, 4256), True, 'import tensorflow as tf\n'), ((4266, 4322), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['mlp_hidden'], {'activation': 'activation'}), '(mlp_hidden, activation=activation)\n', (4287, 4322), True, 'import tensorflow as tf\n'), ((4332, 4392), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dim'], {'activation': 'out_activation'}), '(output_dim, activation=out_activation)\n', (4353, 4392), True, 'import tensorflow as tf\n'), ((6191, 6221), 'tensorflow.expand_dims', 'tf.expand_dims', (['inputs'], {'axis': '(0)'}), '(inputs, axis=0)\n', (6205, 6221), True, 'import tensorflow as tf\n'), ((6382, 6406), 'tensorflow.reduce_min', 'tf.reduce_min', (['q'], {'axis': '(0)'}), '(q, axis=0)\n', (6395, 6406), True, 'import tensorflow as tf\n'), ((7112, 7127), 'tensorflow.tanh', 'tf.tanh', (['pi_raw'], {}), '(pi_raw)\n', (7119, 7127), True, 'import tensorflow as tf\n'), ((10374, 10391), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10389, 10391), True, 'import tensorflow as tf\n'), ((10828, 10864), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['q_values_loss'], {'axis': '(0)'}), '(q_values_loss, axis=0)\n', (10841, 10864), True, 'import tensorflow as tf\n'), ((10943, 10972), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['q_values_loss'], {}), '(q_values_loss)\n', (10957, 10972), True, 'import tensorflow as tf\n'), ((11431, 11448), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (11446, 11448), True, 'import tensorflow as tf\n'), ((13112, 13167), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['pi_final', '(-self.act_lim)', 'self.act_lim'], {}), '(pi_final, -self.act_lim, self.act_lim)\n', (13128, 13167), True, 'import 
tensorflow as tf\n'), ((15119, 15167), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_test_episodes', 'dtype': 'np.bool'}), '(shape=num_test_episodes, dtype=np.bool)\n', (15127, 15167), True, 'import numpy as np\n'), ((15202, 15235), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_test_episodes'}), '(shape=num_test_episodes)\n', (15210, 15235), True, 'import numpy as np\n'), ((15237, 15286), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_test_episodes', 'dtype': 'np.int64'}), '(shape=num_test_episodes, dtype=np.int64)\n', (15245, 15286), True, 'import numpy as np\n'), ((15347, 15356), 'numpy.all', 'np.all', (['d'], {}), '(d)\n', (15353, 15356), True, 'import numpy as np\n'), ((15604, 15624), 'numpy.logical_or', 'np.logical_or', (['d', 'd_'], {}), '(d, d_)\n', (15617, 15624), True, 'import numpy as np\n'), ((11570, 11595), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['q'], {'axis': '(0)'}), '(q, axis=0)\n', (11584, 11595), True, 'import tensorflow as tf\n'), ((18302, 18329), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'steps_per_epoch'}), '(total=steps_per_epoch)\n', (18306, 18329), False, 'from tqdm.auto import tqdm\n'), ((8474, 8532), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, self.ob_dim]', 'dtype': 'tf.float32'}), '(shape=[None, self.ob_dim], dtype=tf.float32)\n', (8487, 8532), True, 'import tensorflow as tf\n'), ((8546, 8584), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '()', 'dtype': 'tf.bool'}), '(shape=(), dtype=tf.bool)\n', (8559, 8584), True, 'import tensorflow as tf\n'), ((8692, 8750), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, self.ob_dim]', 'dtype': 'tf.float32'}), '(shape=[None, self.ob_dim], dtype=tf.float32)\n', (8705, 8750), True, 'import tensorflow as tf\n'), ((8764, 8822), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, self.ac_dim]', 'dtype': 'tf.float32'}), '(shape=[None, self.ac_dim], dtype=tf.float32)\n', (8777, 8822), True, 'import tensorflow as tf\n'), ((8836, 8894), 
'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, self.ob_dim]', 'dtype': 'tf.float32'}), '(shape=[None, self.ob_dim], dtype=tf.float32)\n', (8849, 8894), True, 'import tensorflow as tf\n'), ((8908, 8953), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None]', 'dtype': 'tf.float32'}), '(shape=[None], dtype=tf.float32)\n', (8921, 8953), True, 'import tensorflow as tf\n'), ((8967, 9012), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None]', 'dtype': 'tf.float32'}), '(shape=[None], dtype=tf.float32)\n', (8980, 9012), True, 'import tensorflow as tf\n'), ((9122, 9180), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, self.ob_dim]', 'dtype': 'tf.float32'}), '(shape=[None, self.ob_dim], dtype=tf.float32)\n', (9135, 9180), True, 'import tensorflow as tf\n'), ((10585, 10617), 'tensorflow.expand_dims', 'tf.expand_dims', (['q_target'], {'axis': '(0)'}), '(q_target, axis=0)\n', (10599, 10617), True, 'import tensorflow as tf\n'), ((14789, 14830), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['o'], {'dtype': 'tf.float32'}), '(o, dtype=tf.float32)\n', (14809, 14830), True, 'import tensorflow as tf\n'), ((14832, 14867), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['deterministic'], {}), '(deterministic)\n', (14852, 14867), True, 'import tensorflow as tf\n'), ((14959, 15000), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['o'], {'dtype': 'tf.float32'}), '(o, dtype=tf.float32)\n', (14979, 15000), True, 'import tensorflow as tf\n'), ((15002, 15037), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['deterministic'], {}), '(deterministic)\n', (15022, 15037), True, 'import tensorflow as tf\n'), ((15520, 15568), 'numpy.ones', 'np.ones', ([], {'shape': 'num_test_episodes', 'dtype': 'np.int64'}), '(shape=num_test_episodes, dtype=np.int64)\n', (15527, 15568), True, 'import numpy as np\n'), ((18183, 18194), 'time.time', 'time.time', ([], {}), '()\n', (18192, 18194), False, 'import time\n'), 
((9927, 9940), 'tensorflow.shape', 'tf.shape', (['obs'], {}), '(obs)\n', (9935, 9940), True, 'import tensorflow as tf\n'), ((10719, 10751), 'tensorflow.expand_dims', 'tf.expand_dims', (['q_target'], {'axis': '(0)'}), '(q_target, axis=0)\n', (10733, 10751), True, 'import tensorflow as tf\n'), ((12980, 12993), 'tensorflow.shape', 'tf.shape', (['obs'], {}), '(obs)\n', (12988, 12993), True, 'import tensorflow as tf\n')] |
"""
Module allows the grabbing of dose rate factors for the calculation of radiotoxicity. There are four dose rates provided:
1. external from air (mrem/h per Ci/m^3)
Table includes: nuclide, air dose rate factor, ratio to inhalation dose (All EPA values)
2. external from 15 cm of soil (mrem/h per Ci/m^2)
Table includes: nuclide, GENII, EPA, DOE, GENII/EPA, DOE/EPA
3. ingestion (mrem/pCi)
Table includes: nuclide, f1 (fraction of the activity ingested that enters body fluids.), GENII, EPA, DOE, GENII/EPA, DOE/EPA
4. inhalation (mrem/pCi)
Table includes: nuclide, lung model*, GENII, EPA, DOE, GENII/EPA, DOE/EPA
This data is from:
[Exposure Scenarios and Unit Dose Factors for the Hanford
Immobilized Low-Activity Tank Waste Performance Assessment, ref.
HNF-SD-WM-TI-707 Rev. 1 December 1999] Appendix O of HNF-5636 [DATA PACKAGES
FOR THE HANFORD IMMOBILIZED LOW-ACTIVITY TANK WASTE PERFORMANCE ASSESSMENT:
2001 VERSION]
Liability Disclaimer:
The PyNE Development Team shall not be liable for any loss or injury resulting
from decisions made with this data.
*Lung Model:
"V" for tritium stands for vapor (50% larger absorption)
"O" for C-14 means that the carbon is assumed to have an Organic chemical form.
"D" material clears the lungs in days
"W" material clears the lungs in weeks
"Y" material clears the lungs in years
"""
from __future__ import print_function
import csv
import os
import numpy as np
import tables as tb
from pyne import nucname
from pyne.api import nuc_data
from pyne.dbgen.api import BASIC_FILTERS
def read_row(row):
    """Parse one CSV row into a per-nuclide entry list.

    The returned list's layout depends on which dose factor table the row
    came from (distinguished by the column count):
    1. External DF in Air: [int, float, float]
       (nuclide, air dose factor, ratio-to-inhalation or None)
    2. External DF in Soil: [int, float, float, float]
       (nuclide, GENII, EPA, DOE)
    3. Ingestion DF: [int, float, float, float, float]
       (nuclide, GENII, EPA, DOE, f1)
    4. Inhalation DF: [int, float, float, float, string]
       (nuclide, GENII, EPA, DOE, lung model)

    Parameters
    ----------
    row : tuple
        One entry in a dose factor file.
    """
    # Create list
    entry = []
    # Evaluate each component of the given row
    # Strip a trailing "+D" (nuclide listed together with short-lived
    # daughters) so the name resolves through nucname.
    if row[0].endswith("+D"):
        row[0] = row[0][:-2]
    nuclide = nucname.id(row[0])
    # Case 1: DF from External Air
    if len(row) == 3:
        dose_air = float(row[1])
        # An empty ratio cell becomes None (normalized to -1 downstream).
        if len(row[2]) == 0:
            ratio = None
        else:
            ratio = float(row[2])
        entry = [nuclide, dose_air, ratio]
    # Case 2: DF from External Soil
    elif len(row) == 6:
        genii = float(row[1])
        epa = float(row[2])
        doe = float(row[3])
        entry = [nuclide, genii, epa, doe]
    # Case 4: DF from Inhalation (row carries a lung-model letter in column 1)
    elif len(row) == 7 and row[1].isalpha():
        lungmodel = row[1]
        genii = float(row[2])
        epa = float(row[3])
        doe = float(row[4])
        entry = [nuclide, genii, epa, doe, lungmodel]
    # Case 3: DF from Ingestion (column 1 is the body-fluid fraction f1)
    else:
        f1 = float(row[1])
        genii = float(row[2])
        epa = float(row[3])
        doe = float(row[4])
        entry = [nuclide, genii, epa, doe, f1]
    return entry
def grab_dose_factors():
    """Parse the four dose factor CSV files and regroup them by source.

    Merges the air / soil / ingestion / inhalation factors into one row per
    nuclide, then splits that row back out into three tuples — one per data
    source (GENII, EPA, DOE) — matching the dose_dtype field order used by
    make_dose_tables.

    Returns
    -------
    (genii, epa, doe) : three lists of 8-tuples
        (nuc, ext_air_dose, ratio, ext_soil_dose, ingest_dose, fluid_frac,
        inhale_dose, lung_mod); fields a source does not provide are -1.
    """
    # Populates Dose Factor list with initial set of nuclides: opens first .csv file and parses it
    dose_factors = []
    with open(
        os.path.join(os.path.dirname(__file__), "dosefactors_external_air.csv"), "r"
    ) as f:
        reader = csv.reader(f)
        # Skip the two header lines.
        next(f)
        next(f)
        for row in reader:
            entry = read_row(row)
            dose_factors.append(entry)
    # Loops through remaining three files to add other dose factors to each nuclide
    dose_files = [
        "dosefactors_external_soil.csv",
        "dosefactors_ingest.csv",
        "dosefactors_inhale.csv",
    ]
    for fname in dose_files:
        # Opens remaining .csv files and parses them
        with open(os.path.join(os.path.dirname(__file__), fname), "r") as f:
            reader = csv.reader(f)
            next(f)
            next(f)
            for row in reader:
                entry = read_row(row)
                # Adds info to nuclide's row
                # After all files are merged each nuclide row is:
                # [0] nuc, [1] air dose, [2] ratio,
                # [3-5] soil GENII/EPA/DOE, [6-8] ingestion GENII/EPA/DOE,
                # [9] f1, [10-12] inhalation GENII/EPA/DOE, [13] lung model.
                for nuclide in dose_factors:
                    if entry[0] == nuclide[0]:
                        nuclide += entry[1 : len(entry)]
    # Create three dose factor lists with respect to source
    genii = []
    epa = []
    doe = []
    for nuclide in dose_factors:
        # Normalize missing values (None) to the -1 sentinel.
        for i, val in enumerate(nuclide):
            if val is None:
                nuclide[i] = -1
        # GENII provides no external-air data, so those two fields are -1.
        genii_row = (
            nuclide[0],
            -1,
            -1,
            nuclide[3],
            nuclide[6],
            nuclide[9],
            nuclide[10],
            nuclide[13],
        )
        genii.append(genii_row)
        # EPA is the only source with external-air values (indices 1 and 2).
        epa_row = (
            nuclide[0],
            nuclide[1],
            nuclide[2],
            nuclide[4],
            nuclide[7],
            nuclide[9],
            nuclide[11],
            nuclide[13],
        )
        epa.append(epa_row)
        # DOE, like GENII, has no external-air data.
        doe_row = (
            nuclide[0],
            -1,
            -1,
            nuclide[5],
            nuclide[8],
            nuclide[9],
            nuclide[12],
            nuclide[13],
        )
        doe.append(doe_row)
    return genii, epa, doe
def make_dose_tables(genii, epa, doe, nuc_data, build_dir=""):
    """Adds three dose factor tables to the nuc_data.h5 library.

    Creates a /dose_factors group in the HDF5 file and writes one table per
    data source (GENII, EPA, DOE), all sharing the same record layout.

    Parameters
    ----------
    genii: list of tuples
        Array of dose factors calculated by the code GENII.
    epa: list of tuples
        Array of dose factors calculated by the EPA.
    doe: list of tuples
        Array of dose factors calculated by the DOE.
    nuc_data : str
        Path to nuclide data file.
    build_dir : str
        Directory to place q_value files in.
        NOTE(review): build_dir is accepted but never used in this function.
    """
    # Define data types for all three cases
    # Field order must match the tuple order produced by grab_dose_factors.
    dose_dtype = np.dtype(
        [
            ("nuc", int),
            ("ext_air_dose", float),
            ("ratio", float),
            ("ext_soil_dose", float),
            ("ingest_dose", float),
            ("fluid_frac", float),
            ("inhale_dose", float),
            ("lung_mod", "S10"),
        ]
    )
    # Convert to numpy arrays
    genii_array = np.array(genii, dtype=dose_dtype)
    epa_array = np.array(epa, dtype=dose_dtype)
    doe_array = np.array(doe, dtype=dose_dtype)
    # Open the hdf5 file
    nuc_file = tb.open_file(nuc_data, "a", filters=BASIC_FILTERS)
    # Create a group for the tables
    dose_group = nuc_file.create_group("/", "dose_factors", "Dose Rate Factors")
    # Make three new tables
    genii_table = nuc_file.create_table(
        dose_group,
        "GENII",
        genii_array,
        "Nuclide, External Air Dose Factor [mrem/h per Ci/m^3], Fraction of Ext Air Dose to Inhalation Dose, External Soil Dose Factor [mrem/h per Ci/m^2], Ingestion Dose Factor [mrem/pCi], Fraction of Activity in Body Fluids, Inhalation Dose Factor [mrem/pCi], Lung Model Used",
    )
    epa_table = nuc_file.create_table(
        dose_group,
        "EPA",
        epa_array,
        "Nuclide, External Air Dose Factor [mrem/h per Ci/m^3], Fraction of Ext Air Dose to Inhalation Dose, External Soil Dose Factor [mrem/h per Ci/m^2], Ingestion Dose Factor [mrem/pCi], Fraction of Activity in Body Fluids, Inhalation Dose Factor [mrem/pCi], Lung Model Used",
    )
    doe_table = nuc_file.create_table(
        dose_group,
        "DOE",
        doe_array,
        "Nuclide, External Air Dose Factor [mrem/h per Ci/m^3], Fraction of Ext Air Dose to Inhalation Dose, External Soil Dose Factor [mrem/h per Ci/m^2], Ingestion Dose Factor [mrem/pCi], Fraction of Activity in Body Fluids, Inhalation Dose Factor [mrem/pCi], Lung Model Used",
    )
    # Ensure that data was written to table
    genii_table.flush()
    epa_table.flush()
    doe_table.flush()
    # Close the hdf5 file
    nuc_file.close()
def make_dose_factors(args):
    """Entry point that adds the dose factor tables to the nuclide data file.

    Skips all work when the HDF5 file already contains a /dose_factors
    group; otherwise parses the CSV sources and writes the GENII, EPA and
    DOE tables.

    Parameters
    ----------
    args : namespace
        Must provide ``nuc_data`` (path to the HDF5 file) and ``build_dir``.
    """
    nuc_data = args.nuc_data
    build_dir = args.build_dir
    # Bail out early if the tables were generated on a previous run.
    if os.path.exists(nuc_data):
        with tb.open_file(nuc_data, "r") as f:
            already_present = "/dose_factors" in f
        if already_present:
            print("skipping creation of dose factor tables; already exists.")
            return
    print("Grabbing dose factors...")
    genii, epa, doe = grab_dose_factors()
    print("Making dose factor tables...")
    make_dose_tables(genii, epa, doe, nuc_data, build_dir)
| [
"os.path.exists",
"pyne.nucname.id",
"tables.open_file",
"numpy.array",
"os.path.dirname",
"numpy.dtype",
"csv.reader"
] | [((2144, 2162), 'pyne.nucname.id', 'nucname.id', (['row[0]'], {}), '(row[0])\n', (2154, 2162), False, 'from pyne import nucname\n'), ((5826, 6020), 'numpy.dtype', 'np.dtype', (["[('nuc', int), ('ext_air_dose', float), ('ratio', float), ('ext_soil_dose',\n float), ('ingest_dose', float), ('fluid_frac', float), ('inhale_dose',\n float), ('lung_mod', 'S10')]"], {}), "([('nuc', int), ('ext_air_dose', float), ('ratio', float), (\n 'ext_soil_dose', float), ('ingest_dose', float), ('fluid_frac', float),\n ('inhale_dose', float), ('lung_mod', 'S10')])\n", (5834, 6020), True, 'import numpy as np\n'), ((6182, 6215), 'numpy.array', 'np.array', (['genii'], {'dtype': 'dose_dtype'}), '(genii, dtype=dose_dtype)\n', (6190, 6215), True, 'import numpy as np\n'), ((6232, 6263), 'numpy.array', 'np.array', (['epa'], {'dtype': 'dose_dtype'}), '(epa, dtype=dose_dtype)\n', (6240, 6263), True, 'import numpy as np\n'), ((6280, 6311), 'numpy.array', 'np.array', (['doe'], {'dtype': 'dose_dtype'}), '(doe, dtype=dose_dtype)\n', (6288, 6311), True, 'import numpy as np\n'), ((6353, 6403), 'tables.open_file', 'tb.open_file', (['nuc_data', '"""a"""'], {'filters': 'BASIC_FILTERS'}), "(nuc_data, 'a', filters=BASIC_FILTERS)\n", (6365, 6403), True, 'import tables as tb\n'), ((8004, 8028), 'os.path.exists', 'os.path.exists', (['nuc_data'], {}), '(nuc_data)\n', (8018, 8028), False, 'import os\n'), ((3380, 3393), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3390, 3393), False, 'import csv\n'), ((3925, 3938), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3935, 3938), False, 'import csv\n'), ((8043, 8070), 'tables.open_file', 'tb.open_file', (['nuc_data', '"""r"""'], {}), "(nuc_data, 'r')\n", (8055, 8070), True, 'import tables as tb\n'), ((3287, 3312), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3302, 3312), False, 'import os\n'), ((3858, 3883), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3873, 3883), False, 'import os\n')] |
import pickle
import numpy as np
import tensorflow as tf
import librosa
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
import json
import glob
from Input import Input
import Models.UnetAudioSeparator
import Models.UnetSpectrogramSeparator
import musdb
import museval
def alpha_snr(target, estimate):
    """Scale-invariant SNR of an estimate against a reference signal.

    Computes 10 * log10(||s||^2 / ||s - alpha * s_hat||^2) where alpha is
    chosen to maximize the SNR: alpha = <s_hat, s> / ||s_hat||^2.

    :param target: reference signal (numpy array)
    :param estimate: estimated signal, same shape as target
    :return: SNR in dB (float)
    """
    estimate_energy = np.sum(np.square(estimate))
    target_energy = np.sum(np.square(target))
    # Optimal scaling of the estimate in the least-squares sense.
    scale = np.inner(estimate, target) / estimate_energy
    residual = target - scale * estimate
    residual_energy = np.sum(np.square(residual))
    return 10 * np.log10(target_energy / residual_energy)
def predict(track):
    '''
    Function in accordance with the MUSDB evaluation API. Takes a MUSDB track object, computes the corresponding source estimates, and calls the evaluation script.
    The model has to be saved beforehand into a pickle file containing the model configuration dictionary and checkpoint path!
    :param track: Track object
    :return: Source estimates dictionary
    '''
    # Commented-out shortcut for skipping particular tracks during evaluation.
    '''if track.filename[:4] == "test" or int(track.filename[:3]) > 53:
        return {
            'vocals': np.zeros(track.audio.shape),
            'accompaniment': np.zeros(track.audio.shape)
        }'''
    # Load model hyper-parameters and model checkpoint path
    # NOTE(review): text-mode "r" only works for protocol-0 pickles on
    # Python 2; under Python 3 this must be opened with "rb" — confirm the
    # target interpreter before porting.
    with open("prediction_params.pkl", "r") as file:
        [model_config, load_model] = pickle.load(file)
    # Determine input and output shapes, if we use U-net as separator
    disc_input_shape = [model_config["batch_size"], model_config["num_frames"], 0] # Shape of discriminator input
    if model_config["network"] == "unet":
        separator_class = Models.UnetAudioSeparator.UnetAudioSeparator(model_config["num_layers"], model_config["num_initial_filters"],
                                                   output_type=model_config["output_type"],
                                                   context=model_config["context"],
                                                   mono=model_config["mono_downmix"],
                                                   upsampling=model_config["upsampling"],
                                                   num_sources=model_config["num_sources"],
                                                   filter_size=model_config["filter_size"],
                                                   merge_filter_size=model_config["merge_filter_size"])
    elif model_config["network"] == "unet_spectrogram":
        separator_class = Models.UnetSpectrogramSeparator.UnetSpectrogramSeparator(model_config["num_layers"], model_config["num_initial_filters"],
                                                       mono=model_config["mono_downmix"],
                                                       num_sources=model_config["num_sources"])
    else:
        raise NotImplementedError
    # Padding derived from the network architecture (input context > output).
    sep_input_shape, sep_output_shape = separator_class.get_padding(np.array(disc_input_shape))
    separator_func = separator_class.get_output
    # Batch size of 1
    sep_input_shape[0] = 1
    sep_output_shape[0] = 1
    mix_context, sources = Input.get_multitrack_placeholders(sep_output_shape, model_config["num_sources"], sep_input_shape, "input")
    print("Testing...")
    # BUILD MODELS
    # Separator
    separator_sources = separator_func(mix_context, False, reuse=False)
    # Start session and queue input threads
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # Load model
    # Load pretrained model to continue training, if we are supposed to
    restorer = tf.train.Saver(None, write_version=tf.train.SaverDef.V2)
    print("Num of variables" + str(len(tf.global_variables())))
    restorer.restore(sess, load_model)
    print('Pre-trained model restored for song prediction')
    mix_audio, orig_sr, mix_channels = track.audio, track.rate, track.audio.shape[1] # Audio has (n_samples, n_channels) shape
    separator_preds = predict_track(model_config, sess, mix_audio, orig_sr, sep_input_shape, sep_output_shape, separator_sources, mix_context)
    # Upsample predicted source audio and convert to stereo
    pred_audio = [librosa.resample(pred.T, model_config["expected_sr"], orig_sr).T for pred in separator_preds]
    if model_config["mono_downmix"] and mix_channels > 1: # Convert to multichannel if mixture input was multichannel by duplicating mono estimate
        pred_audio = [np.tile(pred, [1, mix_channels]) for pred in pred_audio]
    # Set estimates depending on estimation task (voice or multi-instrument separation)
    if model_config["task"] == "voice": # [acc, vocals] order
        estimates = {
            'vocals' : pred_audio[1],
            'accompaniment' : pred_audio[0]
        }
    else: # [bass, drums, other, vocals]
        estimates = {
            'bass' : pred_audio[0],
            'drums' : pred_audio[1],
            'other' : pred_audio[2],
            'vocals' : pred_audio[3]
        }
    # Evaluate using museval
    scores = museval.eval_mus_track(
        track, estimates, output_dir="/mnt/daten/Datasets/MUSDB18/eval", # SiSec should use longer win and hop parameters here to make evaluation more stable!
    )
    # print nicely formatted mean scores
    print(scores)
    # Close session, clear computational graph
    sess.close()
    tf.reset_default_graph()
    return estimates
def predict_track(model_config, sess, mix_audio, mix_sr, sep_input_shape, sep_output_shape, separator_sources, mix_context):
    '''
    Outputs source estimates for a given input mixture signal mix_audio [n_frames, n_channels] and a given Tensorflow session and placeholders belonging to the prediction network.
    It iterates through the track, collecting segment-wise predictions to form the output.
    :param model_config: Model configuration dictionary
    :param sess: Tensorflow session used to run the network inference
    :param mix_audio: [n_frames, n_channels] audio signal (numpy array). Can have higher sampling rate or channels than the model supports, will be downsampled correspondingly.
    :param mix_sr: Sampling rate of mix_audio
    :param sep_input_shape: Input shape of separator ([batch_size, num_samples, num_channels])
    :param sep_output_shape: Output shape of separator ([batch_size, num_samples, num_channels])
    :param separator_sources: List of Tensorflow tensors that represent the output of the separator network
    :param mix_context: Input tensor of the network
    :return: List of [n_frames, n_channels] numpy arrays, one per estimated source
    '''
    # Load mixture, convert to mono and downsample then
    assert(len(mix_audio.shape) == 2)
    if model_config["mono_downmix"]:
        mix_audio = np.mean(mix_audio, axis=1, keepdims=True)
    else:
        if mix_audio.shape[1] == 1:  # Duplicate channels if input is mono but model is stereo
            mix_audio = np.tile(mix_audio, [1, 2])
    mix_audio = librosa.resample(mix_audio.T, mix_sr, model_config["expected_sr"], res_type="kaiser_fast").T
    # Preallocate source predictions (same shape as input mixture)
    source_time_frames = mix_audio.shape[0]
    source_preds = [np.zeros(mix_audio.shape, np.float32) for _ in range(model_config["num_sources"])]
    input_time_frames = sep_input_shape[1]
    output_time_frames = sep_output_shape[1]
    # Pad mixture across time at beginning and end so that neural network can make prediction at the beginning and end of signal.
    # Floor division is required here: under Python 3, "/" yields a float and
    # np.pad rejects non-integer pad widths.
    pad_time_frames = (input_time_frames - output_time_frames) // 2
    mix_audio_padded = np.pad(mix_audio, [(pad_time_frames, pad_time_frames), (0,0)], mode="constant", constant_values=0.0)
    # Iterate over mixture magnitudes, fetch network prediction
    for source_pos in range(0, source_time_frames, output_time_frames):
        # If this output patch would reach over the end of the source spectrogram, set it so we predict the very end of the output, then stop
        if source_pos + output_time_frames > source_time_frames:
            source_pos = source_time_frames - output_time_frames
        # Prepare mixture excerpt by selecting time interval
        mix_part = mix_audio_padded[source_pos:source_pos + input_time_frames,:]
        mix_part = np.expand_dims(mix_part, axis=0)
        source_parts = sess.run(separator_sources, feed_dict={mix_context: mix_part})
        # Save predictions
        # source_shape = [1, freq_bins, acc_mag_part.shape[2], num_chan]
        for i in range(model_config["num_sources"]):
            source_preds[i][source_pos:source_pos + output_time_frames] = source_parts[i][0, :, :]
    return source_preds
def produce_source_estimates(model_config, load_model, musdb_path, output_path, subsets=None):
    '''
    Predicts source estimates for MUSDB for a given model checkpoint and configuration, and evaluate them.
    :param model_config: Model configuration of the model to be evaluated
    :param load_model: Model checkpoint path
    :param musdb_path: Root directory of the MUSDB dataset
    :param output_path: Directory where the source estimates are written
    :param subsets: Optional MUSDB subset selection forwarded to musdb
    :return:
    '''
    # Persist the settings so the `predict` callback (invoked by musdb) can load them.
    with open("prediction_params.pkl", "wb") as handle:
        pickle.dump([model_config, load_model], handle)
    database = musdb.DB(root_dir=musdb_path)
    database.run(predict, estimates_dir=output_path, subsets=subsets)
def compute_mean_metrics(json_folder, compute_averages=True):
    '''
    Aggregate segment-wise SDR values from museval JSON result files.
    :param json_folder: Folder containing the museval *.json evaluation outputs
    :param compute_averages: If True, return per-instrument (median, MAD, mean, std)
                             tuples; otherwise return the raw per-instrument SDR arrays
    :return: List with one entry per target instrument (see compute_averages)
    '''
    files = glob.glob(os.path.join(json_folder, "*.json"))
    sdr_inst_list = None
    for path in files:
        with open(path, "r") as f:
            js = json.load(f)
        # Lazily size the per-instrument accumulators from the first file.
        if sdr_inst_list is None:
            sdr_inst_list = [list() for _ in range(len(js["targets"]))]
        for i in range(len(js["targets"])):
            # np.float was removed from NumPy (>= 1.24); the builtin float is equivalent.
            sdr_inst_list[i].extend([float(f['metrics']["SDR"]) for f in js["targets"][i]["frames"]])
    sdr_inst_list = [np.array(sdr) for sdr in sdr_inst_list]
    if compute_averages:
        # (median, median absolute deviation, mean, std) per instrument, NaN-aware.
        return [(np.nanmedian(sdr), np.nanmedian(np.abs(sdr - np.nanmedian(sdr))), np.nanmean(sdr), np.nanstd(sdr)) for sdr in sdr_inst_list]
    else:
        return sdr_inst_list
def draw_violin_sdr(json_folder):
    """Render horizontal violin plots of the segment-wise SDR distributions for
    accompaniment and vocals and save the figure to sdr_histogram.pdf."""
    acc, voc = compute_mean_metrics(json_folder, compute_averages=False)
    # Drop NaN segments before plotting.
    acc = acc[~np.isnan(acc)]
    voc = voc[~np.isnan(voc)]
    series = [acc, voc]
    positions = [1, 2]
    fig, ax = plt.subplots()
    ax.violinplot(series, showmeans=True, showmedians=False, showextrema=False, vert=False)
    # Mark the median of each distribution with a black dot.
    ax.scatter(np.percentile(series, 50, axis=1), positions, marker="o", color="black")
    ax.set_title("Segment-wise SDR distribution")
    # Blue tick marks at each distribution's extremes, dashed lines for their range.
    ax.vlines([np.min(acc), np.min(voc), np.max(acc), np.max(voc)], [0.8, 1.8, 0.8, 1.8], [1.2, 2.2, 1.2, 2.2], color="blue")
    ax.hlines(positions, [np.min(acc), np.min(voc)], [np.max(acc), np.max(voc)], color='black', linestyle='--', lw=1, alpha=0.5)
    ax.set_yticks([1, 2])
    ax.set_yticklabels(["Accompaniment", "Vocals"])
    fig.set_size_inches(8, 3.)
    fig.savefig("sdr_histogram.pdf", bbox_inches='tight')
def draw_spectrogram(example_wav="musb_005_angela thomas wade_audio_model_without_context_cut_28234samples_61002samples_93770samples_126538.wav"):
    """Plot the STFT spectrogram of an example wav and save it to spectrogram_example.pdf.

    Red vertical markers are drawn at hard-coded sample positions (presumably
    the segment boundaries encoded in the default filename -- TODO confirm),
    converted to STFT frame indices via the hop size of 256.
    """
    y, sr = librosa.load(example_wav, sr=None)
    # Magnitude STFT; positional args are (n_fft=512, hop_length=256, win_length=512).
    # NOTE(review): newer librosa versions make these keyword-only -- verify version.
    spec = np.abs(librosa.stft(y, 512, 256, 512))
    norm_spec = librosa.power_to_db(spec**2)
    # Sample indices of the marked positions divided by the hop size -> frame indices.
    black_time_frames = np.array([28234, 61002, 93770, 126538]) / 256.0
    fig, ax = plt.subplots()
    img = ax.imshow(norm_spec)
    # Short red markers at the top and bottom edge of the spectrogram image.
    plt.vlines(black_time_frames, [0, 0, 0, 0], [10, 10, 10, 10], colors="red", lw=2, alpha=0.5)
    plt.vlines(black_time_frames, [256, 256, 256, 256], [246, 246, 246, 246], colors="red", lw=2, alpha=0.5)
    # Attach a colorbar axis to the right of the image.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.1)
    plt.colorbar(img, cax=cax)
    ax.xaxis.set_label_position("bottom")
    #ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x * 256.0 / sr))
    #ax.xaxis.set_major_formatter(ticks_x)
    # X axis: one tick per second of audio (sr samples = sr/256 frames), labelled in seconds.
    ax.xaxis.set_major_locator(ticker.FixedLocator(([i * sr / 256. for i in range(len(y)//sr + 1)])))
    ax.xaxis.set_major_formatter(ticker.FixedFormatter(([str(i) for i in range(len(y)//sr + 1)])))
    # Y axis: ticks every 2 kHz up to Nyquist (sr/2), mapped onto the 256 frequency bins.
    ax.yaxis.set_major_locator(ticker.FixedLocator(([float(i) * 2000.0 / (sr/2.0) * 256. for i in range(6)])))
    ax.yaxis.set_major_formatter(ticker.FixedFormatter([str(i*2) for i in range(6)]))
    ax.set_xlabel("t (s)")
    ax.set_ylabel('f (KHz)')
    fig.set_size_inches(7., 3.)
    fig.savefig("spectrogram_example.pdf", bbox_inches='tight')
#compute_mean_metrics("/mnt/windaten/Source_Estimates/endtoend/", False) | [
"numpy.log10",
"matplotlib.pyplot.vlines",
"librosa.resample",
"numpy.array",
"numpy.nanmean",
"librosa.load",
"numpy.mean",
"tensorflow.Session",
"numpy.max",
"musdb.DB",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.min",
"numpy.tile",
"numpy.nanstd",
"museval.eval_mus_track",
... | [((760, 786), 'numpy.inner', 'np.inner', (['estimate', 'target'], {}), '(estimate, target)\n', (768, 786), True, 'import numpy as np\n'), ((3555, 3666), 'Input.Input.get_multitrack_placeholders', 'Input.get_multitrack_placeholders', (['sep_output_shape', "model_config['num_sources']", 'sep_input_shape', '"""input"""'], {}), "(sep_output_shape, model_config[\n 'num_sources'], sep_input_shape, 'input')\n", (3588, 3666), False, 'from Input import Input\n'), ((3851, 3863), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3861, 3863), True, 'import tensorflow as tf\n'), ((4017, 4073), 'tensorflow.train.Saver', 'tf.train.Saver', (['None'], {'write_version': 'tf.train.SaverDef.V2'}), '(None, write_version=tf.train.SaverDef.V2)\n', (4031, 4073), True, 'import tensorflow as tf\n'), ((5436, 5528), 'museval.eval_mus_track', 'museval.eval_mus_track', (['track', 'estimates'], {'output_dir': '"""/mnt/daten/Datasets/MUSDB18/eval"""'}), "(track, estimates, output_dir=\n '/mnt/daten/Datasets/MUSDB18/eval')\n", (5458, 5528), False, 'import museval\n'), ((5754, 5778), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5776, 5778), True, 'import tensorflow as tf\n'), ((7908, 8014), 'numpy.pad', 'np.pad', (['mix_audio', '[(pad_time_frames, pad_time_frames), (0, 0)]'], {'mode': '"""constant"""', 'constant_values': '(0.0)'}), "(mix_audio, [(pad_time_frames, pad_time_frames), (0, 0)], mode=\n 'constant', constant_values=0.0)\n", (7914, 8014), True, 'import numpy as np\n'), ((9499, 9528), 'musdb.DB', 'musdb.DB', ([], {'root_dir': 'musdb_path'}), '(root_dir=musdb_path)\n', (9507, 9528), False, 'import musdb\n'), ((10709, 10723), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10721, 10723), True, 'import matplotlib.pyplot as plt\n'), ((11522, 11556), 'librosa.load', 'librosa.load', (['example_wav'], {'sr': 'None'}), '(example_wav, sr=None)\n', (11534, 11556), False, 'import librosa\n'), ((11623, 11653), 'librosa.power_to_db', 
'librosa.power_to_db', (['(spec ** 2)'], {}), '(spec ** 2)\n', (11642, 11653), False, 'import librosa\n'), ((11739, 11753), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11751, 11753), True, 'import matplotlib.pyplot as plt\n'), ((11789, 11885), 'matplotlib.pyplot.vlines', 'plt.vlines', (['black_time_frames', '[0, 0, 0, 0]', '[10, 10, 10, 10]'], {'colors': '"""red"""', 'lw': '(2)', 'alpha': '(0.5)'}), "(black_time_frames, [0, 0, 0, 0], [10, 10, 10, 10], colors='red',\n lw=2, alpha=0.5)\n", (11799, 11885), True, 'import matplotlib.pyplot as plt\n'), ((11886, 11994), 'matplotlib.pyplot.vlines', 'plt.vlines', (['black_time_frames', '[256, 256, 256, 256]', '[246, 246, 246, 246]'], {'colors': '"""red"""', 'lw': '(2)', 'alpha': '(0.5)'}), "(black_time_frames, [256, 256, 256, 256], [246, 246, 246, 246],\n colors='red', lw=2, alpha=0.5)\n", (11896, 11994), True, 'import matplotlib.pyplot as plt\n'), ((12006, 12029), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (12025, 12029), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((12093, 12119), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['img'], {'cax': 'cax'}), '(img, cax=cax)\n', (12105, 12119), True, 'import matplotlib.pyplot as plt\n'), ((677, 696), 'numpy.square', 'np.square', (['estimate'], {}), '(estimate)\n', (686, 696), True, 'import numpy as np\n'), ((724, 741), 'numpy.square', 'np.square', (['target'], {}), '(target)\n', (733, 741), True, 'import numpy as np\n'), ((852, 888), 'numpy.square', 'np.square', (['(target - alpha * estimate)'], {}), '(target - alpha * estimate)\n', (861, 888), True, 'import numpy as np\n'), ((905, 941), 'numpy.log10', 'np.log10', (['(target_power / error_power)'], {}), '(target_power / error_power)\n', (913, 941), True, 'import numpy as np\n'), ((1704, 1721), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1715, 1721), False, 'import pickle\n'), ((3373, 3399), 'numpy.array', 'np.array', 
(['disc_input_shape'], {}), '(disc_input_shape)\n', (3381, 3399), True, 'import numpy as np\n'), ((3877, 3910), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3908, 3910), True, 'import tensorflow as tf\n'), ((7079, 7120), 'numpy.mean', 'np.mean', (['mix_audio'], {'axis': '(1)', 'keepdims': '(True)'}), '(mix_audio, axis=1, keepdims=True)\n', (7086, 7120), True, 'import numpy as np\n'), ((7291, 7386), 'librosa.resample', 'librosa.resample', (['mix_audio.T', 'mix_sr', "model_config['expected_sr']"], {'res_type': '"""kaiser_fast"""'}), "(mix_audio.T, mix_sr, model_config['expected_sr'], res_type\n ='kaiser_fast')\n", (7307, 7386), False, 'import librosa\n'), ((7516, 7553), 'numpy.zeros', 'np.zeros', (['mix_audio.shape', 'np.float32'], {}), '(mix_audio.shape, np.float32)\n', (7524, 7553), True, 'import numpy as np\n'), ((8580, 8612), 'numpy.expand_dims', 'np.expand_dims', (['mix_part'], {'axis': '(0)'}), '(mix_part, axis=0)\n', (8594, 8612), True, 'import numpy as np\n'), ((9447, 9487), 'pickle.dump', 'pickle.dump', (['prediction_parameters', 'file'], {}), '(prediction_parameters, file)\n', (9458, 9487), False, 'import pickle\n'), ((9741, 9776), 'os.path.join', 'os.path.join', (['json_folder', '"""*.json"""'], {}), "(json_folder, '*.json')\n", (9753, 9776), False, 'import os\n'), ((10240, 10253), 'numpy.array', 'np.array', (['sdr'], {}), '(sdr)\n', (10248, 10253), True, 'import numpy as np\n'), ((10829, 10860), 'numpy.percentile', 'np.percentile', (['data', '(50)'], {'axis': '(1)'}), '(data, 50, axis=1)\n', (10842, 10860), True, 'import numpy as np\n'), ((11575, 11605), 'librosa.stft', 'librosa.stft', (['y', '(512)', '(256)', '(512)'], {}), '(y, 512, 256, 512)\n', (11587, 11605), False, 'import librosa\n'), ((11676, 11715), 'numpy.array', 'np.array', (['[28234, 61002, 93770, 126538]'], {}), '([28234, 61002, 93770, 126538])\n', (11684, 11715), True, 'import numpy as np\n'), ((4587, 4649), 'librosa.resample', 
'librosa.resample', (['pred.T', "model_config['expected_sr']", 'orig_sr'], {}), "(pred.T, model_config['expected_sr'], orig_sr)\n", (4603, 4649), False, 'import librosa\n'), ((4851, 4883), 'numpy.tile', 'np.tile', (['pred', '[1, mix_channels]'], {}), '(pred, [1, mix_channels])\n', (4858, 4883), True, 'import numpy as np\n'), ((7248, 7274), 'numpy.tile', 'np.tile', (['mix_audio', '[1, 2]'], {}), '(mix_audio, [1, 2])\n', (7255, 7274), True, 'import numpy as np\n'), ((9899, 9911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9908, 9911), False, 'import json\n'), ((10610, 10623), 'numpy.isnan', 'np.isnan', (['acc'], {}), '(acc)\n', (10618, 10623), True, 'import numpy as np\n'), ((10640, 10653), 'numpy.isnan', 'np.isnan', (['voc'], {}), '(voc)\n', (10648, 10653), True, 'import numpy as np\n'), ((10959, 10970), 'numpy.min', 'np.min', (['acc'], {}), '(acc)\n', (10965, 10970), True, 'import numpy as np\n'), ((10972, 10983), 'numpy.min', 'np.min', (['voc'], {}), '(voc)\n', (10978, 10983), True, 'import numpy as np\n'), ((10985, 10996), 'numpy.max', 'np.max', (['acc'], {}), '(acc)\n', (10991, 10996), True, 'import numpy as np\n'), ((10998, 11009), 'numpy.max', 'np.max', (['voc'], {}), '(voc)\n', (11004, 11009), True, 'import numpy as np\n'), ((11091, 11102), 'numpy.min', 'np.min', (['acc'], {}), '(acc)\n', (11097, 11102), True, 'import numpy as np\n'), ((11104, 11115), 'numpy.min', 'np.min', (['voc'], {}), '(voc)\n', (11110, 11115), True, 'import numpy as np\n'), ((11119, 11130), 'numpy.max', 'np.max', (['acc'], {}), '(acc)\n', (11125, 11130), True, 'import numpy as np\n'), ((11132, 11143), 'numpy.max', 'np.max', (['voc'], {}), '(voc)\n', (11138, 11143), True, 'import numpy as np\n'), ((10323, 10340), 'numpy.nanmedian', 'np.nanmedian', (['sdr'], {}), '(sdr)\n', (10335, 10340), True, 'import numpy as np\n'), ((10389, 10404), 'numpy.nanmean', 'np.nanmean', (['sdr'], {}), '(sdr)\n', (10399, 10404), True, 'import numpy as np\n'), ((10406, 10420), 'numpy.nanstd', 'np.nanstd', 
(['sdr'], {}), '(sdr)\n', (10415, 10420), True, 'import numpy as np\n'), ((4113, 4134), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (4132, 4134), True, 'import tensorflow as tf\n'), ((10101, 10130), 'numpy.float', 'np.float', (["f['metrics']['SDR']"], {}), "(f['metrics']['SDR'])\n", (10109, 10130), True, 'import numpy as np\n'), ((10368, 10385), 'numpy.nanmedian', 'np.nanmedian', (['sdr'], {}), '(sdr)\n', (10380, 10385), True, 'import numpy as np\n')] |
import numpy as np
from pymoo.experimental.deriv import DerivationBasedAlgorithm
from pymoo.algorithms.base.line import LineSearchProblem
from pymoo.algorithms.soo.univariate.exp import ExponentialSearch
from pymoo.algorithms.soo.univariate.golden import GoldenSectionSearch
from pymoo.core.population import Population
from pymoo.util.vectors import max_alpha
class GradientDescent(DerivationBasedAlgorithm):
    """Steepest-descent optimizer: steps against the gradient and chooses the
    step length with an exponential bracketing search followed by a
    golden-section refinement along the search line."""

    def direction(self, dF, **kwargs):
        # Steepest descent simply moves against the gradient.
        return - dF

    def step(self):
        problem, sol = self.problem, self.opt[0]
        # Evaluate the gradient of the current best solution.
        self.evaluator.eval(self.problem, sol, evaluate_values_of=["dF"])
        dF = sol.get("dF")[0]
        # Converged once the squared gradient norm drops below the tolerance.
        # (A leftover debug print of the solution was removed here.)
        if np.linalg.norm(dF) ** 2 < 1e-8:
            self.termination.force_termination = True
            return
        direction = self.direction(dF)
        line = LineSearchProblem(self.problem, sol, direction, strict_bounds=self.strict_bounds)
        alpha = self.alpha
        if self.strict_bounds:
            if problem.has_bounds():
                # Largest step that keeps the line search inside the box constraints.
                line.xu = np.array([max_alpha(sol.X, direction, *problem.bounds(), mode="all_hit_bounds")])
            # remember the step length from the last run
            alpha = min(alpha, line.xu[0])
            if alpha == 0:
                self.termination.force_termination = True
                return
        # make the solution to be the starting point of the univariate search
        x0 = sol.copy(deep=True)
        x0.set("__X__", x0.get("X"))
        x0.set("X", np.zeros(1))
        # determine the brackets to be searched in
        exp = ExponentialSearch(delta=alpha).setup(line, evaluator=self.evaluator, termination=("n_iter", 20), x0=x0)
        a, b = exp.run().pop[-2:]
        # search in the brackets
        res = GoldenSectionSearch().setup(line, evaluator=self.evaluator, termination=("n_iter", 20), a=a, b=b).run()
        infill = res.opt[0]
        # set the alpha value and revert the X to be the multi-variate one
        infill.set("X", infill.get("__X__"))
        self.alpha = infill.get("alpha")[0]
        # keep always a few historical solutions
        self.pop = Population.merge(self.pop, infill)[-10:]
| [
"pymoo.algorithms.soo.univariate.golden.GoldenSectionSearch",
"numpy.zeros",
"pymoo.core.population.Population.merge",
"numpy.linalg.norm",
"pymoo.algorithms.soo.univariate.exp.ExponentialSearch",
"pymoo.algorithms.base.line.LineSearchProblem"
] | [((840, 926), 'pymoo.algorithms.base.line.LineSearchProblem', 'LineSearchProblem', (['self.problem', 'sol', 'direction'], {'strict_bounds': 'self.strict_bounds'}), '(self.problem, sol, direction, strict_bounds=self.\n strict_bounds)\n', (857, 926), False, 'from pymoo.algorithms.base.line import LineSearchProblem\n'), ((1506, 1517), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1514, 1517), True, 'import numpy as np\n'), ((2137, 2171), 'pymoo.core.population.Population.merge', 'Population.merge', (['self.pop', 'infill'], {}), '(self.pop, infill)\n', (2153, 2171), False, 'from pymoo.core.population import Population\n'), ((679, 697), 'numpy.linalg.norm', 'np.linalg.norm', (['dF'], {}), '(dF)\n', (693, 697), True, 'import numpy as np\n'), ((1585, 1615), 'pymoo.algorithms.soo.univariate.exp.ExponentialSearch', 'ExponentialSearch', ([], {'delta': 'alpha'}), '(delta=alpha)\n', (1602, 1615), False, 'from pymoo.algorithms.soo.univariate.exp import ExponentialSearch\n'), ((1771, 1792), 'pymoo.algorithms.soo.univariate.golden.GoldenSectionSearch', 'GoldenSectionSearch', ([], {}), '()\n', (1790, 1792), False, 'from pymoo.algorithms.soo.univariate.golden import GoldenSectionSearch\n')] |
import numpy as np
import tensorflow as tf
if int(tf.__version__[0]) > 1:
from tensorflow.keras.utils import Sequence
else:
from tensorflow.python.keras.utils import Sequence
class DefaultGenerator(Sequence):
    """
    Create the default generator class using Keras sequence as the parent
    """
    def __init__(self, x_data, y_data, batch_size=1, shuffle=True):
        """
        Class initialization
        :param x_data: ECG data from session, validation or test set in the form of (epoch, data) [(sample, features)]
        :param y_data: EEG data from session, validation or test set in the form of (epoch, channel, data) or
        alternatively [(sample, channel, data]
        :param batch_size: batch size to be used in session, default is 1
        :param shuffle: whether to shuffle the set, default is True
        """
        self.x = x_data
        self.y = y_data
        self.indices = np.arange(np.shape(self.x)[0])
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.on_epoch_end()

    def __len__(self):
        """
        Obtain the steps (batches) needed per epoch, including a final partial batch.
        :return: number of batches per epoch
        """
        # The ceil must wrap the *division*. The original code applied np.ceil to
        # the (integer) sample count only, after which int() truncated the
        # quotient -- effectively a floor that silently dropped the last partial
        # batch whenever len(x) % batch_size != 0.
        return int(np.ceil(np.shape(self.x)[0] / float(self.batch_size)))

    def __getitem__(self, idx):
        """
        Generate each batch
        :param idx: starting index of current minibatch (needed by Keras)
        :return: batch_x: ECG data from current minibatch in the form of (sample, features, 1)
        :return: batch_y: EEG data from current minibatch in the form of (sample, features, channels)
        """
        # Obtain the indices of the samples that correspond to the current minibatch
        idx_batch = self.indices[idx * self.batch_size:(idx + 1) * self.batch_size]
        # Obtain the minibatch ECG data in the form of (sample, features) and note that Keras wants data in the form of
        # (sample, features, 1) so need to reshape the data
        batch_x = self.x[idx_batch, :]
        batch_x = batch_x.reshape(batch_x.shape[0], batch_x.shape[1], 1)
        # Obtain the minibatch EEG data in the form of (sample, channels, features) and note that Keras wants data in
        # the form of (sample, features, channels) so need to transpose the data
        batch_y = self.y[idx_batch, :, :]
        batch_y = np.transpose(batch_y, axes=(0, 2, 1))
        return batch_x, batch_y

    def on_epoch_end(self):
        """
        Updates indexes after each epoch by shuffling if enabled
        """
        if self.shuffle:
            np.random.shuffle(self.indices)
"numpy.shape",
"numpy.transpose",
"numpy.random.shuffle"
] | [((2251, 2288), 'numpy.transpose', 'np.transpose', (['batch_y'], {'axes': '(0, 2, 1)'}), '(batch_y, axes=(0, 2, 1))\n', (2263, 2288), True, 'import numpy as np\n'), ((2478, 2509), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indices'], {}), '(self.indices)\n', (2495, 2509), True, 'import numpy as np\n'), ((937, 953), 'numpy.shape', 'np.shape', (['self.x'], {}), '(self.x)\n', (945, 953), True, 'import numpy as np\n'), ((1189, 1205), 'numpy.shape', 'np.shape', (['self.x'], {}), '(self.x)\n', (1197, 1205), True, 'import numpy as np\n')] |
import os
import os.path as osp
import cv2
import numpy as np
import pickle
import argparse
import sys
sys.path.append(osp.join(sys.path[0], '..', '..')) # relative to current one
# print(sys.path)
import utils.utils as ut
from tqdm import tqdm
'''
history:
210924: update the part to global 24 index with visibility
'''
def pw3d_extract(dataset_path, out_path):
    """Convert the 3DPW test split into a single npz annotation file.

    Walks every sequence pickle under <dataset_path>/sequenceFiles/test,
    transforms the SMPL global orientation and 3D joints into each frame's
    camera, projects the joints into the image, and collects image names,
    bounding-box centers/scales, SMPL pose/shape, genders and 24-slot 2D
    keypoints (first 15 slots filled and marked visible).

    :param dataset_path: Root directory of the 3DPW dataset
    :param out_path: Directory where 3dpw_test.npz is written
    """
    # scale factor
    scaleFactor = 1.2
    if_dbg = 0  # debug switch: process only the first person / first few frames, skip saving
    # structs we use
    imgnames_, scales_, centers_, parts_ = [], [], [], []
    poses_, shapes_, genders_ = [], [], []
    parts_coco_ = []  # kept for the (currently commented-out) coco keypoint export
    # coco to 24
    # global_idx = [19, 12, 8, 7, 6, 9, 10, 11, 2, 1, 3, 11, 12, 13, 21, 20, 23, 22] # the openpose order
    global_idx = [] # the openpose order
    idx_valid = [8,5,2,1,4,7,21,19,17, 16, 18, 20, 12, 15, 0] # the valid joint
    # get a list of .pkl files in the directory
    dataset_path = os.path.join(dataset_path, 'sequenceFiles', 'test')
    files = [os.path.join(dataset_path, f)
             for f in os.listdir(dataset_path) if f.endswith('.pkl')] # maybe the file randomeness
    # go through all the .pkl files
    for filename in tqdm(files):
        with open(filename, 'rb') as f:
            data = pickle.load(f, encoding='latin1')
        smpl_pose = data['poses'] # N x 72
        smpl_betas = data['betas']
        poses2d = data['poses2d']
        global_poses = data['cam_poses']
        genders = data['genders']
        # np.bool was removed from NumPy (>= 1.24); the builtin bool is equivalent.
        valid = np.array(data['campose_valid']).astype(bool) # indicate which cam pose aligned to image , maybe 0 failed
        num_people = len(smpl_pose)
        num_frames = len(smpl_pose[0])
        seq_name = str(data['sequence'])
        img_names = np.array(['imageFiles/' + seq_name + '/image_%s.jpg' % str(i).zfill(5) for i in range(num_frames)])
        cam_intrinsics = data['cam_intrinsics']
        jointPositions = data['jointPositions'] # 24 x 3 xyz
        # get through all the people in the sequence
        for i in range(num_people):
            if if_dbg and i > 0:
                break
            valid_pose = smpl_pose[i][valid[i]]
            valid_betas = np.tile(smpl_betas[i][:10].reshape(1,-1), (num_frames, 1))
            valid_betas = valid_betas[valid[i]]
            valid_keypoints_2d = poses2d[i][valid[i]]
            valid_img_names = img_names[valid[i]]
            valid_global_poses = global_poses[valid[i]]
            valid_j3ds = jointPositions[i][valid[i]] # scalar indices only, jp dim0 dim 2?
            gender = genders[i]
            # consider only valid frames
            for valid_i in range(valid_pose.shape[0]):
                part = valid_keypoints_2d[valid_i,:,:].T
                j3d = valid_j3ds[valid_i].reshape([-1, 3])[idx_valid] # 24x3 layout, keep only the 15 valid joints
                if if_dbg and valid_i > 2:
                    break
                part = part[part[:,2]>0,:] # keep only annotated 2D keypoints
                bbox = [min(part[:,0]), min(part[:,1]),
                        max(part[:,0]), max(part[:,1])]
                center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
                scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200
                # transform global pose into the camera frame
                pose = valid_pose[valid_i]
                extrinsics = valid_global_poses[valid_i][:3,:3]
                T = valid_global_poses[valid_i][:3, -1]
                pose[:3] = cv2.Rodrigues(np.dot(extrinsics, cv2.Rodrigues(pose[:3])[0]))[0].T[0] # only change global orientation
                # bring the 3D joints into the camera frame and project with the intrinsics
                j3d_mono = np.einsum('ij,kj->ki', extrinsics[:3, :3], j3d) + T
                j2d = np.einsum('ij,kj->ki', cam_intrinsics, j3d_mono)
                j2d = j2d[:,:2]/j2d[:, 2:] # perspective divide
                part_g = np.zeros([24, 3]) # 24-joint layout
                part_g[:15,:2] = j2d
                part_g[:15,2] = 1 # mark the first 15 joints visible
                imgnames_.append(valid_img_names[valid_i])
                centers_.append(center)
                scales_.append(scale)
                poses_.append(pose)
                shapes_.append(valid_betas[valid_i])
                genders_.append(gender)
                parts_.append(part_g)
    print('totally {} processed'.format(len(imgnames_)))
    # store data
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path,
                            '3dpw_test.npz')
    if not if_dbg: # debug runs are not saved
        np.savez(out_file, imgname=imgnames_,
                 center=centers_,
                 scale=scales_,
                 pose=poses_,
                 shape=shapes_,
                 gender=genders_,
                 part = parts_,
                 )
if __name__ == '__main__':
    # CLI entry point: extract 3DPW test annotations into an npz file.
    cli = argparse.ArgumentParser()
    cli.add_argument('--ds_fd', default='/home/liu.shu/datasets/3DPW', help='the input of the dataset')
    cli.add_argument('--out_fd', default='data/dataset_extras', help='Path to input image')
    options = cli.parse_args()
    pw3d_extract(options.ds_fd, options.out_fd)
| [
"numpy.savez",
"os.listdir",
"os.makedirs",
"argparse.ArgumentParser",
"tqdm.tqdm",
"os.path.join",
"pickle.load",
"numpy.array",
"numpy.zeros",
"os.path.isdir",
"numpy.einsum",
"cv2.Rodrigues"
] | [((119, 152), 'os.path.join', 'osp.join', (['sys.path[0]', '""".."""', '""".."""'], {}), "(sys.path[0], '..', '..')\n", (127, 152), True, 'import os.path as osp\n'), ((907, 958), 'os.path.join', 'os.path.join', (['dataset_path', '"""sequenceFiles"""', '"""test"""'], {}), "(dataset_path, 'sequenceFiles', 'test')\n", (919, 958), False, 'import os\n'), ((1179, 1190), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (1183, 1190), False, 'from tqdm import tqdm\n'), ((5441, 5480), 'os.path.join', 'os.path.join', (['out_path', '"""3dpw_test.npz"""'], {}), "(out_path, '3dpw_test.npz')\n", (5453, 5480), False, 'import os\n'), ((5929, 5954), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5952, 5954), False, 'import argparse\n'), ((972, 1001), 'os.path.join', 'os.path.join', (['dataset_path', 'f'], {}), '(dataset_path, f)\n', (984, 1001), False, 'import os\n'), ((5371, 5394), 'os.path.isdir', 'os.path.isdir', (['out_path'], {}), '(out_path)\n', (5384, 5394), False, 'import os\n'), ((5404, 5425), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (5415, 5425), False, 'import os\n'), ((5535, 5667), 'numpy.savez', 'np.savez', (['out_file'], {'imgname': 'imgnames_', 'center': 'centers_', 'scale': 'scales_', 'pose': 'poses_', 'shape': 'shapes_', 'gender': 'genders_', 'part': 'parts_'}), '(out_file, imgname=imgnames_, center=centers_, scale=scales_, pose=\n poses_, shape=shapes_, gender=genders_, part=parts_)\n', (5543, 5667), True, 'import numpy as np\n'), ((1020, 1044), 'os.listdir', 'os.listdir', (['dataset_path'], {}), '(dataset_path)\n', (1030, 1044), False, 'import os\n'), ((1251, 1284), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (1262, 1284), False, 'import pickle\n'), ((1518, 1549), 'numpy.array', 'np.array', (["data['campose_valid']"], {}), "(data['campose_valid'])\n", (1526, 1549), True, 'import numpy as np\n'), ((4599, 4647), 'numpy.einsum', 'np.einsum', 
(['"""ij,kj->ki"""', 'cam_intrinsics', 'j3d_mono'], {}), "('ij,kj->ki', cam_intrinsics, j3d_mono)\n", (4608, 4647), True, 'import numpy as np\n'), ((4735, 4752), 'numpy.zeros', 'np.zeros', (['[24, 3]'], {}), '([24, 3])\n', (4743, 4752), True, 'import numpy as np\n'), ((4436, 4483), 'numpy.einsum', 'np.einsum', (['"""ij,kj->ki"""', 'extrinsics[:3, :3]', 'j3d'], {}), "('ij,kj->ki', extrinsics[:3, :3], j3d)\n", (4445, 4483), True, 'import numpy as np\n'), ((4305, 4328), 'cv2.Rodrigues', 'cv2.Rodrigues', (['pose[:3]'], {}), '(pose[:3])\n', (4318, 4328), False, 'import cv2\n')] |
import numpy as np
import networkx as nx
from itertools import combinations
from random import randrange, uniform
from numpy.linalg import inv
import scipy.sparse as sp
import pickle as pk
import os
def get_sparse_eigen_decomposition(graph, K):
    """Top-K eigen-decomposition of a weighted networkx graph's adjacency matrix."""
    node_order = sorted(graph.nodes())
    dense_adj = nx.adjacency_matrix(graph, nodelist=node_order, weight='weight').toarray()
    return get_sparse_eigen_decomposition_from_adj(dense_adj, K)
#TODO remove to only use the svd version
def get_sparse_eigen_decomposition_from_adj(adj, K):
    """Return the rank-K eigen-subspace of a square adjacency matrix.

    :param adj: (n, n) numpy adjacency matrix
    :param K: number of leading eigenvectors to keep
    :return: (V_ksparse, V_ksparse_H, get_v) where V_ksparse is an (n, K)
             np.matrix of the top-K eigenvectors, V_ksparse_H its conjugate
             transpose, and get_v(i) yields the i-th column of V_ksparse_H
             together with the i-th row of V_ksparse.
    """
    eigenvalues, eigenvectors = np.linalg.eig(adj)
    # Indices of the K largest eigenvalues (by value, not magnitude).
    top_k = np.argsort(eigenvalues)[-K:]
    basis = np.zeros((adj.shape[0], K))
    basis[:, :K] = eigenvectors[:, top_k]
    basis = np.matrix(basis)
    basis_H = basis.getH()

    def get_v(index):
        return basis_H[:, index], basis[index, :]

    return basis, basis_H, get_v
def get_sparse_eigen_decomposition_from_svd_adj(adj, K):
    """Return the top-K singular subspace of `adj`, caching the SVD on disk.

    :param adj: (n, n) numpy adjacency matrix
    :param K: number of leading components to keep
    :return: (V_ksparse, V_ksparse_H, get_v) with the same layout as
             get_sparse_eigen_decomposition_from_adj.
    """
    import hashlib
    # Key the cache file on the matrix *content*, not only its size: the old
    # shape-only filename made every matrix of the same size silently reuse the
    # first matrix's cached SVD. (A stray debug print(adj) was removed too.)
    digest = hashlib.sha1(np.ascontiguousarray(adj).tobytes()).hexdigest()[:16]
    eigen_file_name = "shape_" + str(adj.shape[0]) + "_" + digest + '.p'
    if not os.path.exists(eigen_file_name):
        u, eigenval, eigenvectors = np.linalg.svd(adj, full_matrices=True)
        svd_decomposition = {'u': u, 's': eigenval, 'vh': eigenvectors}
        with open(eigen_file_name, 'wb') as f:
            pk.dump(svd_decomposition, f)
    else:
        # NOTE(review): unpickling is only safe if the cache directory is trusted.
        with open(eigen_file_name, 'rb') as f:
            svd_decomposition = pk.load(f, encoding='latin1')
        eigenval = svd_decomposition['s']
        eigenvectors = svd_decomposition['vh']
    eigenval_Ksparse = np.argsort(eigenval)[-K:]  # indices of the K largest singular values
    V_ksparse = np.zeros((adj.shape[0], K))
    # NOTE(review): this indexes *columns* of vh; for non-symmetric adj the right
    # singular vectors are the *rows* of vh -- kept as-is to preserve behavior.
    V_ksparse[:, 0:K] = eigenvectors[:, eigenval_Ksparse]
    V_ksparse = np.matrix(V_ksparse)
    V_ksparse_H = V_ksparse.getH()

    def get_v(index):
        v_index = V_ksparse_H[:, index]
        v_index_H = V_ksparse[index, :]
        return v_index, v_index_H

    return V_ksparse, V_ksparse_H, get_v
# Plotting graphs
def plot_graph(graph):
    """Draw the graph with a shell layout and node labels."""
    nx.draw_shell(graph, with_labels=True)
# Erdos Renyi graph: Add an edge with prob = 0.2
def generate_Erdos_Renyi_graph(n):
    """Sample an Erdos-Renyi G(n, p) random graph with edge probability p = 0.2."""
    edge_probability = 0.2
    return nx.erdos_renyi_graph(n, edge_probability)
# Preferential attachment model
def generate_pref_attachment_graph(n):
    """Sample a Barabasi-Albert preferential-attachment graph where every new
    node attaches with a single edge (m = 1)."""
    edges_per_new_node = 1
    return nx.barabasi_albert_graph(n, edges_per_new_node)
# Random graph with weight between [0,1]
def generate_random_graph(n):
    """Build a weighted random graph on n nodes with edge weights in [0, 1].

    Every one of the C(n, 2) node pairs receives an independent uniform
    weight; pairs drawing exactly 0 are skipped. In the (measure-zero) event
    that some node ends up without any edge, the graph is resampled.
    """
    if n < 2:
        # A lone node can never acquire an edge; the original recursion would
        # recurse forever here. Return the trivial graph instead.
        trivial = nx.Graph()
        trivial.add_nodes_from(range(n))
        return trivial
    while True:
        random_graph = nx.Graph()
        for u, v in combinations(range(n), 2):  # Generate each possible edge
            # uniform(0, 1) keeps weights inside the documented [0, 1] range;
            # the original upper bound of 1.000001 could yield weights > 1.
            weight = uniform(0, 1)
            if weight > 0:
                random_graph.add_edge(u, v, weight=weight)
        # Resample (iteratively, not recursively) until every node has an edge.
        if len(random_graph.nodes()) == n:
            return random_graph
"os.path.exists",
"random.uniform",
"networkx.barabasi_albert_graph",
"numpy.linalg.eig",
"pickle.load",
"networkx.Graph",
"numpy.argsort",
"numpy.zeros",
"networkx.draw_shell",
"numpy.linalg.svd",
"numpy.matrix",
"networkx.erdos_renyi_graph"
] | [((527, 545), 'numpy.linalg.eig', 'np.linalg.eig', (['adj'], {}), '(adj)\n', (540, 545), True, 'import numpy as np\n'), ((663, 690), 'numpy.zeros', 'np.zeros', (['(adj.shape[0], K)'], {}), '((adj.shape[0], K))\n', (671, 690), True, 'import numpy as np\n'), ((818, 838), 'numpy.matrix', 'np.matrix', (['V_ksparse'], {}), '(V_ksparse)\n', (827, 838), True, 'import numpy as np\n'), ((1779, 1806), 'numpy.zeros', 'np.zeros', (['(adj.shape[0], K)'], {}), '((adj.shape[0], K))\n', (1787, 1806), True, 'import numpy as np\n'), ((1934, 1954), 'numpy.matrix', 'np.matrix', (['V_ksparse'], {}), '(V_ksparse)\n', (1943, 1954), True, 'import numpy as np\n'), ((2216, 2254), 'networkx.draw_shell', 'nx.draw_shell', (['graph'], {'with_labels': '(True)'}), '(graph, with_labels=True)\n', (2229, 2254), True, 'import networkx as nx\n'), ((2415, 2456), 'networkx.erdos_renyi_graph', 'nx.erdos_renyi_graph', (['n', 'Erdos_Renyi_Prob'], {}), '(n, Erdos_Renyi_Prob)\n', (2435, 2456), True, 'import networkx as nx\n'), ((2594, 2625), 'networkx.barabasi_albert_graph', 'nx.barabasi_albert_graph', (['n', 'm0'], {}), '(n, m0)\n', (2618, 2625), True, 'import networkx as nx\n'), ((2747, 2757), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2755, 2757), True, 'import networkx as nx\n'), ((569, 589), 'numpy.argsort', 'np.argsort', (['eigenval'], {}), '(eigenval)\n', (579, 589), True, 'import numpy as np\n'), ((1187, 1218), 'os.path.exists', 'os.path.exists', (['eigen_file_name'], {}), '(eigen_file_name)\n', (1201, 1218), False, 'import os\n'), ((1276, 1314), 'numpy.linalg.svd', 'np.linalg.svd', (['adj'], {'full_matrices': '(True)'}), '(adj, full_matrices=True)\n', (1289, 1314), True, 'import numpy as np\n'), ((1685, 1705), 'numpy.argsort', 'np.argsort', (['eigenval'], {}), '(eigenval)\n', (1695, 1705), True, 'import numpy as np\n'), ((2854, 2874), 'random.uniform', 'uniform', (['(0)', '(1.000001)'], {}), '(0, 1.000001)\n', (2861, 2874), False, 'from random import randrange, uniform\n'), ((1542, 1571), 
'pickle.load', 'pk.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (1549, 1571), True, 'import pickle as pk\n')] |
import os
import torch
import numbers
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
from torch.utils.data import Subset, TensorDataset
import numpy as np
def get_dataset(args, config):
    """Load training samples from ``args.train_fname`` (a ``.npy`` array,
    presumably shaped (N, C, H, W) — statistics are reduced over axes
    (0, 2, 3)), standardize them per channel, and wrap them in a
    ``TensorDataset`` with all-zero dummy labels.

    Note: the torchvision transform pipelines previously built here were
    never applied to the numpy-loaded data, so they were dead code and have
    been removed.  ``config`` is still accepted for interface compatibility.
    """
    train_samples = np.load(args.train_fname)
    # Dummy labels: TensorDataset requires a label tensor, but this data is
    # unlabeled, so every sample gets label 0.
    train_labels = np.zeros(len(train_samples))
    # Per-channel standardization: one mean/std per channel, computed over
    # the batch and spatial axes.
    data_mean = np.mean(train_samples, axis=(0, 2, 3), keepdims=True)
    data_std = np.std(train_samples, axis=(0, 2, 3), keepdims=True)
    train_samples = (train_samples - data_mean)/data_std
    print("train data shape are - ", train_samples.shape, train_labels.shape)
    print("train data stats are - ", np.mean(train_samples), np.std(train_samples),
          np.min(train_samples), np.max(train_samples))
    dataset = torch.utils.data.TensorDataset(
        torch.from_numpy(train_samples).float(), torch.from_numpy(train_labels).float())
    return dataset
def logit_transform(image, lam=1e-6):
    """Map [0, 1] pixel values into logit space.

    Values are first shrunk into (lam, 1 - lam) so the logit is finite at
    the endpoints 0 and 1.
    """
    squeezed = lam + (1 - 2 * lam) * image
    return torch.log(squeezed) - torch.log1p(-squeezed)
def data_transform(config, X):
    """Apply the configured preprocessing chain to a batch of images ``X``.

    Depending on ``config.data`` flags: uniform or Gaussian dequantization,
    then either rescaling to [-1, 1] or a logit transform, and finally an
    optional mean subtraction when ``config.image_mean`` exists.
    """
    data_cfg = config.data
    if data_cfg.uniform_dequantization:
        # Spread the 256 integer levels with uniform noise.
        X = X / 256.0 * 255.0 + torch.rand_like(X) / 256.0
    if data_cfg.gaussian_dequantization:
        X = X + torch.randn_like(X) * 0.01
    if data_cfg.rescaled:
        X = 2 * X - 1.0
    elif data_cfg.logit_transform:
        X = logit_transform(X)
    if hasattr(config, "image_mean"):
        return X - config.image_mean.to(X.device)[None, ...]
    return X
def inverse_data_transform(config, X):
    """Invert ``data_transform``: undo the mean shift, map back to [0, 1]."""
    if hasattr(config, "image_mean"):
        X = X + config.image_mean.to(X.device)[None, ...]
    data_cfg = config.data
    if data_cfg.logit_transform:
        X = torch.sigmoid(X)
    elif data_cfg.rescaled:
        X = (X + 1.0) / 2.0
    # Numerical noise can push values slightly outside the valid range.
    return torch.clamp(X, 0.0, 1.0)
| [
"numpy.mean",
"torch.log",
"torch.rand_like",
"numpy.std",
"torch.sigmoid",
"torchvision.transforms.RandomHorizontalFlip",
"torch.from_numpy",
"numpy.max",
"torch.randn_like",
"numpy.min",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"numpy.load",
"torch.clamp",
"t... | [((826, 851), 'numpy.load', 'np.load', (['args.train_fname'], {}), '(args.train_fname)\n', (833, 851), True, 'import numpy as np\n'), ((917, 970), 'numpy.mean', 'np.mean', (['train_samples'], {'axis': '(0, 2, 3)', 'keepdims': '(True)'}), '(train_samples, axis=(0, 2, 3), keepdims=True)\n', (924, 970), True, 'import numpy as np\n'), ((986, 1038), 'numpy.std', 'np.std', (['train_samples'], {'axis': '(0, 2, 3)', 'keepdims': '(True)'}), '(train_samples, axis=(0, 2, 3), keepdims=True)\n', (992, 1038), True, 'import numpy as np\n'), ((2333, 2357), 'torch.clamp', 'torch.clamp', (['X', '(0.0)', '(1.0)'], {}), '(X, 0.0, 1.0)\n', (2344, 2357), False, 'import torch\n'), ((1212, 1234), 'numpy.mean', 'np.mean', (['train_samples'], {}), '(train_samples)\n', (1219, 1234), True, 'import numpy as np\n'), ((1236, 1257), 'numpy.std', 'np.std', (['train_samples'], {}), '(train_samples)\n', (1242, 1257), True, 'import numpy as np\n'), ((1268, 1289), 'numpy.min', 'np.min', (['train_samples'], {}), '(train_samples)\n', (1274, 1289), True, 'import numpy as np\n'), ((1291, 1312), 'numpy.max', 'np.max', (['train_samples'], {}), '(train_samples)\n', (1297, 1312), True, 'import numpy as np\n'), ((1561, 1577), 'torch.log', 'torch.log', (['image'], {}), '(image)\n', (1570, 1577), False, 'import torch\n'), ((1580, 1599), 'torch.log1p', 'torch.log1p', (['(-image)'], {}), '(-image)\n', (1591, 1599), False, 'import torch\n'), ((2245, 2261), 'torch.sigmoid', 'torch.sigmoid', (['X'], {}), '(X)\n', (2258, 2261), False, 'import torch\n'), ((348, 389), 'torchvision.transforms.Resize', 'transforms.Resize', (['config.data.image_size'], {}), '(config.data.image_size)\n', (365, 389), True, 'import torchvision.transforms as transforms\n'), ((391, 412), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (410, 412), True, 'import torchvision.transforms as transforms\n'), ((509, 550), 'torchvision.transforms.Resize', 'transforms.Resize', (['config.data.image_size'], {}), 
'(config.data.image_size)\n', (526, 550), True, 'import torchvision.transforms as transforms\n'), ((568, 606), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (599, 606), True, 'import torchvision.transforms as transforms\n'), ((624, 645), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (643, 645), True, 'import torchvision.transforms as transforms\n'), ((729, 770), 'torchvision.transforms.Resize', 'transforms.Resize', (['config.data.image_size'], {}), '(config.data.image_size)\n', (746, 770), True, 'import torchvision.transforms as transforms\n'), ((772, 793), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (791, 793), True, 'import torchvision.transforms as transforms\n'), ((1369, 1400), 'torch.from_numpy', 'torch.from_numpy', (['train_samples'], {}), '(train_samples)\n', (1385, 1400), False, 'import torch\n'), ((1410, 1440), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (1426, 1440), False, 'import torch\n'), ((1708, 1726), 'torch.rand_like', 'torch.rand_like', (['X'], {}), '(X)\n', (1723, 1726), False, 'import torch\n'), ((1795, 1814), 'torch.randn_like', 'torch.randn_like', (['X'], {}), '(X)\n', (1811, 1814), False, 'import torch\n')] |
from numpy import arange
from hex_to_decimal import hex_to_decimal
from decimal_to_hex import decimal_to_hex
from binary_to_decimal import binary_to_decimal
from decimal_to_binary import decimal_to_binary
from octal_to_decimal import octal_to_decimal
from decimal_to_octal import decimal_to_octal
from binary_to_hex import binary_to_hex
from binary_to_octal import binary_to_octal
from unittest import main, TestCase
from random import random, randint
class TestFunctions(TestCase):
    """Round-trip tests for the base-conversion helpers.

    Each test repeatedly converts a random value to another base and back,
    asserting the round trip is lossless.  NOTE(review): the original
    ``test_binary`` and ``test_binary_to_decimal`` were byte-identical
    copy-paste duplicates; both method names are kept for test-discovery
    compatibility, but they now share one implementation.
    """

    @staticmethod
    def _random_value():
        # random() * randint(0, 1000) yields a float in [0, 1000).
        return random() * randint(0, 1000)

    def _check_roundtrip(self, to_other, to_decimal):
        # arange(0, 100, 0.001) was only ever used as a repeat counter in
        # the original code (its loop variable was immediately overwritten).
        for _ in arange(0, 100, 0.001):
            value = self._random_value()
            self.assertEqual(to_decimal(to_other(value)), value)

    def test_hex(self):
        self._check_roundtrip(decimal_to_hex, hex_to_decimal)

    def test_binary(self):
        self._check_roundtrip(decimal_to_binary, binary_to_decimal)

    def test_octal(self):
        self._check_roundtrip(decimal_to_octal, octal_to_decimal)

    def test_binary_to_decimal(self):
        self._check_roundtrip(decimal_to_binary, binary_to_decimal)

    def test_binary_to_hex(self):
        def via_hex(binary_):
            # binary -> hex -> decimal
            return hex_to_decimal(binary_to_hex(binary_))
        self._check_roundtrip(decimal_to_binary, via_hex)

    def test_binary_to_octal(self):
        def via_octal(binary_):
            # binary -> octal -> decimal
            return octal_to_decimal(binary_to_octal(binary_))
        self._check_roundtrip(decimal_to_binary, via_octal)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    main()
| [
"octal_to_decimal.octal_to_decimal",
"binary_to_decimal.binary_to_decimal",
"decimal_to_octal.decimal_to_octal",
"decimal_to_hex.decimal_to_hex",
"binary_to_octal.binary_to_octal",
"hex_to_decimal.hex_to_decimal",
"decimal_to_binary.decimal_to_binary",
"unittest.main",
"random.random",
"binary_to_... | [((2160, 2166), 'unittest.main', 'main', ([], {}), '()\n', (2164, 2166), False, 'from unittest import main, TestCase\n'), ((532, 553), 'numpy.arange', 'arange', (['(0)', '(100)', '(0.001)'], {}), '(0, 100, 0.001)\n', (538, 553), False, 'from numpy import arange\n'), ((779, 800), 'numpy.arange', 'arange', (['(0)', '(100)', '(0.001)'], {}), '(0, 100, 0.001)\n', (785, 800), False, 'from numpy import arange\n'), ((1037, 1058), 'numpy.arange', 'arange', (['(0)', '(100)', '(0.001)'], {}), '(0, 100, 0.001)\n', (1043, 1058), False, 'from numpy import arange\n'), ((1303, 1324), 'numpy.arange', 'arange', (['(0)', '(100)', '(0.001)'], {}), '(0, 100, 0.001)\n', (1309, 1324), False, 'from numpy import arange\n'), ((1569, 1590), 'numpy.arange', 'arange', (['(0)', '(100)', '(0.001)'], {}), '(0, 100, 0.001)\n', (1575, 1590), False, 'from numpy import arange\n'), ((1873, 1894), 'numpy.arange', 'arange', (['(0)', '(100)', '(0.001)'], {}), '(0, 100, 0.001)\n', (1879, 1894), False, 'from numpy import arange\n'), ((622, 643), 'decimal_to_hex.decimal_to_hex', 'decimal_to_hex', (['value'], {}), '(value)\n', (636, 643), False, 'from decimal_to_hex import decimal_to_hex\n'), ((665, 685), 'hex_to_decimal.hex_to_decimal', 'hex_to_decimal', (['hex_'], {}), '(hex_)\n', (679, 685), False, 'from hex_to_decimal import hex_to_decimal\n'), ((872, 896), 'decimal_to_binary.decimal_to_binary', 'decimal_to_binary', (['value'], {}), '(value)\n', (889, 896), False, 'from decimal_to_binary import decimal_to_binary\n'), ((918, 944), 'binary_to_decimal.binary_to_decimal', 'binary_to_decimal', (['binary_'], {}), '(binary_)\n', (935, 944), False, 'from binary_to_decimal import binary_to_decimal\n'), ((1129, 1152), 'decimal_to_octal.decimal_to_octal', 'decimal_to_octal', (['value'], {}), '(value)\n', (1145, 1152), False, 'from decimal_to_octal import decimal_to_octal\n'), ((1174, 1198), 'octal_to_decimal.octal_to_decimal', 'octal_to_decimal', (['octal_'], {}), '(octal_)\n', (1190, 1198), 
False, 'from octal_to_decimal import octal_to_decimal\n'), ((1396, 1420), 'decimal_to_binary.decimal_to_binary', 'decimal_to_binary', (['value'], {}), '(value)\n', (1413, 1420), False, 'from decimal_to_binary import decimal_to_binary\n'), ((1442, 1468), 'binary_to_decimal.binary_to_decimal', 'binary_to_decimal', (['binary_'], {}), '(binary_)\n', (1459, 1468), False, 'from binary_to_decimal import binary_to_decimal\n'), ((1662, 1686), 'decimal_to_binary.decimal_to_binary', 'decimal_to_binary', (['value'], {}), '(value)\n', (1679, 1686), False, 'from decimal_to_binary import decimal_to_binary\n'), ((1706, 1728), 'binary_to_hex.binary_to_hex', 'binary_to_hex', (['binary_'], {}), '(binary_)\n', (1719, 1728), False, 'from binary_to_hex import binary_to_hex\n'), ((1750, 1770), 'hex_to_decimal.hex_to_decimal', 'hex_to_decimal', (['hex_'], {}), '(hex_)\n', (1764, 1770), False, 'from hex_to_decimal import hex_to_decimal\n'), ((1966, 1990), 'decimal_to_binary.decimal_to_binary', 'decimal_to_binary', (['value'], {}), '(value)\n', (1983, 1990), False, 'from decimal_to_binary import decimal_to_binary\n'), ((2012, 2036), 'binary_to_octal.binary_to_octal', 'binary_to_octal', (['binary_'], {}), '(binary_)\n', (2027, 2036), False, 'from binary_to_octal import binary_to_octal\n'), ((2058, 2082), 'octal_to_decimal.octal_to_decimal', 'octal_to_decimal', (['octal_'], {}), '(octal_)\n', (2074, 2082), False, 'from octal_to_decimal import octal_to_decimal\n'), ((575, 583), 'random.random', 'random', ([], {}), '()\n', (581, 583), False, 'from random import random, randint\n'), ((586, 602), 'random.randint', 'randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (593, 602), False, 'from random import random, randint\n'), ((822, 830), 'random.random', 'random', ([], {}), '()\n', (828, 830), False, 'from random import random, randint\n'), ((833, 849), 'random.randint', 'randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (840, 849), False, 'from random import random, randint\n'), ((1080, 1088), 
'random.random', 'random', ([], {}), '()\n', (1086, 1088), False, 'from random import random, randint\n'), ((1091, 1107), 'random.randint', 'randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (1098, 1107), False, 'from random import random, randint\n'), ((1346, 1354), 'random.random', 'random', ([], {}), '()\n', (1352, 1354), False, 'from random import random, randint\n'), ((1357, 1373), 'random.randint', 'randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (1364, 1373), False, 'from random import random, randint\n'), ((1612, 1620), 'random.random', 'random', ([], {}), '()\n', (1618, 1620), False, 'from random import random, randint\n'), ((1623, 1639), 'random.randint', 'randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (1630, 1639), False, 'from random import random, randint\n'), ((1916, 1924), 'random.random', 'random', ([], {}), '()\n', (1922, 1924), False, 'from random import random, randint\n'), ((1927, 1943), 'random.randint', 'randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (1934, 1943), False, 'from random import random, randint\n')] |
import unittest
class TestDynamicsModel(unittest.TestCase):
    """Shape checks for the MuZero dynamics model."""

    def setUp(self):
        from muzero.models.dynamics_model import DynamicsModel
        from muzero.environment.action import Action
        import tensorflow as tf
        self.dynamics_model = DynamicsModel()
        # Batch of 4 hidden states, each 3x3 with a single channel.
        self.batch_of_hidden_states = tf.ones([4, 3, 3, 1])
        self.default_action = Action(0)

    def test_output_shape(self):
        import tensorflow as tf
        import numpy as np
        input_tensor = None
        state_count = 0
        for state in self.batch_of_hidden_states:
            self.assertEqual(state.shape, (3, 3, 1))
            # Broadcast the action id over a 3x3 plane and stack it as a
            # second channel next to the hidden state.
            action = tf.ones([3, 3, 1]) * self.default_action.action_id
            state_with_action = tf.concat([state, action], axis=2)
            if input_tensor is None:
                input_tensor = np.array([state_with_action])
            else:
                input_tensor = tf.concat([input_tensor, [state_with_action]], axis=0)
            state_count += 1
        self.assertEqual(input_tensor.shape, (state_count, 3, 3, 2))
        self.assertEqual(input_tensor.shape, (4, 3, 3, 2))
        hidden_states, reward = self.dynamics_model(input_tensor, training=True)
        # Was a bare `assert` (no diagnostics, stripped under -O); use the
        # unittest assertion like the rest of the suite.
        self.assertEqual(hidden_states.shape, (4, 3, 3, 1))
class TestResentationModel(unittest.TestCase):
    """Shape checks for the representation model in both game modes.

    NOTE(review): the class name is misspelled ("Resentation") but is kept
    unchanged so external test discovery/selection is unaffected.
    """

    def test_output_shape(self):
        from muzero.models.representation_model import RepresentationModel
        import tensorflow as tf
        # Atari observations: 96x96 frames with 128 stacked planes.
        atari_model = RepresentationModel(game_mode='Atari')
        atari_state = atari_model(tf.ones([4, 96, 96, 128]))
        self.assertEqual(atari_state.shape, (4, 6, 6, 1))
        # Board games (e.g. TicTacToe): 8x8 board with 17 feature planes.
        board_model = RepresentationModel(game_mode='BoardGame')
        board_state = board_model(tf.ones([4, 8, 8, 17]))
        self.assertEqual(board_state.shape, (4, 8, 8, 1))
class TestPredictionModel(unittest.TestCase):
    """Shape checks for the prediction (value/policy) model."""

    def test_output_shape(self):
        from muzero.models.prediction_model import PredictionModel
        import tensorflow as tf
        action_count = 10
        model = PredictionModel(action_count)
        value, policy = model(tf.ones([3, 24, 24, 5]))
        # One scalar value per batch element, one logit per action.
        self.assertEqual(value.shape, (3, 1))
        self.assertEqual(policy.shape, (3, action_count))
| [
"muzero.models.representation_model.RepresentationModel",
"tensorflow.ones",
"muzero.models.dynamics_model.DynamicsModel",
"tensorflow.concat",
"numpy.array",
"muzero.environment.action.Action",
"muzero.models.prediction_model.PredictionModel"
] | [((262, 277), 'muzero.models.dynamics_model.DynamicsModel', 'DynamicsModel', ([], {}), '()\n', (275, 277), False, 'from muzero.models.dynamics_model import DynamicsModel\n'), ((316, 337), 'tensorflow.ones', 'tf.ones', (['[4, 3, 3, 1]'], {}), '([4, 3, 3, 1])\n', (323, 337), True, 'import tensorflow as tf\n'), ((368, 377), 'muzero.environment.action.Action', 'Action', (['(0)'], {}), '(0)\n', (374, 377), False, 'from muzero.environment.action import Action\n'), ((1480, 1518), 'muzero.models.representation_model.RepresentationModel', 'RepresentationModel', ([], {'game_mode': '"""Atari"""'}), "(game_mode='Atari')\n", (1499, 1518), False, 'from muzero.models.representation_model import RepresentationModel\n'), ((1667, 1709), 'muzero.models.representation_model.RepresentationModel', 'RepresentationModel', ([], {'game_mode': '"""BoardGame"""'}), "(game_mode='BoardGame')\n", (1686, 1709), False, 'from muzero.models.representation_model import RepresentationModel\n'), ((2042, 2069), 'muzero.models.prediction_model.PredictionModel', 'PredictionModel', (['num_action'], {}), '(num_action)\n', (2057, 2069), False, 'from muzero.models.prediction_model import PredictionModel\n'), ((730, 764), 'tensorflow.concat', 'tf.concat', (['[state, action]'], {'axis': '(2)'}), '([state, action], axis=2)\n', (739, 764), True, 'import tensorflow as tf\n'), ((1546, 1571), 'tensorflow.ones', 'tf.ones', (['[4, 96, 96, 128]'], {}), '([4, 96, 96, 128])\n', (1553, 1571), True, 'import tensorflow as tf\n'), ((1737, 1759), 'tensorflow.ones', 'tf.ones', (['[4, 8, 8, 17]'], {}), '([4, 8, 8, 17])\n', (1744, 1759), True, 'import tensorflow as tf\n'), ((2099, 2122), 'tensorflow.ones', 'tf.ones', (['[3, 24, 24, 5]'], {}), '([3, 24, 24, 5])\n', (2106, 2122), True, 'import tensorflow as tf\n'), ((647, 665), 'tensorflow.ones', 'tf.ones', (['[3, 3, 1]'], {}), '([3, 3, 1])\n', (654, 665), True, 'import tensorflow as tf\n'), ((833, 862), 'numpy.array', 'np.array', (['[state_with_action]'], {}), 
'([state_with_action])\n', (841, 862), True, 'import numpy as np\n'), ((912, 966), 'tensorflow.concat', 'tf.concat', (['[input_tensor, [state_with_action]]'], {'axis': '(0)'}), '([input_tensor, [state_with_action]], axis=0)\n', (921, 966), True, 'import tensorflow as tf\n')] |
#Genel
import sys, os, math, csv, random, time, datetime, webbrowser, subprocess
#PyQt5
from PyQt5 import QtWidgets, QtCore, QtGui
from tasarim import Ui_MainWindow
from PyQt5.QtWidgets import QFileDialog, QMessageBox
#TensorFlow
import tensorflow as tf
import tensorflow.keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten
from tensorflow.keras.applications import MobileNetV2, ResNet50, InceptionV3
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.utils import get_file
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.callbacks import TensorBoard
#OpenCv
import cv2
#Yardımcı
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
class App(QtWidgets.QMainWindow):
    def __init__(self):  # Build the main window and wire up all UI signals
        super(App, self).__init__()
        self.ui=Ui_MainWindow()
        self.ui.setupUi(self)
        self.setWindowIcon(QtGui.QIcon("assets\logo.png"))
        # Class labels: the 29 letters of the Turkish alphabet.
        self.CATEGORIES = ["A", "B", "C", "Ç", "D", "E", "F", "G", "Ğ", "H", "I", "İ", "J", "K", "L", "M", "N", "O", "Ö", "P", "R", "S", "Ş", "T", "U", "Ü", "V", "Y", "Z"]
        # Dataset bookkeeping filled in by dataset_folder().
        self.subdirs=[]          # class folder names
        self.dircounts=[]        # image count per class folder
        self.piccount=0          # total image count
        self.test_o=0            # test split percentage
        self.egitim_o=0          # training split percentage
        self.dataset=""          # selected dataset root directory
        self.csv_info()
        # Training configuration defaults.
        self.yontem="Özel CNN ağı"
        self.model_ad_yontem="Ozel-CNN-Agi"
        self.epochs=10
        self.batch_size=64
        self.trasfer_model_path="MobileNetV2"
        self.Model_adi=""
        self.callbacks=[]
        self.sub_process_check=False   # whether a TensorBoard process is running
        self.test_model_path=""
        self.test_image=""
        # Populate the model combo box with any previously trained models;
        # the first one found becomes the default test model.
        check=False
        for path, subdirs, files in os.walk("Models"):
            for name in files:
                if(not check):
                    self.ui.pushButton_7.setEnabled(True)
                    self.ui.pushButton_6.setEnabled(True)
                    self.ui.pushButton_10.setEnabled(True)
                    self.test_model_path="Models/"+name
                    check=True
                self.ui.comboBox_3.addItem(name)
        # Signal wiring: dataset handling, training options, testing.
        self.ui.pushButton.clicked.connect(self.dataset_folder)
        self.ui.pushButton_2.clicked.connect(self.show_folder_info)
        self.ui.pushButton_3.clicked.connect(self.split_dataset)
        self.ui.pushButton_4.clicked.connect(self.csv_veriseti)
        self.ui.comboBox.currentTextChanged.connect(self.yontem_sec)
        self.ui.comboBox_2.currentTextChanged.connect(self.tl_yontem)
        self.ui.radioButton.clicked.connect(self.radiobtn_durum)
        self.ui.radioButton_2.clicked.connect(self.radiobtn_durum)
        self.ui.radioButton_3.clicked.connect(self.radiobtn_durum)
        self.ui.radioButton_4.clicked.connect(self.radiobtn_durum)
        self.ui.pushButton_5.clicked.connect(self.egit)
        self.ui.pushButton_9.clicked.connect(self.model_ad)
        self.ui.checkBox.stateChanged.connect(self.ek_ozellikler)
        self.ui.checkBox_2.stateChanged.connect(self.ek_ozellikler)
        self.ui.spinBox_2.valueChanged.connect(self.radiobtn_durum)
        self.ui.spinBox_3.valueChanged.connect(self.radiobtn_durum)
        self.ui.spinBox_4.valueChanged.connect(self.radiobtn_durum)
        self.ui.spinBox_5.valueChanged.connect(self.radiobtn_durum)
        self.ui.comboBox_3.currentTextChanged.connect(self.test_model)
        self.ui.pushButton_6.clicked.connect(self.test)
        self.ui.pushButton_7.clicked.connect(self.tensorboard_log)
        self.ui.pushButton_10.clicked.connect(self.model_donustur)
#Veriseti
def dataset_folder(self):
dialog = QFileDialog()
self.dataset = dialog.getExistingDirectory(self, 'Veri Seti Klasörü')
if(self.dataset!=""):
self.piccount=0
ctr=False
ct=0
for path, subdirs, files in os.walk(self.dataset):
if(not(ctr)):
self.subdirs=subdirs
ctr=True
#print(subdirs)
for name in files:
ct+=1
self.piccount+=1
#print(os.path.join(path, name))
self.dircounts.append(ct)
ct=0
self.dircounts.pop(0)
print("Dataset yol:", self.dataset)
print("Resim sayisi: ", self.piccount)
p_c=str(self.piccount)
c_c=str(len(self.subdirs))
self.ui.label_26.setText("Resim sayısı: "+p_c)
self.ui.label_27.setText("Class sayısı: "+c_c)
self.ui.label_37.setText("Resim sayısı: "+p_c)
self.ui.label_38.setText("Class sayısı: "+c_c)
if(self.piccount> 0):
self.ui.pushButton_2.setEnabled(True)
self.ui.pushButton_3.setEnabled(True)
else:
print("Veri seti yok")
def show_folder_info(self):
plt.bar(self.subdirs, self.dircounts, color ='blue', width = 0.4)
plt.title("Veri Seti İçeriği")
plt.xlabel("Sınıflar")
plt.ylabel("Örnek sayısı")
plt.show()
def split_dataset(self):
self.test_o=int(self.ui.spinBox.text())
self.egitim_o= 100 - int(self.ui.spinBox.text())
# if(self.test_o+self.validasyon_o>100):
# print("Hata")
# msg = QMessageBox()
# msg.setWindowTitle("Tutorial on PyQt5")
# msg.setText("This is the main text!")
# msg.setIcon(QMessageBox.Critical)
# x = msg.exec_()
# self.ui.spinBox.setValue(1)
# self.split_dataset()
self.ui.label_6.setText(str(math.floor(self.piccount*(int(self.ui.spinBox.text())/100))))
self.ui.label_7.setText(str(self.piccount-(int(self.ui.label_6.text()))))
self.ui.pushButton_4.setEnabled(True)
self.ui.label_50.setText("Test oranı: "+str(self.test_o)+"%, Eğitim oranı: "+str(self.egitim_o)+"%")
self.ui.label_52.setText("Test oranı: "+str(self.test_o)+"%, Eğitim oranı: "+str(self.egitim_o)+"%")
def csv_info(self):
for path, subdirs, files in os.walk("csv_dataset"):
for name in files:
if(len(files)>0):
ds_adi=str(name)
o_t=str(time.ctime(os.path.getctime(os.path.join(path, name))))
g_t=str(time.ctime(os.path.getmtime(os.path.join(path, name))))
self.ui.label_28.setText("Adı: "+ds_adi)
self.ui.label_29.setText("Oluşturulma tarihi: "+o_t)
self.ui.label_30.setText("Son güncelleme tarihi: "+g_t)
self.ui.label_39.setText("Adı: "+ds_adi)
self.ui.label_40.setText("Oluşturulma tarihi: "+o_t)
self.ui.label_41.setText("Son güncelleme tarihi: "+g_t)
def csv_veriseti(self):
self.ui.label_24.setText("CSV Veri seti oluşturuluyor...")
print("----- Resimler düzenlenmeye başlanıyor -----")
with open('csv_dataset\csv_dataset.csv', 'w', newline='') as f:
sutunadlari=['letter', 'pixels', 'status']
ekleme=csv.DictWriter(f, fieldnames=sutunadlari)
ekleme.writeheader()
self.convert(0, ekleme)
def convert(self, value, ekleme):
print("----- İşlenen sınıf: "+ self.CATEGORIES[value]+" -----")
for filename in os.listdir(self.dataset+"\\" + self.CATEGORIES[value]):
full_path=self.dataset+"\\" + self.CATEGORIES[value] + "\\" + filename
print("Resim: ",full_path)
image = Image.open(full_path).convert('L')
new_image = image.resize((48, 48))
#new_image.save(filename+'.jpg') düzenlenmiş resmi kayıt etmek için
data = list(new_image.getdata())
durumlar=["Training", "Test"]
durum= random.choices(durumlar, weights = [self.egitim_o, self.test_o])
ekleme.writerow({'letter': value, 'pixels': str(data).strip('[]').replace(',', ''), 'status': str(durum).strip('[]').replace("'", "")})
if(value!=len(self.CATEGORIES)-1):
value+=1
self.convert(value, ekleme)
else:
self.ui.label_24.setText("İşlem tamam")
self.csv_info()
#Eğitim
def radiobtn_durum(self):
if(self.ui.comboBox.currentText()=="Özel CNN ağı"):
if self.ui.radioButton.isChecked():
self.ui.groupBox_6.setEnabled(True)
self.ui.label_19.setText("Epochs: "+ str(self.ui.spinBox_2.text()))
self.ui.label_21.setText("Batch size: "+ str(self.ui.spinBox_3.text()))
else:
self.ui.groupBox_6.setEnabled(False)
self.ui.label_19.setText("Epochs: 10")
self.ui.label_21.setText("Batch size: 64")
else:
if self.ui.radioButton_3.isChecked():
self.ui.groupBox_9.setEnabled(True)
self.ui.label_19.setText("Epochs: "+ str(self.ui.spinBox_4.text()))
self.ui.label_21.setText("Batch size: "+ str(self.ui.spinBox_5.text()))
else:
self.ui.groupBox_9.setEnabled(False)
self.ui.label_19.setText("Epochs: 10")
self.ui.label_21.setText("Batch size: 64")
def yontem_sec(self):
text=self.ui.comboBox.currentText()
if(text=="Transfer öğrenme"):
self.yontem="Transfer öğrenme"
self.model_ad_yontem="Transfer-Ogrenme"
self.ui.groupBox_5.setEnabled(False)
self.ui.groupBox_8.setEnabled(True)
self.ui.label_16.setText("Yöntem: "+ self.yontem+"("+self.ui.comboBox_2.currentText()+")")
else:
self.yontem="Özel CNN ağı"
self.model_ad_yontem="Ozel-CNN-Agi"
self.ui.groupBox_8.setEnabled(False)
self.ui.groupBox_5.setEnabled(True)
self.ui.label_16.setText("Yöntem: "+ self.yontem)
    def tl_yontem(self):
        # Reflect the newly selected transfer-learning base model in the
        # method summary label.
        self.ui.label_16.setText("Yöntem: "+ self.yontem+"("+str(self.ui.comboBox_2.currentText())+")")
def model_ad(self):
if(self.ui.lineEdit.text()!="" and self.dataset!="" and self.test_o!=0):
self.ui.label_17.setText("Tam ad: "+self.ui.lineEdit.text()+"-"+self.model_ad_yontem+".h5")
self.Model_adi=self.ui.lineEdit.text()+"-"+self.model_ad_yontem
self.ui.pushButton_5.setEnabled(True)
self.ui.label_47.setStyleSheet("color: green;")
self.ui.label_47.setText("Hazır")
else:
self.ui.pushButton_5.setEnabled(False)
self.ui.label_47.setStyleSheet("color: red;")
self.ui.label_47.setText("Eğitim için bazı alanlar eksik")
    def ek_ozellikler(self):
        # Rebuild the training-callback list from the "extra options" checkboxes.
        self.callbacks.clear()
        if(self.ui.checkBox.isChecked()):
            # Stop training when accuracy plateaus for the configured patience.
            self.callbacks.append(tf.keras.callbacks.EarlyStopping(monitor='accuracy', patience=int(self.ui.spinBox_6.text())))
            print("EarlyStoping")
        if(self.ui.checkBox_2.isChecked()):
            # Save only the best model (by validation accuracy).
            # NOTE(review): self.Model_adi may still be "" if the name has not
            # been confirmed yet, producing "Models/.h5" — verify call order.
            self.callbacks.append(tf.keras.callbacks.ModelCheckpoint(filepath="Models/"+ self.Model_adi+".h5", verbose=1, monitor='val_accuracy', mode='max',save_best_only=True))
            print("ModelCheckpoint")
    def egit(self):
        """Train a model with the currently selected method.

        "Özel CNN ağı": reads the CSV pixel dataset, builds a small CNN for
        48x48 grayscale input and 29 letter classes, trains and saves it.
        "Transfer öğrenme": fine-tunes MobileNetV2/ResNet50/InceptionV3 on
        the raw image folders via an ImageDataGenerator split.
        Afterwards the model combo box is repopulated from the Models folder.
        """
        num_classes = 29
        print("=====EĞİTİM BAŞLIYOR======")
        if(self.yontem=="Özel CNN ağı"):
            # Use the custom epoch/batch values only when enabled.
            if self.ui.radioButton.isChecked():
                self.epochs=int(self.ui.spinBox_2.text())
                self.batch_size=int(self.ui.spinBox_3.text())
            else:
                self.epochs=10
                self.batch_size=64
            print("Yöntem: ",self.yontem)
            print("Test oranı: ",self.test_o)
            print("epoch: ",self.epochs)
            print("Batchsize: ",self.batch_size)
            print("----------------------------------------------------------------")
            # Preprocessing: read the whole CSV dataset into memory.
            with open("csv_dataset\csv_dataset.csv") as f:
                content = f.readlines()
            lines = np.array(content)
            num_of_instances = lines.size
            print("-------------------------------------------")
            print("Resim sayısı: ",num_of_instances)
            print("Resimlerin boyutları: ",len(lines[1].split(",")[1].split(" ")))
            print("-------------------------------------------")
            x_train, y_train, x_test, y_test = [], [], [], []
            # Distribute each CSV row into the training or test set
            # (row format: letter index, space-separated pixels, split tag).
            for i in range(1,num_of_instances):
                letter, img, status = lines[i].split(",")
                val = img.split(" ")
                pixels = np.array(val, 'float32')
                letter = tensorflow.keras.utils.to_categorical(letter, num_classes)
                if 'Training' in status:
                    y_train.append(letter)
                    x_train.append(pixels)
                elif 'Test' in status:
                    y_test.append(letter)
                    x_test.append(pixels)
            # Convert the training and test sets to numpy arrays.
            x_train = np.array(x_train, 'float32')
            y_train = np.array(y_train, 'float32')
            x_test = np.array(x_test, 'float32')
            y_test = np.array(y_test, 'float32')
            x_train /= 255 # normalize pixel values into [0, 1]
            x_test /= 255
            x_train = x_train.reshape(x_train.shape[0], 48, 48, 1)
            x_train = x_train.astype('float32')
            x_test = x_test.reshape(x_test.shape[0], 48, 48, 1)
            x_test = x_test.astype('float32')
            print("-------------------------------------------")
            print(x_train.shape[0], 'Eğitim Sayısı')
            print(x_test.shape[0], 'Test Sayısı')
            print("-------------------------------------------")
            model = Sequential()
            # 1st convolution block
            model.add(Conv2D(64, (5, 5), activation='relu', input_shape=(48,48,1)))
            model.add(MaxPooling2D(pool_size=(5,5), strides=(2, 2)))
            # 2nd convolution block
            model.add(Conv2D(64, (3, 3), activation='relu'))
            model.add(Conv2D(64, (3, 3), activation='relu'))
            model.add(AveragePooling2D(pool_size=(3,3), strides=(2, 2)))
            # 3rd convolution block
            model.add(Conv2D(128, (3, 3), activation='relu'))
            model.add(Conv2D(128, (3, 3), activation='relu'))
            model.add(AveragePooling2D(pool_size=(3,3), strides=(2, 2)))
            model.add(Flatten())
            # Fully connected classification head
            model.add(Dense(1024, activation='relu'))
            model.add(Dropout(0.2))
            model.add(Dense(1024, activation='relu'))
            model.add(Dropout(0.2))
            model.add(Dense(num_classes, activation='softmax'))
            #Tensorboard
            self.callbacks.append(TensorBoard(log_dir="TensorBoard_logs/"+self.Model_adi+"-log", histogram_freq=1, write_graph=True, update_freq='epoch', profile_batch=2, embeddings_freq=1))
            model.compile(loss='categorical_crossentropy', optimizer=tensorflow.keras.optimizers.Adam(), metrics=['accuracy'])
            model.fit(x_train, y_train, epochs=self.epochs, batch_size=self.batch_size, validation_data=(x_test, y_test), callbacks=self.callbacks)
            model.save("Models/"+self.Model_adi+".h5")
            self.ui.label_47.setText("Model eğitildi ve kayıt edildi")
        if(self.yontem=="Transfer öğrenme"):
            self.trasfer_model=self.ui.comboBox_2.currentText()
            if self.ui.radioButton_3.isChecked():
                self.epochs=int(self.ui.spinBox_4.text())
                self.batch_size=int(self.ui.spinBox_5.text())
            else:
                self.epochs=10
                self.batch_size=64
            print("Yöntem: ", self.yontem)
            print("Model: ", self.trasfer_model)
            print("Test oranı: ", self.test_o)
            print("epoch: ", self.epochs)
            print("Batchsize: ", self.batch_size)
            print("----------------------------------------------------------------")
            # Preprocessing: each base model expects its own input size.
            if(self.trasfer_model!="InceptionV3"):
                IMAGE_SHAPE = (224, 224, 3)
            else:
                IMAGE_SHAPE = (299, 299, 3)
            CLASS_NAMES = np.array(self.CATEGORIES)
            # validation_split comes from the configured test ratio.
            image_generator = ImageDataGenerator(rescale=1/255, validation_split=self.test_o/100)
            train_data_gen = image_generator.flow_from_directory(directory=self.dataset, batch_size=self.batch_size, classes=list(CLASS_NAMES), target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]), shuffle=True, subset="training")
            test_data_gen = image_generator.flow_from_directory(directory=self.dataset, batch_size=self.batch_size, classes=list(CLASS_NAMES), target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]), shuffle=True, subset="validation")
            if(self.trasfer_model=="MobileNetV2"):
                model = MobileNetV2(input_shape=IMAGE_SHAPE)
            if(self.trasfer_model=="ResNet50"):
                model = ResNet50(input_shape=IMAGE_SHAPE)
            if(self.trasfer_model=="InceptionV3"):
                model = InceptionV3(input_shape=IMAGE_SHAPE)
            # remove the last fully connected layer
            model.layers.pop()
            # freeze all the weights of the model except the last 4 layers
            for layer in model.layers[:-4]:
                layer.trainable = False
            output = Dense(num_classes, activation="softmax")
            # connect that dense layer to the model
            output = output(model.layers[-1].output)
            model = Model(inputs=model.inputs, outputs=output)
            # training the model using adam optimizer
            model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
            training_steps_per_epoch = np.ceil(train_data_gen.samples / self.batch_size)
            validation_steps_per_epoch = np.ceil(test_data_gen.samples / self.batch_size)
            #Tensorboard
            self.callbacks.append(TensorBoard(log_dir="TensorBoard_logs/"+self.Model_adi+"-log", histogram_freq=1, write_graph=True, update_freq='epoch', profile_batch=2, embeddings_freq=1))
            hist=model.fit_generator(train_data_gen, steps_per_epoch=training_steps_per_epoch, validation_data=test_data_gen, validation_steps=validation_steps_per_epoch, epochs=self.epochs, callbacks=self.callbacks)
            model.save("Models/"+self.Model_adi+".h5")
            self.ui.label_47.setText("Model eğitildi ve kayıt edildi")
        # Refresh the test-model combo box from the Models directory; the
        # first entry becomes the default and enables the test buttons.
        self.ui.comboBox_3.clear()
        cb_check=0
        for path, subdirs, files in os.walk("Models"):
            for name in files:
                if(cb_check==0):
                    self.test_model_path="Models/"+name
                    self.ui.pushButton_7.setEnabled(True)
                    self.ui.pushButton_6.setEnabled(True)
                    self.ui.pushButton_10.setEnabled(True)
                cb_check=cb_check+1
                self.ui.comboBox_3.addItem(name)
#Test ve bilgi
def test_model(self):
self.test_model_path="Models/"+self.ui.comboBox_3.currentText()
def tensorboard_log(self):
if(self.sub_process_check==False):
webbrowser.open('http://localhost:6006/', new=1, autoraise=True)
self.sub_process=subprocess.Popen("tensorboard --logdir=TensorBoard_logs\\"+self.test_model_path.replace("Models/","").replace(".h5","")+"-log", creationflags=subprocess.CREATE_NEW_CONSOLE)
self.sub_process_check=True
if(self.sub_process_check==True):
self.sub_process.terminate()
self.sub_process=subprocess.Popen("tensorboard --logdir=TensorBoard_logs\\"+self.test_model_path.replace("Models/","").replace(".h5","")+"-log", creationflags=subprocess.CREATE_NEW_CONSOLE)
def model_bilgi(self, model, img):
s = self.test_model_path
find = ['Ozel-CNN-Agi', 'Transfer-Ogrenme',]
results = [item for item in find if item in s]
print(results)
if(results[0]=="Ozel-CNN-Agi"):
self.test_image = image.load_img(img, grayscale=True, target_size=(48, 48))
self.test_image = image.img_to_array(self.test_image)
self.test_image = np.expand_dims(self.test_image, axis = 0)
self.test_image /= 255
else:
self.test_image = load_img(img, target_size=(224, 224)) #Incention V3
self.test_image = img_to_array(self.test_image)
self.test_image = self.test_image.reshape((1, self.test_image.shape[0], self.test_image.shape[1], self.test_image.shape[2]))
predictions = model.predict(self.test_image)
harf=np.argmax(predictions[0])
skorlar = predictions[0].argsort()[-len(predictions[0]):][::-1]
max_score = 0.0
for i in skorlar:
score = predictions[0][i]
if score > max_score:
max_score = score
return self.CATEGORIES[harf], round(max_score*100,2)
def test(self):
secilen_model=tf.keras.models.load_model(self.test_model_path)
cap = cv2.VideoCapture(0)
# cap.set(3, 1280)
# cap.set(4, 720)
while True:
#kamera açlılıyor ve dondürülüyor
ret, img = cap.read()
img = cv2.flip(img, 1)
#çerçevenin bilgileri alınıyor
height, width = img.shape[:2]
#kırpılma alanı
#print(height, width)
x1, y1, x2, y2 = 313, 71, 600, 346
#kare çiziliyor
cv2.rectangle(img , (313, 71), (600, 346), (0,255,0) , 2) #sol üst x-y sağ alt x-y
#resim kırpılıyor ve gray oluor
img_cropped = img[y1:y2,x1:x2]
gray = cv2.cvtColor(img_cropped, cv2.COLOR_BGR2GRAY)
#resim kaydediliyor
cv2.imwrite("frame.png", gray)
tahmin, orani=self.model_bilgi(secilen_model ,"frame.png")
print("----------------------------")
print("HARF:" + tahmin)
#resime yazı yazılıyor
#cv2.putText(img, "Cerceve Bilgileri: "+str(width)+"x"+str(height), (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
cv2.putText(img, "Tahmin: "+str(tahmin)+" "+ str(orani)+"%", (315, 375), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
#Pencereler gösteriliyor
cv2.imshow("Ana kamera", img)
cv2.imshow("Islenen alan", gray)
if cv2.waitKey(50) & 0xFF == ord('x'):
break
cap.release()
cv2.destroyAllWindows()
def model_donustur(self):
tflite_model = tf.keras.models.load_model(self.test_model_path)
converter = tf.lite.TFLiteConverter.from_keras_model(tflite_model)
tflite_model = converter.convert()
open("TFL_models/"+self.test_model_path.replace("Models/", "").replace(".h5", "")+".tflite", "wb").write(tflite_model)
self.ui.label_53.setText("Model .tflite dosyasına cevirildi ve kayıtedildi")
# Shows the application window
def pencere():
    """Create the Qt application, show the main window and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    win = App()
    win.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    # only launch the GUI when executed as a script, not on import
    pencere()
| [
"csv.DictWriter",
"cv2.rectangle",
"tensorflow.keras.preprocessing.image.resize",
"PyQt5.QtGui.QIcon",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"webbrowser.open",
"cv2.imshow",
"numpy.array",
"random.choices",
"tensorflow.keras.layers.Dense",
"tens... | [((24653, 24685), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (24675, 24685), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((1104, 1119), 'tasarim.Ui_MainWindow', 'Ui_MainWindow', ([], {}), '()\n', (1117, 1119), False, 'from tasarim import Ui_MainWindow\n'), ((1975, 1992), 'os.walk', 'os.walk', (['"""Models"""'], {}), "('Models')\n", (1982, 1992), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n'), ((3909, 3922), 'PyQt5.QtWidgets.QFileDialog', 'QFileDialog', ([], {}), '()\n', (3920, 3922), False, 'from PyQt5.QtWidgets import QFileDialog, QMessageBox\n'), ((5236, 5298), 'matplotlib.pyplot.bar', 'plt.bar', (['self.subdirs', 'self.dircounts'], {'color': '"""blue"""', 'width': '(0.4)'}), "(self.subdirs, self.dircounts, color='blue', width=0.4)\n", (5243, 5298), True, 'import matplotlib.pyplot as plt\n'), ((5320, 5350), 'matplotlib.pyplot.title', 'plt.title', (['"""Veri Seti İçeriği"""'], {}), "('Veri Seti İçeriği')\n", (5329, 5350), True, 'import matplotlib.pyplot as plt\n'), ((5360, 5382), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sınıflar"""'], {}), "('Sınıflar')\n", (5370, 5382), True, 'import matplotlib.pyplot as plt\n'), ((5392, 5418), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Örnek sayısı"""'], {}), "('Örnek sayısı')\n", (5402, 5418), True, 'import matplotlib.pyplot as plt\n'), ((5436, 5446), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5444, 5446), True, 'import matplotlib.pyplot as plt\n'), ((6510, 6532), 'os.walk', 'os.walk', (['"""csv_dataset"""'], {}), "('csv_dataset')\n", (6517, 6532), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n'), ((7860, 7916), 'os.listdir', 'os.listdir', (["(self.dataset + '\\\\' + self.CATEGORIES[value])"], {}), "(self.dataset + '\\\\' + self.CATEGORIES[value])\n", (7870, 7916), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n'), 
((19809, 19826), 'os.walk', 'os.walk', (['"""Models"""'], {}), "('Models')\n", (19816, 19826), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n'), ((22021, 22046), 'numpy.argmax', 'np.argmax', (['predictions[0]'], {}), '(predictions[0])\n', (22030, 22046), True, 'import numpy as np\n'), ((22407, 22455), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['self.test_model_path'], {}), '(self.test_model_path)\n', (22433, 22455), True, 'import tensorflow as tf\n'), ((22479, 22498), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (22495, 22498), False, 'import cv2\n'), ((24026, 24049), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (24047, 24049), False, 'import cv2\n'), ((24104, 24152), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['self.test_model_path'], {}), '(self.test_model_path)\n', (24130, 24152), True, 'import tensorflow as tf\n'), ((24173, 24227), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['tflite_model'], {}), '(tflite_model)\n', (24213, 24227), True, 'import tensorflow as tf\n'), ((1178, 1209), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""assets\\\\logo.png"""'], {}), "('assets\\\\logo.png')\n", (1189, 1209), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((4160, 4181), 'os.walk', 'os.walk', (['self.dataset'], {}), '(self.dataset)\n', (4167, 4181), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n'), ((7588, 7629), 'csv.DictWriter', 'csv.DictWriter', (['f'], {'fieldnames': 'sutunadlari'}), '(f, fieldnames=sutunadlari)\n', (7602, 7629), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n'), ((8176, 8198), 'tensorflow.keras.preprocessing.image.resize', 'image.resize', (['(48, 48)'], {}), '((48, 48))\n', (8188, 8198), False, 'from tensorflow.keras.preprocessing import image\n'), ((8431, 8493), 'random.choices', 
'random.choices', (['durumlar'], {'weights': '[self.egitim_o, self.test_o]'}), '(durumlar, weights=[self.egitim_o, self.test_o])\n', (8445, 8493), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n'), ((12747, 12764), 'numpy.array', 'np.array', (['content'], {}), '(content)\n', (12755, 12764), True, 'import numpy as np\n'), ((13899, 13927), 'numpy.array', 'np.array', (['x_train', '"""float32"""'], {}), "(x_train, 'float32')\n", (13907, 13927), True, 'import numpy as np\n'), ((13950, 13978), 'numpy.array', 'np.array', (['y_train', '"""float32"""'], {}), "(y_train, 'float32')\n", (13958, 13978), True, 'import numpy as np\n'), ((14000, 14027), 'numpy.array', 'np.array', (['x_test', '"""float32"""'], {}), "(x_test, 'float32')\n", (14008, 14027), True, 'import numpy as np\n'), ((14049, 14076), 'numpy.array', 'np.array', (['y_test', '"""float32"""'], {}), "(y_test, 'float32')\n", (14057, 14076), True, 'import numpy as np\n'), ((14664, 14676), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (14674, 14676), False, 'from tensorflow.keras.models import Sequential, Model\n'), ((17248, 17273), 'numpy.array', 'np.array', (['self.CATEGORIES'], {}), '(self.CATEGORIES)\n', (17256, 17273), True, 'import numpy as np\n'), ((17354, 17425), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1 / 255)', 'validation_split': '(self.test_o / 100)'}), '(rescale=1 / 255, validation_split=self.test_o / 100)\n', (17372, 17425), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\n'), ((18502, 18542), 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (18507, 18542), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), ((18681, 18723), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.inputs', 'outputs': 'output'}), 
'(inputs=model.inputs, outputs=output)\n', (18686, 18723), False, 'from tensorflow.keras.models import Sequential, Model\n'), ((19016, 19065), 'numpy.ceil', 'np.ceil', (['(train_data_gen.samples / self.batch_size)'], {}), '(train_data_gen.samples / self.batch_size)\n', (19023, 19065), True, 'import numpy as np\n'), ((19107, 19155), 'numpy.ceil', 'np.ceil', (['(test_data_gen.samples / self.batch_size)'], {}), '(test_data_gen.samples / self.batch_size)\n', (19114, 19155), True, 'import numpy as np\n'), ((20537, 20601), 'webbrowser.open', 'webbrowser.open', (['"""http://localhost:6006/"""'], {'new': '(1)', 'autoraise': '(True)'}), "('http://localhost:6006/', new=1, autoraise=True)\n", (20552, 20601), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n'), ((21422, 21479), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img'], {'grayscale': '(True)', 'target_size': '(48, 48)'}), '(img, grayscale=True, target_size=(48, 48))\n', (21436, 21479), False, 'from tensorflow.keras.preprocessing import image\n'), ((21510, 21545), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['self.test_image'], {}), '(self.test_image)\n', (21528, 21545), False, 'from tensorflow.keras.preprocessing import image\n'), ((21576, 21615), 'numpy.expand_dims', 'np.expand_dims', (['self.test_image'], {'axis': '(0)'}), '(self.test_image, axis=0)\n', (21590, 21615), True, 'import numpy as np\n'), ((21697, 21734), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['img'], {'target_size': '(224, 224)'}), '(img, target_size=(224, 224))\n', (21705, 21734), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\n'), ((21779, 21808), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['self.test_image'], {}), '(self.test_image)\n', (21791, 21808), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array\n'), 
((22670, 22686), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (22678, 22686), False, 'import cv2\n'), ((22961, 23018), 'cv2.rectangle', 'cv2.rectangle', (['img', '(313, 71)', '(600, 346)', '(0, 255, 0)', '(2)'], {}), '(img, (313, 71), (600, 346), (0, 255, 0), 2)\n', (22974, 23018), False, 'import cv2\n'), ((23176, 23221), 'cv2.cvtColor', 'cv2.cvtColor', (['img_cropped', 'cv2.COLOR_BGR2GRAY'], {}), '(img_cropped, cv2.COLOR_BGR2GRAY)\n', (23188, 23221), False, 'import cv2\n'), ((23279, 23309), 'cv2.imwrite', 'cv2.imwrite', (['"""frame.png"""', 'gray'], {}), "('frame.png', gray)\n", (23290, 23309), False, 'import cv2\n'), ((23834, 23863), 'cv2.imshow', 'cv2.imshow', (['"""Ana kamera"""', 'img'], {}), "('Ana kamera', img)\n", (23844, 23863), False, 'import cv2\n'), ((23876, 23908), 'cv2.imshow', 'cv2.imshow', (['"""Islenen alan"""', 'gray'], {}), "('Islenen alan', gray)\n", (23886, 23908), False, 'import cv2\n'), ((11749, 11900), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': "('Models/' + self.Model_adi + '.h5')", 'verbose': '(1)', 'monitor': '"""val_accuracy"""', 'mode': '"""max"""', 'save_best_only': '(True)'}), "(filepath='Models/' + self.Model_adi +\n '.h5', verbose=1, monitor='val_accuracy', mode='max', save_best_only=True)\n", (11783, 11900), True, 'import tensorflow as tf\n'), ((13418, 13442), 'numpy.array', 'np.array', (['val', '"""float32"""'], {}), "(val, 'float32')\n", (13426, 13442), True, 'import numpy as np\n'), ((14723, 14785), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(5, 5)'], {'activation': '"""relu"""', 'input_shape': '(48, 48, 1)'}), "(64, (5, 5), activation='relu', input_shape=(48, 48, 1))\n", (14729, 14785), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\n'), ((14807, 14853), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(5, 5)', 'strides': '(2, 2)'}), '(pool_size=(5, 5), strides=(2, 2))\n', (14819, 
14853), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\n'), ((14900, 14937), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (14906, 14937), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\n'), ((14961, 14998), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (14967, 14998), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\n'), ((15022, 15072), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)'}), '(pool_size=(3, 3), strides=(2, 2))\n', (15038, 15072), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\n'), ((15119, 15157), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (15125, 15157), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\n'), ((15181, 15219), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (15187, 15219), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\n'), ((15243, 15293), 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)'}), '(pool_size=(3, 3), strides=(2, 2))\n', (15259, 15293), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\n'), ((15317, 15326), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (15324, 15326), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), ((15386, 15416), 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (15391, 
15416), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), ((15440, 15452), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (15447, 15452), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), ((15476, 15506), 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (15481, 15506), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), ((15530, 15542), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (15537, 15542), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), ((15567, 15607), 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (15572, 15607), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), ((15694, 15862), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': "('TensorBoard_logs/' + self.Model_adi + '-log')", 'histogram_freq': '(1)', 'write_graph': '(True)', 'update_freq': '"""epoch"""', 'profile_batch': '(2)', 'embeddings_freq': '(1)'}), "(log_dir='TensorBoard_logs/' + self.Model_adi + '-log',\n histogram_freq=1, write_graph=True, update_freq='epoch', profile_batch=\n 2, embeddings_freq=1)\n", (15705, 15862), False, 'from tensorflow.keras.callbacks import TensorBoard\n'), ((17970, 18006), 'tensorflow.keras.applications.MobileNetV2', 'MobileNetV2', ([], {'input_shape': 'IMAGE_SHAPE'}), '(input_shape=IMAGE_SHAPE)\n', (17981, 18006), False, 'from tensorflow.keras.applications import MobileNetV2, ResNet50, InceptionV3\n'), ((18079, 18112), 'tensorflow.keras.applications.ResNet50', 'ResNet50', ([], {'input_shape': 'IMAGE_SHAPE'}), '(input_shape=IMAGE_SHAPE)\n', (18087, 18112), False, 'from tensorflow.keras.applications import MobileNetV2, ResNet50, InceptionV3\n'), ((18188, 18224), 
'tensorflow.keras.applications.InceptionV3', 'InceptionV3', ([], {'input_shape': 'IMAGE_SHAPE'}), '(input_shape=IMAGE_SHAPE)\n', (18199, 18224), False, 'from tensorflow.keras.applications import MobileNetV2, ResNet50, InceptionV3\n'), ((19216, 19384), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': "('TensorBoard_logs/' + self.Model_adi + '-log')", 'histogram_freq': '(1)', 'write_graph': '(True)', 'update_freq': '"""epoch"""', 'profile_batch': '(2)', 'embeddings_freq': '(1)'}), "(log_dir='TensorBoard_logs/' + self.Model_adi + '-log',\n histogram_freq=1, write_graph=True, update_freq='epoch', profile_batch=\n 2, embeddings_freq=1)\n", (19227, 19384), False, 'from tensorflow.keras.callbacks import TensorBoard\n'), ((8117, 8138), 'PIL.Image.open', 'Image.open', (['full_path'], {}), '(full_path)\n', (8127, 8138), False, 'from PIL import Image\n'), ((23937, 23952), 'cv2.waitKey', 'cv2.waitKey', (['(50)'], {}), '(50)\n', (23948, 23952), False, 'import cv2\n'), ((6692, 6716), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (6704, 6716), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n'), ((6776, 6800), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (6788, 6800), False, 'import sys, os, math, csv, random, time, datetime, webbrowser, subprocess\n')] |
import numpy as np
import MITgcmutils as mit
#import xmitgcm as xmit
#import matplotlib.pyplot as plt
from scipy.interpolate import griddata
import os
import gc
from multiprocessing import Pool
#plt.ion()
#-- directories --
dir_grd12 = '/glade/p/univ/ufsu0011/runs/gridMIT_update1/'
dir_grd50 = '/glade/p/univ/ufsu0011/runs/chao50/gridMIT/'
dir_in = '/glade/p/univ/ufsu0011/data_in/atmo_cond_12'
dir_out = '/glade/p/univ/ufsu0011/data_in/atmo_cond_50'
iper = 2003  # year being processed
tmpdir = str('%s/%04i' % (dir_out, iper))
# BUG FIX: exist_ok replaces the racy isdir()-then-makedirs() pattern
os.makedirs(tmpdir, exist_ok=True)
#-- some parameters --
#[nr50, ny50, nx50] = [75, 1450, 1850]
# dimensions of the 1/12-degree input fields (levels, y, x)
[nr12, ny12, nx12] = [46, 900, 1000]
nt = 1460 # +2 time records for begin/end interp
# atmospheric variables to process (temperature, humidity, winds, radiation, precip)
varN = ['t2', 'q2', 'u10', 'v10', 'radsw', 'radlw', 'precip']
nvar = len(varN)
# number of processors to use
# (should agree with those requested when launching the interactive session)
# i.e. if nprocs = 36 (one full node)
# qsub -I -l select=1:ncpus=36:mpiprocs=36
nproc=36
#------------------------------------------------------------------
# Make Atmospheric forcing files from our previous 1/12 runs
# -->> need to update with 6-hourly forcing files from original DFS
#------------------------------------------------------------------
#-- grid params --
# parent and child grid origin are co-localized
rSphere = 6370000.0  # Earth radius [m] used for degree -> metre conversion
#- 1/50 -
# cell-centre longitudes/latitudes of the 1/50-degree (child) grid
x50deg = mit.rdmds(dir_grd50 + 'XC')
y50deg = mit.rdmds(dir_grd50 + 'YC')
[ny50, nx50] = x50deg.shape
# convert degrees to metres on the sphere, relative to the 1/50 grid origin
xx50 = np.radians(x50deg - x50deg[0,0]) * rSphere * np.cos(np.radians(y50deg))
yy50 = np.radians(y50deg - y50deg[0,0]) * rSphere
#- 1/12 -
# same conversion for the 1/12-degree (parent) grid, in the SAME reference
# frame (anchored at the 1/50 grid origin) so both grids share coordinates
x12deg = mit.rdmds(dir_grd12 + 'XC')
y12deg = mit.rdmds(dir_grd12 + 'YC')
xx12 = np.radians(x12deg - x50deg[0,0]) * rSphere * np.cos(np.radians(y12deg))
yy12 = np.radians(y12deg - y50deg[0,0]) * rSphere
# flattened (N, 2) array of 1/12 source points for scipy.griddata
xy12 = np.zeros([(ny12)*(nx12), 2])
xy12[:, 0] = xx12.reshape([ny12*nx12])
xy12[:, 1] = yy12.reshape([ny12*nx12])
#-- define horizontal interpolation --
def hz_interp(iii):
    """Horizontally interpolate time record *iii* onto the 1/50 grid.

    Relies on module-level state (var12, xy12, xx50, yy50, mmeth, varN, ivar)
    so that it can be dispatched through multiprocessing.Pool.map.
    """
    print("Interpolating %s, time: %04i" % (varN[ivar], iii))
    flat_src = var12[iii, :, :].reshape([ny12 * nx12])
    return griddata(xy12, flat_src, (xx50, yy50), method=mmeth)
for ivar in range(nvar):
    #-- input file --
    # pre-1977 precipitation comes from an extended climatology file
    # instead of a yearly file
    if varN[ivar] == 'precip' and iper <= 1976:
        in_file = str("%s/precip_climExtd.box" % (dir_in))
    else:
        in_file = str("%s/%04i/%s_%04i.box" % (dir_in, iper, varN[ivar], iper))
    # BUG FIX: read through a context manager so the handle is always closed
    with open(in_file, 'r') as f:
        var12 = np.fromfile(f, '>f4').reshape([nt + 2, ny12, nx12])
    #- hz interp (with parallelization) -
    # cubic interpolation for the wind components, bilinear otherwise
    if varN[ivar] == 'u10' or varN[ivar] == 'v10':
        mmeth = 'cubic'
    else:
        mmeth = 'linear'
    if __name__ == '__main__':
        # BUG FIX: the original Pool was never closed/joined; the context
        # manager tears the workers down once the map completes.
        with Pool(nproc) as p:
            tmp_var50 = p.map(hz_interp, np.arange(nt + 2))
    #- reshape the list of 2-D records into one 3-D array -
    var50 = np.zeros([nt + 2, ny50, nx50])
    for iii in range(nt + 2):
        var50[iii, :, :] = tmp_var50[iii]
    #- save as big-endian float32 ('>f4'), same convention as the input -
    if varN[ivar] == 'precip' and iper <= 1976:
        out_file = str("%s/precip_climExtd.bin" % (dir_out))
    else:
        out_file = str("%s/%04i/%s_%04i.bin" % (dir_out, iper, varN[ivar], iper))
    with open(out_file, 'wb') as f:
        var50.reshape([(nt + 2) * ny50 * nx50]).astype('>f4').tofile(f)
    # free the large arrays before the next variable to cap peak memory
    del var12, var50, tmp_var50
    gc.collect()
exit()
| [
"numpy.radians",
"numpy.fromfile",
"os.makedirs",
"numpy.zeros",
"os.path.isdir",
"multiprocessing.Pool",
"gc.collect",
"MITgcmutils.rdmds",
"numpy.arange"
] | [((1378, 1405), 'MITgcmutils.rdmds', 'mit.rdmds', (["(dir_grd50 + 'XC')"], {}), "(dir_grd50 + 'XC')\n", (1387, 1405), True, 'import MITgcmutils as mit\n'), ((1415, 1442), 'MITgcmutils.rdmds', 'mit.rdmds', (["(dir_grd50 + 'YC')"], {}), "(dir_grd50 + 'YC')\n", (1424, 1442), True, 'import MITgcmutils as mit\n'), ((1619, 1646), 'MITgcmutils.rdmds', 'mit.rdmds', (["(dir_grd12 + 'XC')"], {}), "(dir_grd12 + 'XC')\n", (1628, 1646), True, 'import MITgcmutils as mit\n'), ((1656, 1683), 'MITgcmutils.rdmds', 'mit.rdmds', (["(dir_grd12 + 'YC')"], {}), "(dir_grd12 + 'YC')\n", (1665, 1683), True, 'import MITgcmutils as mit\n'), ((1820, 1846), 'numpy.zeros', 'np.zeros', (['[ny12 * nx12, 2]'], {}), '([ny12 * nx12, 2])\n', (1828, 1846), True, 'import numpy as np\n'), ((517, 538), 'os.path.isdir', 'os.path.isdir', (['tmpdir'], {}), '(tmpdir)\n', (530, 538), False, 'import os\n'), ((542, 561), 'os.makedirs', 'os.makedirs', (['tmpdir'], {}), '(tmpdir)\n', (553, 561), False, 'import os\n'), ((1557, 1590), 'numpy.radians', 'np.radians', (['(y50deg - y50deg[0, 0])'], {}), '(y50deg - y50deg[0, 0])\n', (1567, 1590), True, 'import numpy as np\n'), ((1770, 1803), 'numpy.radians', 'np.radians', (['(y12deg - y50deg[0, 0])'], {}), '(y12deg - y50deg[0, 0])\n', (1780, 1803), True, 'import numpy as np\n'), ((2841, 2871), 'numpy.zeros', 'np.zeros', (['[nt + 2, ny50, nx50]'], {}), '([nt + 2, ny50, nx50])\n', (2849, 2871), True, 'import numpy as np\n'), ((3339, 3351), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3349, 3351), False, 'import gc\n'), ((1478, 1511), 'numpy.radians', 'np.radians', (['(x50deg - x50deg[0, 0])'], {}), '(x50deg - x50deg[0, 0])\n', (1488, 1511), True, 'import numpy as np\n'), ((1530, 1548), 'numpy.radians', 'np.radians', (['y50deg'], {}), '(y50deg)\n', (1540, 1548), True, 'import numpy as np\n'), ((1691, 1724), 'numpy.radians', 'np.radians', (['(x12deg - x50deg[0, 0])'], {}), '(x12deg - x50deg[0, 0])\n', (1701, 1724), True, 'import numpy as np\n'), ((1743, 1761), 
'numpy.radians', 'np.radians', (['y12deg'], {}), '(y12deg)\n', (1753, 1761), True, 'import numpy as np\n'), ((2754, 2765), 'multiprocessing.Pool', 'Pool', (['nproc'], {}), '(nproc)\n', (2758, 2765), False, 'from multiprocessing import Pool\n'), ((2799, 2816), 'numpy.arange', 'np.arange', (['(nt + 2)'], {}), '(nt + 2)\n', (2808, 2816), True, 'import numpy as np\n'), ((2346, 2367), 'numpy.fromfile', 'np.fromfile', (['f', '""">f4"""'], {}), "(f, '>f4')\n", (2357, 2367), True, 'import numpy as np\n'), ((2515, 2536), 'numpy.fromfile', 'np.fromfile', (['f', '""">f4"""'], {}), "(f, '>f4')\n", (2526, 2536), True, 'import numpy as np\n')] |
from builtins import zip
import numpy as np
import cv2
from matplotlib import pyplot as plt
def drawMatches(img1, kp1, img2, kp2, matches):
    """Visualize keypoint matches between two grayscale images.

    Re-implementation of ``cv2.drawMatches`` for OpenCV 2.4.x, where that
    helper is not available (it ships with OpenCV 3.0.0).

    The two images are placed side by side in a single montage; each matched
    keypoint is marked with a circle, and corresponding keypoints are joined
    by a line.

    img1, img2 - grayscale images
    kp1, kp2   - keypoint lists from any OpenCV keypoint detector
    matches    - list of DMatch objects pairing keypoints of the two images

    Returns the montage (also shown in a window until a key is pressed).
    """
    # http://stackoverflow.com/questions/20259025/module-object-has-no-attribute-drawmatches-opencv-python
    rows1, cols1 = img1.shape[0], img1.shape[1]
    rows2, cols2 = img2.shape[0], img2.shape[1]
    # montage canvas: tall enough for either image, wide enough for both
    out = np.zeros((max([rows1, rows2]), cols1 + cols2, 3), dtype='uint8')
    # left half: img1 replicated into 3 channels; right half: img2
    out[:rows1, :cols1, :] = np.dstack([img1, img1, img1])
    out[:rows2, cols1:cols1 + cols2, :] = np.dstack([img2, img2, img2])
    for mat in matches:
        # matched keypoint coordinates in each image (x = column, y = row)
        (x1, y1) = kp1[mat.queryIdx].pt
        (x2, y2) = kp2[mat.trainIdx].pt
        pt_left = (int(x1), int(y1))
        pt_right = (int(x2) + cols1, int(y2))  # shift into the right half
        # blue circles of radius 4 on both keypoints, joined by a blue line
        cv2.circle(out, pt_left, 4, (255, 0, 0), 1)
        cv2.circle(out, pt_right, 4, (255, 0, 0), 1)
        cv2.line(out, pt_left, pt_right, (255, 0, 0), 1)
    cv2.imshow('Matched Features', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return out
def filter_matches(kp1, kp2, matches, ratio = 0.75):
    """Apply Lowe's ratio test to raw 2-NN matches.

    :param kp1: keypoints of the query image
    :param kp2: keypoints of the train image
    :param matches: knn match lists (best / second-best DMatch pairs)
    :param ratio: accept a match only if best.distance < second.distance * ratio
    :return: (query points, train points, keypoint pairs) of surviving matches
    """
    good_query, good_train = [], []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < pair[1].distance * ratio:
            best = pair[0]
            good_query.append(kp1[best.queryIdx])  # keypoint of the query descriptor
            good_train.append(kp2[best.trainIdx])  # keypoint of the train descriptor
    p1 = np.float32([kp.pt for kp in good_query])
    p2 = np.float32([kp.pt for kp in good_train])
    return p1, p2, list(zip(good_query, good_train))
# Demo: detect SIFT features in two images, match them by brute force, and
# display the matches side by side.
fn1 = r'C:\Users\Davtoh\Dropbox\PYTHON\projects\Descriptors\Tests\im2_1.jpg' # queryImage
fn2 = r'C:\Users\Davtoh\Dropbox\PYTHON\projects\Descriptors\Tests\im2_2.jpg' # trainImage
# load both images as grayscale (flag 0) and resize to a common 800x600
img1 = cv2.resize(cv2.imread(fn1, 0), (800, 600))
img2 = cv2.resize(cv2.imread(fn2, 0), (800, 600))
# NOTE(review): cv2.SIFT() is the OpenCV 2.4 API; 3.x moved it to
# cv2.xfeatures2d.SIFT_create() -- confirm the target OpenCV version.
sift = cv2.SIFT()
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# BFMatcher with default params
bf = cv2.BFMatcher()
# Match descriptors.
kp_pairs = bf.match(des1,des2)
# Sort them in the order of their distance.
kp_pairs = sorted(kp_pairs, key = lambda x:x.distance)
# cv2.drawMatchesKnn expects list of lists as matches.
# (bf.match returns single DMatch objects, so the local drawMatches helper
# -- not filter_matches -- is used here.)
img3 = drawMatches(img1,kp1,img2,kp2,kp_pairs)
plt.imshow(img3),plt.show()
"matplotlib.pyplot.imshow",
"cv2.BFMatcher",
"numpy.dstack",
"matplotlib.pyplot.show",
"cv2.imshow",
"builtins.zip",
"cv2.SIFT",
"cv2.destroyAllWindows",
"cv2.waitKey",
"numpy.float32",
"cv2.imread"
] | [((3587, 3597), 'cv2.SIFT', 'cv2.SIFT', ([], {}), '()\n', (3595, 3597), False, 'import cv2\n'), ((3726, 3741), 'cv2.BFMatcher', 'cv2.BFMatcher', ([], {}), '()\n', (3739, 3741), False, 'import cv2\n'), ((1424, 1453), 'numpy.dstack', 'np.dstack', (['[img1, img1, img1]'], {}), '([img1, img1, img1])\n', (1433, 1453), True, 'import numpy as np\n'), ((1539, 1568), 'numpy.dstack', 'np.dstack', (['[img2, img2, img2]'], {}), '([img2, img2, img2])\n', (1548, 1568), True, 'import numpy as np\n'), ((2393, 2428), 'cv2.imshow', 'cv2.imshow', (['"""Matched Features"""', 'out'], {}), "('Matched Features', out)\n", (2403, 2428), False, 'import cv2\n'), ((2433, 2447), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2444, 2447), False, 'import cv2\n'), ((2452, 2475), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2473, 2475), False, 'import cv2\n'), ((3150, 3184), 'numpy.float32', 'np.float32', (['[kp.pt for kp in mkp1]'], {}), '([kp.pt for kp in mkp1])\n', (3160, 3184), True, 'import numpy as np\n'), ((3194, 3228), 'numpy.float32', 'np.float32', (['[kp.pt for kp in mkp2]'], {}), '([kp.pt for kp in mkp2])\n', (3204, 3228), True, 'import numpy as np\n'), ((3497, 3515), 'cv2.imread', 'cv2.imread', (['fn1', '(0)'], {}), '(fn1, 0)\n', (3507, 3515), False, 'import cv2\n'), ((3547, 3565), 'cv2.imread', 'cv2.imread', (['fn2', '(0)'], {}), '(fn2, 0)\n', (3557, 3565), False, 'import cv2\n'), ((3997, 4013), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img3'], {}), '(img3)\n', (4007, 4013), True, 'from matplotlib import pyplot as plt\n'), ((4014, 4024), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4022, 4024), True, 'from matplotlib import pyplot as plt\n'), ((3249, 3264), 'builtins.zip', 'zip', (['mkp1', 'mkp2'], {}), '(mkp1, mkp2)\n', (3252, 3264), False, 'from builtins import zip\n')] |
"""Make a heatmap of punctuation."""
import math
from string import punctuation
import nltk
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
# Install seaborn using: pip install seaborn.
PUNCT_SET = set(punctuation)
def main():
    """Plot a square heatmap of semicolon usage for each author's corpus."""
    # Load text files into dictionary by author.
    strings_by_author = dict()
    strings_by_author['doyle'] = text_to_string('hound.txt')
    strings_by_author['wells'] = text_to_string('war.txt')
    strings_by_author['unknown'] = text_to_string('lost.txt')
    # Tokenize text strings preserving only punctuation marks.
    punct_by_author = make_punct_dict(strings_by_author)
    # Convert punctuation marks to numerical values and plot heatmaps.
    plt.ion()
    for author in punct_by_author:
        heat = convert_punct_to_number(punct_by_author, author)
        # BUG FIX: the original trimmed to a fixed 6561 values and reshaped to
        # (sqrt(len), sqrt(len)), which raises when a corpus has fewer than
        # 6561 marks and the count is not a perfect square.  Use the largest
        # square that fits, capped at 81x81 as before (identical output for
        # corpora with >= 6561 marks).
        side = int(math.sqrt(min(len(heat), 6561)))
        arr_reshaped = np.array(heat[:side * side]).reshape(side, side)
        fig, ax = plt.subplots(figsize=(7, 7))
        sns.heatmap(arr_reshaped,
                    cmap=ListedColormap(['blue', 'yellow']),
                    square=True,
                    ax=ax)
        ax.set_title('Heatmap Semicolons {}'.format(author))
    plt.show()
def text_to_string(filename):
    """Return the full contents of a text file as one string."""
    with open(filename) as infile:
        contents = infile.read()
    return contents
def make_punct_dict(strings_by_author):
    """Return dictionary of tokenized punctuation by corpus by author."""
    punct_by_author = {}
    for author, text in strings_by_author.items():
        marks = [token for token in nltk.word_tokenize(text)
                 if token in PUNCT_SET]
        punct_by_author[author] = marks
        print("Number punctuation marks in {} = {}"
              .format(author, len(marks)))
    return punct_by_author
def convert_punct_to_number(punct_by_author, author):
    """Return list of punctuation marks converted to numerical values."""
    # Semicolons map to 1, every other punctuation mark to 2.
    return [1 if mark == ';' else 2 for mark in punct_by_author[author]]
# Run the analysis only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"nltk.word_tokenize",
"matplotlib.colors.ListedColormap",
"numpy.array",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((762, 771), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (769, 771), True, 'import matplotlib.pyplot as plt\n'), ((1338, 1348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1346, 1348), True, 'import matplotlib.pyplot as plt\n'), ((885, 906), 'numpy.array', 'np.array', (['heat[:6561]'], {}), '(heat[:6561])\n', (893, 906), True, 'import numpy as np\n'), ((1089, 1117), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (1101, 1117), True, 'import matplotlib.pyplot as plt\n'), ((1694, 1739), 'nltk.word_tokenize', 'nltk.word_tokenize', (['strings_by_author[author]'], {}), '(strings_by_author[author])\n', (1712, 1739), False, 'import nltk\n'), ((1177, 1211), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['blue', 'yellow']"], {}), "(['blue', 'yellow'])\n", (1191, 1211), False, 'from matplotlib.colors import ListedColormap\n')] |
from skimage.draw import line
import numpy as np
import cv2
import matplotlib.pyplot as plt
def get_eye_line(eye_landmarks):
    """Return the pixel (x, y) coordinates of a polyline across the eye.

    The polyline goes from landmark l0 through the midpoints of the
    (l1, l5) and (l2, l4) landmark pairs to landmark l3 — presumably the
    two eye corners and the upper/lower lid points (TODO confirm).
    """
    l0, l1, l2, l3, l4, l5 = eye_landmarks.astype('int')
    mid_a = list(((np.array(l1) + np.array(l5)) / 2).astype('int'))
    mid_b = list(((np.array(l2) + np.array(l4)) / 2).astype('int'))
    pixels = []
    for start, end in ((l0, mid_a), (mid_a, mid_b), (mid_b, l3)):
        pixels.extend(zip(*line(*start, *end)))
    return pixels
def get_frp(image_gray, landmarks, return_img= False, check_errors= False):
    """Compute the ratio between the brightest and darkest pixel along the
    eye line (frp = (max + 0.1) / (min + 0.1); the name is presumably a
    reflection-related ratio — TODO confirm).

    image_gray:   grayscale image indexed as [y, x]
    landmarks:    eye landmarks, passed through to get_eye_line()
    return_img:   if True, also return the annotated debug image
    check_errors: if True, show diagnostic plots via matplotlib
    """
    if check_errors==True:
        # Debug view: draw every landmark as a filled black circle.
        image_gray_show= image_gray.copy()
        for point in landmarks:
            point= tuple(map(int, point))
            image_gray_show = cv2.circle(image_gray_show, point, 10, (0), -1)
        plt.imshow(image_gray_show, cmap='gray')
        plt.title('get frp start')
        plt.show()
        plt.imshow(image_gray)
        plt.title('original image')
        plt.show()
    # Scan the eye line for the extreme pixel intensities and their positions.
    min_pixel_color= 255
    max_pixel_color=0
    eye_line = get_eye_line(landmarks)
    for x,y in eye_line:
        #print(image_gray[y, x])
        gray_color = image_gray[y, x]
        if gray_color>max_pixel_color:
            max_pixel_color=gray_color
            y_max, x_max= [y, x]
        if gray_color<min_pixel_color:
            min_pixel_color=gray_color
            y_min, x_min= [y, x]
    #print(min_pixel_color, max_pixel_color)
    # Annotated image: eye line in white, extreme pixels as small black boxes.
    image_gray_show= image_gray.copy()
    for x,y in eye_line:
        image_gray_show[y, x]=255
    try:
        # NOTE(review): y_min/y_max may be unbound if eye_line is empty or no
        # pixel updated the extrema; the bare except below also swallows that.
        image_gray_show[y_min-2:y_min+2, x_min-2:x_min+2]=0
        image_gray_show[y_max-2:y_max+2, x_max-2:x_max+2]=0
    except:
        print(f'detected face is incorrect !!! : make check errors= True to see | currect check_errors : {check_errors}')
        if check_errors==True:
            # Debug view of the failing detection: eye line and landmarks.
            image_gray_show= image_gray.copy()
            for point in eye_line:
                image_gray_show = cv2.circle(image_gray_show, point, 10, (0), -1)
            for point in landmarks:
                point= tuple(map(int, point))
                image_gray_show = cv2.circle(image_gray_show, point, 10, (0), -1)
            plt.imshow(image_gray_show, cmap='gray')
            plt.title('get_frp : detected face incorrect !!!')
            plt.show()
            print('max, min pixel colors : ', max_pixel_color, min_pixel_color)
        else:pass
    # 0.1 offsets guard against division by zero for fully black pixels.
    frp= (max_pixel_color+0.1)/(min_pixel_color+0.1)
    if return_img:return image_gray_show, frp
    else:return frp
def img2eye(image_gray, eye_landmark, landmark=None, resize=28, check_errors=False):
    """Crop a square patch centred on the eye and resize it.

    image_gray:   grayscale image indexed as [y, x]
    eye_landmark: landmarks of one eye; their mean defines the crop centre
    landmark:     full face landmarks; points 9 and 28 set the crop size.
                  Required — the default of None is kept only for interface
                  compatibility and now raises a clear error.
    resize:       side length (pixels) of the returned square patch
    check_errors: unused; kept for interface compatibility
    """
    if landmark is None:
        # BUGFIX: the original dereferenced landmark unconditionally and
        # crashed with an opaque TypeError when it was omitted.
        raise ValueError("img2eye requires the full face 'landmark' array")
    cx, cy = (int(v) for v in np.mean(eye_landmark, axis=0))
    half = int((landmark[9][1] - landmark[28][1]) / 5)
    # Clamp the lower bounds: negative slice starts would wrap around and
    # produce an empty/garbage crop for eyes near the image border.
    top = max(0, cy - half)
    left = max(0, cx - half)
    eye_image = image_gray[top:cy + half, left:cx + half]
    return cv2.resize(eye_image, (resize, resize))
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.array",
"cv2.circle",
"skimage.draw.line",
"matplotlib.pyplot.title",
"cv2.resize",
"matplotlib.pyplot.show"
] | [((2717, 2756), 'cv2.resize', 'cv2.resize', (['eye_image', '(resize, resize)'], {}), '(eye_image, (resize, resize))\n', (2727, 2756), False, 'import cv2\n'), ((721, 761), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_gray_show'], {'cmap': '"""gray"""'}), "(image_gray_show, cmap='gray')\n", (731, 761), True, 'import matplotlib.pyplot as plt\n'), ((768, 794), 'matplotlib.pyplot.title', 'plt.title', (['"""get frp start"""'], {}), "('get frp start')\n", (777, 794), True, 'import matplotlib.pyplot as plt\n'), ((801, 811), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (809, 811), True, 'import matplotlib.pyplot as plt\n'), ((826, 848), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_gray'], {}), '(image_gray)\n', (836, 848), True, 'import matplotlib.pyplot as plt\n'), ((855, 882), 'matplotlib.pyplot.title', 'plt.title', (['"""original image"""'], {}), "('original image')\n", (864, 882), True, 'import matplotlib.pyplot as plt\n'), ((889, 899), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (897, 899), True, 'import matplotlib.pyplot as plt\n'), ((667, 712), 'cv2.circle', 'cv2.circle', (['image_gray_show', 'point', '(10)', '(0)', '(-1)'], {}), '(image_gray_show, point, 10, 0, -1)\n', (677, 712), False, 'import cv2\n'), ((2495, 2524), 'numpy.mean', 'np.mean', (['eye_landmark'], {'axis': '(0)'}), '(eye_landmark, axis=0)\n', (2502, 2524), True, 'import numpy as np\n'), ((2045, 2085), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_gray_show'], {'cmap': '"""gray"""'}), "(image_gray_show, cmap='gray')\n", (2055, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2144), 'matplotlib.pyplot.title', 'plt.title', (['"""get_frp : detected face incorrect !!!"""'], {}), "('get_frp : detected face incorrect !!!')\n", (2103, 2144), True, 'import matplotlib.pyplot as plt\n'), ((2153, 2163), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2161, 2163), True, 'import matplotlib.pyplot as plt\n'), ((390, 403), 'skimage.draw.line', 'line', 
(['*B', '*l3'], {}), '(*B, *l3)\n', (394, 403), False, 'from skimage.draw import line\n'), ((1841, 1886), 'cv2.circle', 'cv2.circle', (['image_gray_show', 'point', '(10)', '(0)', '(-1)'], {}), '(image_gray_show, point, 10, 0, -1)\n', (1851, 1886), False, 'import cv2\n'), ((1989, 2034), 'cv2.circle', 'cv2.circle', (['image_gray_show', 'point', '(10)', '(0)', '(-1)'], {}), '(image_gray_show, point, 10, 0, -1)\n', (1999, 2034), False, 'import cv2\n'), ((199, 211), 'numpy.array', 'np.array', (['l1'], {}), '(l1)\n', (207, 211), True, 'import numpy as np\n'), ((214, 226), 'numpy.array', 'np.array', (['l5'], {}), '(l5)\n', (222, 226), True, 'import numpy as np\n'), ((260, 272), 'numpy.array', 'np.array', (['l2'], {}), '(l2)\n', (268, 272), True, 'import numpy as np\n'), ((275, 287), 'numpy.array', 'np.array', (['l4'], {}), '(l4)\n', (283, 287), True, 'import numpy as np\n'), ((335, 348), 'skimage.draw.line', 'line', (['*l0', '*A'], {}), '(*l0, *A)\n', (339, 348), False, 'from skimage.draw import line\n'), ((363, 375), 'skimage.draw.line', 'line', (['*A', '*B'], {}), '(*A, *B)\n', (367, 375), False, 'from skimage.draw import line\n')] |
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Tuple
import torch
import numpy as np
def max_reduce_like(input_: torch.Tensor, ref_tensor_shape: List[int]) -> torch.Tensor:
    """Max-reduce ``input_`` along every axis where ``ref_tensor_shape`` has
    extent 1 (keeping dims), so the result broadcasts like that shape."""
    if np.prod(ref_tensor_shape) == 1:
        # Degenerate reference shape: collapse to a scalar, then restore rank.
        scalar_max = input_.max()
        for _ in ref_tensor_shape:
            scalar_max.unsqueeze_(-1)
        return scalar_max
    reduced = input_
    for axis, extent in enumerate(ref_tensor_shape):
        if extent == 1:
            reduced = torch.max(reduced, axis, keepdim=True)[0]
    return reduced
def min_reduce_like(input_: torch.Tensor, ref_tensor_shape: List[int]):
    """Min-reduce ``input_`` along every axis where ``ref_tensor_shape`` has
    extent 1 (keeping dims), so the result broadcasts like that shape."""
    if np.prod(ref_tensor_shape) == 1:
        # Degenerate reference shape: collapse to a scalar, then restore rank.
        scalar_min = input_.min()
        for _ in ref_tensor_shape:
            scalar_min.unsqueeze_(-1)
        return scalar_min
    reduced = input_
    for axis, extent in enumerate(ref_tensor_shape):
        if extent == 1:
            reduced = torch.min(reduced, axis, keepdim=True)[0]
    return reduced
def get_channel_count_and_dim_idx(scale_shape: List[int]) -> Tuple[int, int]:
    """Return ``(channel_count, channel_dim_idx)`` for the last non-singleton
    axis of ``scale_shape``; defaults to ``(1, 0)`` if every extent is 1."""
    count, idx = 1, 0
    for axis, extent in enumerate(scale_shape):
        if extent != 1:
            count, idx = extent, axis
    return count, idx
def expand_like(input_: torch.Tensor, scale_shape: List[int]) -> torch.Tensor:
    """Reshape a 1-D tensor of per-channel values so that it broadcasts
    along the channel axis of ``scale_shape``."""
    channels, channel_axis = get_channel_count_and_dim_idx(scale_shape)
    assert input_.numel() == channels
    assert len(input_.size()) == 1
    shaped = input_
    # Insert leading singleton dims up to the channel axis ...
    for _ in range(channel_axis):
        shaped = shaped.unsqueeze(0)
    # ... and trailing ones after it.
    for _ in range(channel_axis + 1, len(scale_shape)):
        shaped = shaped.unsqueeze(-1)
    return shaped
| [
"numpy.prod",
"torch.max",
"torch.min"
] | [((745, 770), 'numpy.prod', 'np.prod', (['ref_tensor_shape'], {}), '(ref_tensor_shape)\n', (752, 770), True, 'import numpy as np\n'), ((1178, 1203), 'numpy.prod', 'np.prod', (['ref_tensor_shape'], {}), '(ref_tensor_shape)\n', (1185, 1203), True, 'import numpy as np\n'), ((1031, 1072), 'torch.max', 'torch.max', (['tmp_max', 'dim_idx'], {'keepdim': '(True)'}), '(tmp_max, dim_idx, keepdim=True)\n', (1040, 1072), False, 'import torch\n'), ((1464, 1505), 'torch.min', 'torch.min', (['tmp_min', 'dim_idx'], {'keepdim': '(True)'}), '(tmp_min, dim_idx, keepdim=True)\n', (1473, 1505), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
surface2stations.py
Extract synthetics at given stations from a NetCDF database of surface
wavefield created by AxiSEM3D (named axisem3d_surface.nc by the solver)
and save them into a NetCDF waveform database (same as the built-in
NetCDF output axisem3d_synthetics.nc).
To see usage, type
python surface2stations.py -h
'''
################### PARSER ###################
aim = '''Extract synthetics at given stations from a NetCDF database of surface
wavefield created by AxiSEM3D (named axisem3d_surface.nc by the solver)
and save them into a NetCDF waveform database (same as the built-in
NetCDF output axisem3d_synthetics.nc).'''
notes = '''One may further use nc2ascii.py to convert the output into ascii format.
'''
import argparse
from argparse import RawTextHelpFormatter
# Command-line interface; option names mirror the station-output settings
# in the solver's inparam.time_src_recv file.
parser = argparse.ArgumentParser(description=aim, epilog=notes,
                        formatter_class=RawTextHelpFormatter)
parser.add_argument('-i', '--input', dest='in_surface_nc',
                    action='store', type=str, required=True,
                    help='NetCDF database of surface wavefield\n' +
                    'created by AxiSEM3D <required>')
parser.add_argument('-m', '--multi_file', dest='multi_file', action='store_true',
                    help='Does the NetCDF database consist of\n' +
                    'multiple files; default = False')
parser.add_argument('-o', '--output', dest='out_waveform_nc',
                    action='store', type=str, required=True,
                    help='NetCDF waveform database to store the\n' +
                    'extracted synthetics <required>')
parser.add_argument('-s', '--stations', dest='stations',
                    action='store', type=str, required=True,
                    help='list of stations, see OUT_STATIONS_FILE\n' +
                    'in inparam.time_src_recv <required>')
parser.add_argument('-r', '--crdsys', dest='crdsys', action='store',
                    type=str, default='geographic',
                    choices=['geographic', 'source-centered'],
                    help='coordinate system used in the station list,\n' +
                    'see OUT_STATIONS_SYSTEM in inparam.time_src_recv;\n' +
                    'default = geographic')
parser.add_argument('-d', '--duplicated', dest='duplicated', action='store',
                    type=str, default='rename',
                    choices=['ignore', 'rename', 'error'],
                    help='how to haddle stations with the same network\n' +
                    'and name, see OUT_STATIONS_DUPLICATED in\n' +
                    'inparam.time_src_recv; default = rename')
parser.add_argument('-c', '--components', dest='components', action='store',
                    type=str, default='RTZ', choices=['RTZ', 'ENZ', 'SPZ'],
                    help='seismogram components, see OUT_STATIONS_COMPONENTS\n' +
                    'in inparam.time_src_recv; default = RTZ')
parser.add_argument('-F', '--order_Fourier', dest='order_Fourier',
                    action='store', type=int, default=-1,
                    help='only compute the specified order;\n' +
                    'default = -1 (sum up all the orders)')
parser.add_argument('-f', '--factor_Fourier', dest='factor_Fourier',
                    action='store', type=complex, default=1.0,
                    help='factor to scale the Fourier coefficients,\n' +
                    'can be a complex number; default = 1.0')
parser.add_argument('-l', '--source_lat_lon', dest='source_lat_lon',
                    action='store', nargs=2, type=float, default=None,
                    help='specify source latitude and longitude;\n' +
                    'default = None (use those in the solver)')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                    help='verbose mode')
args = parser.parse_args()
################### PARSER ###################
import numpy as np
from netCDF4 import Dataset
from obspy.geodetics import gps2dist_azimuth
import os
################### TOOLS ###################
def rotation_matrix(theta, phi):
    """Rotation matrix for colatitude ``theta`` and longitude ``phi``
    (radians), mapping source-centered coordinates to global ones."""
    ct, st = np.cos(theta), np.sin(theta)
    cp, sp = np.cos(phi), np.sin(phi)
    return np.array([[ct * cp, -sp, st * cp],
                     [ct * sp, cp, st * sp],
                     [-st, 0., ct]])
def latlon2thetaphi(lat, lon, flattening):
    """Convert geographic latitude/longitude (degrees) on an ellipsoid with
    the given flattening to geocentric colatitude/longitude (radians)."""
    squeeze = (1. - flattening) * (1. - flattening)
    theta = np.pi / 2. - np.arctan(squeeze * np.tan(np.radians(lat)))
    return theta, np.radians(lon)
def thetaphi2latlon(theta, phi, flattening):
    """Convert geocentric colatitude/longitude (radians) back to geographic
    latitude/longitude (degrees); inverse of latlon2thetaphi."""
    stretch = 1. / (1. - flattening) / (1. - flattening)
    lat = np.degrees(np.arctan(stretch * np.tan(np.pi / 2. - theta)))
    return lat, np.degrees(phi)
def thetaphi2xyz(theta, phi):
    """Unit Cartesian vector for colatitude ``theta`` and longitude ``phi``."""
    st = np.sin(theta)
    return np.array([st * np.cos(phi),
                     st * np.sin(phi),
                     np.cos(theta)])
def xyz2thetaphi(xyz):
    """Colatitude/longitude (radians) of a unit Cartesian vector;
    inverse of thetaphi2xyz."""
    return np.arccos(xyz[2]), np.arctan2(xyz[1], xyz[0])
class SurfaceStation:
    """A surface receiver located both geographically (lat/lon) and in the
    source-centered frame (distance/azimuth)."""
    # class variables and methods
    # Shared source location and rotation matrix; populated by setSource().
    src_lat = None
    src_lon = None
    src_rmat = None
    def setSource(srclat, srclon, srcflattening):
        # De-facto static method (no @staticmethod, no self); only ever
        # called on the class as SurfaceStation.setSource(...).
        SurfaceStation.src_lat, SurfaceStation.src_lon = srclat, srclon
        srctheta, srcphi = latlon2thetaphi(srclat, srclon, srcflattening)
        SurfaceStation.src_rmat = rotation_matrix(srctheta, srcphi)
    def __init__(self, network, name):
        self.network = network
        self.name = name
    def setloc_geographic(self, lat, lon, flattening):
        # Store the geographic location, then rotate the global position
        # into the source frame to obtain distance and azimuth.
        self.lat = lat
        self.lon = lon
        theta, phi = latlon2thetaphi(lat, lon, flattening)
        xglb = thetaphi2xyz(theta, phi)
        xsrc = SurfaceStation.src_rmat.T.dot(xglb)
        self.dist, self.azimuth = xyz2thetaphi(xsrc)
        # Back-azimuth (stored in radians) on a unit sphere/ellipsoid.
        d, az, baz = gps2dist_azimuth(SurfaceStation.src_lat, SurfaceStation.src_lon,
                                         self.lat, self.lon, a=1., f=flattening)
        self.baz = np.radians(baz)
    def setloc_source_centered(self, dist, azimuth, flattening):
        # Inverse of setloc_geographic: start from distance/azimuth and
        # rotate back into the global frame to obtain lat/lon.
        self.dist = dist
        self.azimuth = azimuth
        xsrc = thetaphi2xyz(dist, azimuth)
        xglb = SurfaceStation.src_rmat.dot(xsrc)
        theta, phi = xyz2thetaphi(xglb)
        self.lat, self.lon = thetaphi2latlon(theta, phi, flattening)
        d, az, baz = gps2dist_azimuth(SurfaceStation.src_lat, SurfaceStation.src_lon,
                                         self.lat, self.lon, a=1., f=flattening)
        self.baz = np.radians(baz)
def interpLagrange(target, bases):
    """Evaluate the Lagrange basis polynomials defined on ``bases`` at
    ``target`` and return their values as a numpy array."""
    nbases = len(bases)
    weights = np.zeros(nbases)
    for j in range(nbases):
        numerator = 1.
        denominator = 1.
        for i in range(nbases):
            if i == j:
                continue
            numerator *= target - bases[i]
            denominator *= bases[j] - bases[i]
        weights[j] = numerator / denominator
    return weights
################### TOOLS ###################
###### read surface database
if args.multi_file:
    # create first
    # One file per solver rank: open each existing axisem3d_surface.nc<rank>,
    # skipping files whose time axis is invalid or masked.
    nc_surfs = []
    for irank in np.arange(0, 99999):
        fname = args.in_surface_nc + str(irank)
        if os.path.isfile(fname):
            nc = Dataset(fname, 'r')
            print(str(nc.variables['time_points'][0]))
            if nc.variables['time_points'][-1] < -1. or '--' in str(nc.variables['time_points'][0]):
                print('Skip opening nc file %s' % (fname))
                continue
            nc_surfs.append(nc)
            if args.verbose:
                print('Done opening nc file %s' % (fname))
    nc_surf = nc_surfs[0]
else:
    nc_surf = Dataset(args.in_surface_nc, 'r')
    if args.verbose:
        print('Done opening nc file %s' % (args.in_surface_nc))
    nc_surfs = [nc_surf]
# global attribute
# Source location may be overridden on the command line (-l).
if args.source_lat_lon is not None:
    srclat = args.source_lat_lon[0]
    srclon = args.source_lat_lon[1]
else:
    srclat = nc_surf.source_latitude
    srclon = nc_surf.source_longitude
srcdep = nc_surf.source_depth
srcflat = nc_surf.source_flattening
surfflat = nc_surf.surface_flattening
# time
var_time = nc_surf.variables['time_points'][:]
nstep = len(var_time)
solver_dtype = nc_surf.variables['time_points'].datatype
# theta
var_theta = nc_surf.variables['theta'][:]
nele = len(var_theta)
# GLL and GLJ
# Interpolation bases: GLL for interior elements, GLJ for axial ones.
var_GLL = nc_surf.variables['GLL'][:]
var_GLJ = nc_surf.variables['GLJ'][:]
nPntEdge = len(var_GLL)
# element on rank
# Map each surface element to the file (rank) that stores its edge data.
irank_ele = np.zeros(nele, dtype='int')
irank_ele.fill(-1)
for inc, nc in enumerate(nc_surfs):
    keys = nc.variables.keys()
    for eleTag in np.arange(nele):
        key = 'edge_' + str(eleTag) + 'r'
        if key in keys:
            irank_ele[eleTag] = inc
# set source
SurfaceStation.setSource(srclat, srclon, srcflat)
###### read station info
# Columns: name, network, lat/theta, lon/phi, (unused), depth.
station_info = np.loadtxt(args.stations, dtype=str, ndmin=2)
stations = {}
buried = 0
largest_depth = -1.
largest_depth_station = ''
for ist in np.arange(0, len(station_info)):
    name = station_info[ist, 0]
    network = station_info[ist, 1]
    lat_theta = float(station_info[ist, 2])
    lon_phi = float(station_info[ist, 3])
    depth = float(station_info[ist, 5])
    key = network + '.' + name
    # ignore buried depth
    if depth > 0.:
        buried += 1
        if largest_depth < depth:
            largest_depth = depth
            largest_depth_station = key
    # duplicated stations
    # Handled per the -d option: error out, auto-rename, or skip.
    if key in stations:
        if args.duplicated == 'error':
            assert False, 'Duplicated station %s found in %s' \
                % (key, args.stations)
        if args.duplicated == 'rename':
            append = 0
            nameOriginal = name
            while key in stations:
                append += 1
                name = nameOriginal + '__DUPLICATED' + str(append)
                key = network + '.' + name
        if args.duplicated == 'ignore':
            continue
    st = SurfaceStation(network, name)
    # coordinate system
    if args.crdsys == 'geographic':
        st.setloc_geographic(lat_theta, lon_phi, surfflat)
    else:
        lat_theta = np.radians(lat_theta)
        lon_phi = np.radians(lon_phi)
        st.setloc_source_centered(lat_theta, lon_phi, surfflat)
    stations[key] = st
# sort stations by distance
station_list = list(stations.values())
station_list.sort(key=lambda st: st.dist)
if buried > 0:
    print('Warning: Ignoring buried depth of %d stations;' % (buried))
    print('         the deepest station %s is buried at %.0f m' \
        % (largest_depth_station, largest_depth))
###### prepare output
nc_wave = Dataset(args.out_waveform_nc, 'w')
# global attribute
nc_wave.source_latitude = srclat
nc_wave.source_longitude = srclon
nc_wave.source_depth = srcdep
# time
ncdim_nstep = 'ncdim_' + str(nstep)
nc_wave.createDimension(ncdim_nstep, size=nstep)
var_time_out = nc_wave.createVariable('time_points',
                                      solver_dtype, (ncdim_nstep,))
var_time_out[:] = var_time[:]
# waveforms
nc_wave.createDimension('ncdim_3', size=3)
# ---- extract synthetics station by station ----
# For each station: locate its surface element, build Lagrange interpolation
# weights, sum the Fourier series at the station azimuth, and rotate the
# s-phi-z motion into the requested output components.
max_theta = np.amax(var_theta, axis=1)
eleTag_last = -1
for ist, station in enumerate(station_list):
    # waveform variable (nstep x 3) named NETWORK.NAME.COMPONENTS
    key = station.network + '.' + station.name
    var_wave = nc_wave.createVariable(key + '.' + args.components,
                                      solver_dtype, (ncdim_nstep, 'ncdim_3'))
    # station info attributes
    var_wave.latitude = station.lat
    var_wave.longitude = station.lon
    var_wave.depth = 0.
    # locate the element that contains the station distance
    eleTag = np.searchsorted(max_theta, station.dist)
    assert eleTag >= 0, 'Fail to locate station %s, dist = %f' \
        % (key, station.dist)
    theta0 = var_theta[eleTag, 0]
    theta1 = var_theta[eleTag, 1]
    # map distance to the element reference coordinate eta in [-1, 1]
    eta = (station.dist - theta0) / (theta1 - theta0) * 2. - 1.
    # weights considering axial condition (axial elements use the GLJ basis)
    if eleTag == 0 or eleTag == nele - 1:
        weights = interpLagrange(eta, var_GLJ)
    else:
        weights = interpLagrange(eta, var_GLL)
    # read Fourier coefficients only when the element changes
    # NOTE: change to stepwise if memory issue occurs
    if eleTag != eleTag_last:
        nce = nc_surfs[irank_ele[eleTag]]
        fourier_r = nce.variables['edge_' + str(eleTag) + 'r'][:, :]
        fourier_i = nce.variables['edge_' + str(eleTag) + 'i'][:, :]
        fourier = fourier_r + fourier_i * 1j
        nu_p_1 = int(fourier_r.shape[1] / nPntEdge / 3)
        eleTag_last = eleTag
    # exp(i * m * azimuth); the factor 2 accounts for the conjugate-symmetric
    # negative orders, except for order 0
    exparray = 2. * np.exp(np.arange(0, nu_p_1) * 1j * station.azimuth)
    exparray[0] = 1.
    if args.order_Fourier >= 0:
        assert args.order_Fourier < nu_p_1, 'Specified Fourier order %d greater than maximum %d' \
            % (args.order_Fourier, nu_p_1 - 1)
        exparray = exparray[args.order_Fourier]
    exparray *= args.factor_Fourier
    # sum the Fourier series and interpolate onto the station location
    spz = np.zeros((nstep, 3), dtype=solver_dtype)
    fmat = fourier[:, :].reshape(nstep, 3, nPntEdge, nu_p_1)
    if args.order_Fourier >= 0:
        frow = fmat[:, :, :, args.order_Fourier]
        spz[:, :] = (frow * exparray).real.dot(weights)
    else:
        spz[:, :] = fmat.dot(exparray).real.dot(weights)
    # rotate s-phi-z motion into the requested components
    disp = np.zeros((nstep, 3), dtype=solver_dtype)
    if args.components == 'SPZ':
        disp = spz
    else:
        ur = spz[:, 0] * np.sin(station.dist) + spz[:, 2] * np.cos(station.dist)
        ut = spz[:, 0] * np.cos(station.dist) - spz[:, 2] * np.sin(station.dist)
        if args.components == 'ENZ':
            # BUGFIX: the original referenced self.baz here, which is
            # undefined at module level (NameError as soon as -c ENZ is
            # used); the back-azimuth belongs to the current station.
            disp[:, 0] = -ut * np.sin(station.baz) + spz[:, 1] * np.cos(station.baz)
            disp[:, 1] = -ut * np.cos(station.baz) - spz[:, 1] * np.sin(station.baz)
            disp[:, 2] = ur
        else:
            disp[:, 0] = ut
            disp[:, 1] = spz[:, 1]
            disp[:, 2] = ur
    var_wave[:, :] = disp[:, :]
    if args.verbose:
        print('Done with station %s, %d / %d' % (key, ist + 1, len(station_list)))
# close all input and output files
for nc_s in nc_surfs:
    nc_s.close()
nc_wave.close()
| [
"numpy.radians",
"obspy.geodetics.gps2dist_azimuth",
"numpy.arccos",
"numpy.tan",
"argparse.ArgumentParser",
"numpy.searchsorted",
"netCDF4.Dataset",
"os.path.isfile",
"numpy.zeros",
"numpy.arctan2",
"numpy.cos",
"numpy.sin",
"numpy.degrees",
"numpy.loadtxt",
"numpy.amax",
"numpy.arang... | [((843, 940), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'aim', 'epilog': 'notes', 'formatter_class': 'RawTextHelpFormatter'}), '(description=aim, epilog=notes, formatter_class=\n RawTextHelpFormatter)\n', (866, 940), False, 'import argparse\n'), ((8596, 8623), 'numpy.zeros', 'np.zeros', (['nele'], {'dtype': '"""int"""'}), "(nele, dtype='int')\n", (8604, 8623), True, 'import numpy as np\n'), ((8952, 8997), 'numpy.loadtxt', 'np.loadtxt', (['args.stations'], {'dtype': 'str', 'ndmin': '(2)'}), '(args.stations, dtype=str, ndmin=2)\n', (8962, 8997), True, 'import numpy as np\n'), ((10746, 10780), 'netCDF4.Dataset', 'Dataset', (['args.out_waveform_nc', '"""w"""'], {}), "(args.out_waveform_nc, 'w')\n", (10753, 10780), False, 'from netCDF4 import Dataset\n'), ((11175, 11201), 'numpy.amax', 'np.amax', (['var_theta'], {'axis': '(1)'}), '(var_theta, axis=1)\n', (11182, 11201), True, 'import numpy as np\n'), ((5162, 5179), 'numpy.arccos', 'np.arccos', (['xyz[2]'], {}), '(xyz[2])\n', (5171, 5179), True, 'import numpy as np\n'), ((5190, 5216), 'numpy.arctan2', 'np.arctan2', (['xyz[1]', 'xyz[0]'], {}), '(xyz[1], xyz[0])\n', (5200, 5216), True, 'import numpy as np\n'), ((6773, 6789), 'numpy.zeros', 'np.zeros', (['nbases'], {}), '(nbases)\n', (6781, 6789), True, 'import numpy as np\n'), ((6805, 6825), 'numpy.arange', 'np.arange', (['(0)', 'nbases'], {}), '(0, nbases)\n', (6814, 6825), True, 'import numpy as np\n'), ((7230, 7249), 'numpy.arange', 'np.arange', (['(0)', '(99999)'], {}), '(0, 99999)\n', (7239, 7249), True, 'import numpy as np\n'), ((7777, 7809), 'netCDF4.Dataset', 'Dataset', (['args.in_surface_nc', '"""r"""'], {}), "(args.in_surface_nc, 'r')\n", (7784, 7809), False, 'from netCDF4 import Dataset\n'), ((8728, 8743), 'numpy.arange', 'np.arange', (['nele'], {}), '(nele)\n', (8737, 8743), True, 'import numpy as np\n'), ((11592, 11632), 'numpy.searchsorted', 'np.searchsorted', (['max_theta', 'station.dist'], {}), '(max_theta, 
station.dist)\n', (11607, 11632), True, 'import numpy as np\n'), ((12891, 12931), 'numpy.zeros', 'np.zeros', (['(nstep, 3)'], {'dtype': 'solver_dtype'}), '((nstep, 3), dtype=solver_dtype)\n', (12899, 12931), True, 'import numpy as np\n'), ((13221, 13261), 'numpy.zeros', 'np.zeros', (['(nstep, 3)'], {'dtype': 'solver_dtype'}), '((nstep, 3), dtype=solver_dtype)\n', (13229, 13261), True, 'import numpy as np\n'), ((4746, 4761), 'numpy.radians', 'np.radians', (['lon'], {}), '(lon)\n', (4756, 4761), True, 'import numpy as np\n'), ((4931, 4946), 'numpy.degrees', 'np.degrees', (['phi'], {}), '(phi)\n', (4941, 4946), True, 'import numpy as np\n'), ((6045, 6154), 'obspy.geodetics.gps2dist_azimuth', 'gps2dist_azimuth', (['SurfaceStation.src_lat', 'SurfaceStation.src_lon', 'self.lat', 'self.lon'], {'a': '(1.0)', 'f': 'flattening'}), '(SurfaceStation.src_lat, SurfaceStation.src_lon, self.lat,\n self.lon, a=1.0, f=flattening)\n', (6061, 6154), False, 'from obspy.geodetics import gps2dist_azimuth\n'), ((6182, 6197), 'numpy.radians', 'np.radians', (['baz'], {}), '(baz)\n', (6192, 6197), True, 'import numpy as np\n'), ((6546, 6655), 'obspy.geodetics.gps2dist_azimuth', 'gps2dist_azimuth', (['SurfaceStation.src_lat', 'SurfaceStation.src_lon', 'self.lat', 'self.lon'], {'a': '(1.0)', 'f': 'flattening'}), '(SurfaceStation.src_lat, SurfaceStation.src_lon, self.lat,\n self.lon, a=1.0, f=flattening)\n', (6562, 6655), False, 'from obspy.geodetics import gps2dist_azimuth\n'), ((6683, 6698), 'numpy.radians', 'np.radians', (['baz'], {}), '(baz)\n', (6693, 6698), True, 'import numpy as np\n'), ((6882, 6902), 'numpy.arange', 'np.arange', (['(0)', 'nbases'], {}), '(0, nbases)\n', (6891, 6902), True, 'import numpy as np\n'), ((7310, 7331), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (7324, 7331), False, 'import os\n'), ((10225, 10246), 'numpy.radians', 'np.radians', (['lat_theta'], {}), '(lat_theta)\n', (10235, 10246), True, 'import numpy as np\n'), ((10265, 10284), 
'numpy.radians', 'np.radians', (['lon_phi'], {}), '(lon_phi)\n', (10275, 10284), True, 'import numpy as np\n'), ((5110, 5123), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5116, 5123), True, 'import numpy as np\n'), ((7350, 7369), 'netCDF4.Dataset', 'Dataset', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (7357, 7369), False, 'from netCDF4 import Dataset\n'), ((4463, 4474), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4469, 4474), True, 'import numpy as np\n'), ((4548, 4561), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4554, 4561), True, 'import numpy as np\n'), ((5010, 5023), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5016, 5023), True, 'import numpy as np\n'), ((5026, 5037), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (5032, 5037), True, 'import numpy as np\n'), ((5060, 5073), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5066, 5073), True, 'import numpy as np\n'), ((5076, 5087), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (5082, 5087), True, 'import numpy as np\n'), ((13349, 13369), 'numpy.sin', 'np.sin', (['station.dist'], {}), '(station.dist)\n', (13355, 13369), True, 'import numpy as np\n'), ((13384, 13404), 'numpy.cos', 'np.cos', (['station.dist'], {}), '(station.dist)\n', (13390, 13404), True, 'import numpy as np\n'), ((13430, 13450), 'numpy.cos', 'np.cos', (['station.dist'], {}), '(station.dist)\n', (13436, 13450), True, 'import numpy as np\n'), ((13465, 13485), 'numpy.sin', 'np.sin', (['station.dist'], {}), '(station.dist)\n', (13471, 13485), True, 'import numpy as np\n'), ((4339, 4352), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4345, 4352), True, 'import numpy as np\n'), ((4355, 4366), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4361, 4366), True, 'import numpy as np\n'), ((4369, 4380), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4375, 4380), True, 'import numpy as np\n'), ((4382, 4395), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4388, 4395), True, 'import numpy as 
np\n'), ((4398, 4409), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4404, 4409), True, 'import numpy as np\n'), ((4434, 4447), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4440, 4447), True, 'import numpy as np\n'), ((4450, 4461), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4456, 4461), True, 'import numpy as np\n'), ((4476, 4489), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4482, 4489), True, 'import numpy as np\n'), ((4492, 4503), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4498, 4503), True, 'import numpy as np\n'), ((4529, 4542), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4535, 4542), True, 'import numpy as np\n'), ((4901, 4928), 'numpy.tan', 'np.tan', (['(np.pi / 2.0 - theta)'], {}), '(np.pi / 2.0 - theta)\n', (4907, 4928), True, 'import numpy as np\n'), ((12521, 12541), 'numpy.arange', 'np.arange', (['(0)', 'nu_p_1'], {}), '(0, nu_p_1)\n', (12530, 12541), True, 'import numpy as np\n'), ((13558, 13574), 'numpy.sin', 'np.sin', (['self.baz'], {}), '(self.baz)\n', (13564, 13574), True, 'import numpy as np\n'), ((13589, 13605), 'numpy.cos', 'np.cos', (['self.baz'], {}), '(self.baz)\n', (13595, 13605), True, 'import numpy as np\n'), ((13637, 13653), 'numpy.cos', 'np.cos', (['self.baz'], {}), '(self.baz)\n', (13643, 13653), True, 'import numpy as np\n'), ((13668, 13684), 'numpy.sin', 'np.sin', (['self.baz'], {}), '(self.baz)\n', (13674, 13684), True, 'import numpy as np\n'), ((4727, 4742), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (4737, 4742), True, 'import numpy as np\n')] |
import numpy as np
from .LowResHighResDataset import LowResHighResDataset, region_geometry
class NearestNeighborData(LowResHighResDataset):
    def __init__(self, dataset: LowResHighResDataset, num_models=None, model_index=None, k=16):
        """Wrap a single-region dataset and precompute, for each selected
        high-res model point, the indices of its k nearest low-res and
        12*k nearest high-res neighbors.

        dataset:     source LowResHighResDataset (single region only)
        num_models:  number of model points to select (random subset if
                     fewer than available); may be None if model_index given
        model_index: explicit indices of model points; may be None
        k:           number of low-res nearest neighbors per model point
        """
        super(NearestNeighborData, self).__init__(
            dataset.geometry_lr, dataset.geometry_hr,
            dataset.grid_names_lr, dataset.grid_names_hr,
            dataset.grid_names_target
        )
        # Multi-region datasets store geometries in a dict; only a single
        # region is supported, so unwrap the lone entry.
        if isinstance(self.geometry_lr, dict):
            assert len(self.geometry_lr.keys()) == 1, '[ERROR] NearestNeighborData is not thought to be used with multi-region datasets.'
            self.geometry_lr = self.geometry_lr[list(self.geometry_lr.keys())[0]]
        if isinstance(self.geometry_hr, dict):
            assert len(self.geometry_hr.keys()) == 1, '[ERROR] NearestNeighborData is not thought to be used with multi-region datasets.'
            self.geometry_hr = self.geometry_hr[list(self.geometry_hr.keys())[0]]
        self.num_nearest_neighbors_lr = k
        self.num_nearest_neighbors_hr = 12 * k
        self._set_nearest_neighbor_indices(model_index, num_models)
        self._read_dataset(dataset)
        self._in_grid_mode = False
        self._reset_mask_hr()
    def _set_nearest_neighbor_indices(self, model_index, num_models):
        """Select the model points and compute their nearest-neighbor grid
        indices in both the low-res and high-res grids.

        Grid cells with mask == 0 are treated as valid. Distances use the
        L1 metric from _nearest_neighbor_metric.
        """
        mask_lr = self.geometry_lr.mask
        lon_lr = self.geometry_lr.lon
        lat_lr = self.geometry_lr.lat
        mask_hr = self.geometry_hr.mask
        lon_hr = self.geometry_hr.lon
        lat_hr = self.geometry_hr.lat
        # Total number of valid high-res cells available as model points.
        max_num_models = np.sum(1 - mask_hr)
        if num_models is None:
            assert model_index is not None
            assert len(model_index) <= max_num_models
            assert max(model_index) < max_num_models
        else:
            if model_index is not None:
                assert len(model_index) == num_models
                assert len(model_index) <= max_num_models
                assert max(model_index) < max_num_models
            else:
                # No explicit indices: draw a random sorted subset.
                model_index = np.arange(max_num_models).astype(int)
                if num_models < max_num_models:
                    np.random.shuffle(model_index)
                    model_index = np.sort(model_index[:num_models])
        self.num_models = len(model_index)
        # Coordinates and 2-D grid indices of the valid cells only.
        valid_lon_lr = lon_lr[mask_lr == 0]
        valid_lat_lr = lat_lr[mask_lr == 0]
        valid_lon_hr = lon_hr[mask_hr == 0]
        valid_lat_hr = lat_hr[mask_hr == 0]
        index_lon_lr, index_lat_lr = np.meshgrid(np.arange(self.shape_lr[1]), np.arange(self.shape_lr[0]))
        index_lon_hr, index_lat_hr = np.meshgrid(np.arange(self.shape_hr[1]), np.arange(self.shape_hr[0]))
        valid_index_lon_lr = index_lon_lr[mask_lr == 0].astype(int)
        valid_index_lat_lr = index_lat_lr[mask_lr == 0].astype(int)
        valid_index_lon_hr = index_lon_hr[mask_hr == 0].astype(int)
        valid_index_lat_hr = index_lat_hr[mask_hr == 0].astype(int)
        self.model_index_lon = valid_index_lon_hr[model_index]
        self.model_index_lat = valid_index_lat_hr[model_index]
        input_index_lon_lr = []
        input_index_lat_lr = []
        input_index_lon_hr = []
        input_index_lat_hr = []
        # Low-res neighbors: k closest valid low-res cells per model point
        # (argpartition gives an unordered top-k, which is sufficient here).
        for i in model_index:
            nn_dist = self._nearest_neighbor_metric(
                lon=valid_lon_lr, lat=valid_lat_lr,
                lon_0=valid_lon_hr[i], lat_0=valid_lat_hr[i]
            )
            rank_index_lr = np.argpartition(nn_dist, self.num_nearest_neighbors_lr)[:self.num_nearest_neighbors_lr]
            input_index_lon_lr.append(valid_index_lon_lr[rank_index_lr])
            input_index_lat_lr.append(valid_index_lat_lr[rank_index_lr])
        self.input_index_lon_lr = np.array(input_index_lon_lr)
        self.input_index_lat_lr = np.array(input_index_lat_lr)
        # High-res neighbors: 12*k closest valid high-res cells per model point.
        for i in model_index:
            nn_dist = self._nearest_neighbor_metric(
                lon=valid_lon_hr, lat=valid_lat_hr,
                lon_0=valid_lon_hr[i], lat_0=valid_lat_hr[i]
            )
            rank_index_hr = np.argpartition(nn_dist, self.num_nearest_neighbors_hr)[:self.num_nearest_neighbors_hr]
            input_index_lon_hr.append(valid_index_lon_hr[rank_index_hr])
            input_index_lat_hr.append(valid_index_lat_hr[rank_index_hr])
        self.input_index_lon_hr = np.array(input_index_lon_hr)
        self.input_index_lat_hr = np.array(input_index_lat_hr)
        # Flattened feature count per model point.
        self.num_features = (
            self.num_nearest_neighbors_lr * len(self.input_grids_lr()) +
            self.num_nearest_neighbors_hr * len(self.input_grids_hr())
        )
@staticmethod
def _nearest_neighbor_metric(lon=None, lat=None, lon_0=None, lat_0=None):
    """Manhattan (L1) distance from every (lon, lat) point to (lon_0, lat_0).

    Operates element-wise on array inputs; used to rank candidate
    nearest-neighbour grid points.
    """
    d_lon = np.abs(lon - lon_0)
    d_lat = np.abs(lat - lat_0)
    return d_lon + d_lat
def _read_dataset(self, dataset: LowResHighResDataset):
    """Read and cache all inputs and targets from *dataset*.

    For each of the low-res inputs, high-res inputs and targets, the
    'dynamic' and 'static' grid groups are gathered at the precomputed
    neighbourhood indices and stacked into single arrays. A group with no
    configured grid names is stored as ``None``.
    """
    # --- low-resolution inputs -------------------------------------------
    self.input_lr = {'static': [], 'dynamic': []}
    if len(self.grid_names_lr['dynamic']):
        data, _ = dataset.get_input_lr(self.grid_names_lr['dynamic'])
        # Gather the neighbourhood of each model point and flatten the
        # (grid, neighbour) axes into one feature axis.
        for idx_lat, idx_lon in zip(self.input_index_lat_lr, self.input_index_lon_lr):
            self.input_lr['dynamic'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
    self.input_lr.update(
        {
            'dynamic': np.stack(self.input_lr['dynamic'], axis=1)
            if len(self.input_lr['dynamic']) > 0 else None
        }
    )
    if len(self.grid_names_lr['static']):
        data, _ = dataset.get_input_lr(self.grid_names_lr['static'])
        for idx_lat, idx_lon in zip(self.input_index_lat_lr, self.input_index_lon_lr):
            self.input_lr['static'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
    self.input_lr.update(
        {
            'static': np.stack(self.input_lr['static'], axis=1)
            if len(self.input_lr['static']) > 0 else None
        }
    )
    # --- high-resolution inputs ------------------------------------------
    self.input_hr = {'static': [], 'dynamic': []}
    if len(self.grid_names_hr['dynamic']):
        data, _ = dataset.get_input_hr(self.grid_names_hr['dynamic'])
        for idx_lat, idx_lon in zip(self.input_index_lat_hr, self.input_index_lon_hr):
            self.input_hr['dynamic'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
    self.input_hr.update(
        {
            'dynamic': np.stack(self.input_hr['dynamic'], axis=1)
            if len(self.input_hr['dynamic']) > 0 else None
        }
    )
    if len(self.grid_names_hr['static']):
        data, _ = dataset.get_input_hr(self.grid_names_hr['static'])
        for idx_lat, idx_lon in zip(self.input_index_lat_hr, self.input_index_lon_hr):
            self.input_hr['static'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
    self.input_hr.update(
        {
            'static': np.stack(self.input_hr['static'], axis=1)
            if len(self.input_hr['static']) > 0 else None
        }
    )
    # --- targets ----------------------------------------------------------
    # Targets are sampled at the model points themselves (not neighbourhoods)
    # and transposed so the point axis precedes the grid axis.
    self.target = {'static': [], 'dynamic': []}
    if len(self.grid_names_target['dynamic']):
        data, _ = dataset.get_target(self.grid_names_target['dynamic'])
        for idx_lat, idx_lon in zip(self.model_index_lat, self.model_index_lon):
            self.target['dynamic'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
    self.target.update(
        {
            'dynamic': np.stack(self.target['dynamic'], axis=1).transpose((0, 2, 1))
            if len(self.target['dynamic']) > 0 else None
        }
    )
    if len(self.grid_names_target['static']):
        data, _ = dataset.get_target(self.grid_names_target['static'])
        for idx_lat, idx_lon in zip(self.model_index_lat, self.model_index_lon):
            self.target['static'].append(np.reshape(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1)))
    self.target.update(
        {
            'static': np.stack(self.target['static'], axis=1).transpose((0, 2, 1))
            if len(self.target['static']) > 0 else None
        }
    )
    # Number of samples equals the number of time steps in the dataset.
    self._len = len(dataset)
def _reset_mask_hr(self):
    """Rebuild the high-res geometry so that only model points are valid.

    A mask value of 0 marks a valid cell; every cell except the selected
    model points is masked out.
    """
    new_mask = np.ones_like(self.geometry_hr.mask)
    new_mask[self.model_index_lat, self.model_index_lon] = 0
    self.geometry_hr = region_geometry(
        self.geometry_hr.lon, self.geometry_hr.lat, new_mask
    )
def __len__(self):
    """Number of samples (time steps) cached by ``_read_dataset``."""
    return self._len
def __getitem__(self, item):
    """Assemble one training sample.

    Parameters
    ----------
    item : int
        Time-step index into the cached dynamic arrays.

    Returns
    -------
    tuple
        ``(target, input_lr, input_hr, mask_lr, mask_hr)``. Each of
        ``target``/``input_lr``/``input_hr`` is a concatenated array, or an
        empty list when no grids of that kind were configured.
    """
    # Fix: _read_dataset stores None for an unconfigured group; the original
    # guarded only the high-res branch and crashed on a None low-res/target
    # dynamic group. All three branches now use the same guard pattern.
    target = []
    if self.target['dynamic'] is not None:
        target.append(self.target['dynamic'][item])
    if self.target['static'] is not None:
        target.append(self.target['static'][0])
    target = np.concatenate(target, axis=0) if len(target) > 0 else []
    input_lr = []
    if self.input_lr['dynamic'] is not None:
        input_lr.append(self.input_lr['dynamic'][item])
    if self.input_lr['static'] is not None:
        input_lr.append(self.input_lr['static'][0])
    if len(input_lr) > 0:
        input_lr = np.concatenate(input_lr, axis=-1)
    else:
        input_lr = []
    input_hr = []
    if self.input_hr['dynamic'] is not None:
        input_hr.append(self.input_hr['dynamic'][item])
    if self.input_hr['static'] is not None:
        input_hr.append(self.input_hr['static'][0])
    if len(input_hr) > 0:
        input_hr = np.concatenate(input_hr, axis=-1)
    else:
        input_hr = []
    mask_lr = self.geometry_lr.mask
    mask_hr = self.geometry_hr.mask
    return target, input_lr, input_hr, mask_lr, mask_hr
def get_input_lr(self, grid_names):
    """Return low-res input data for the named grids. Abstract; subclasses
    implement. Presumably returns a ``(data, meta)`` pair, mirroring the
    ``dataset.get_input_lr`` call in ``_read_dataset`` — TODO confirm."""
    raise NotImplementedError()
def get_input_hr(self, grid_names):
    """Return high-res input data for the named grids. Abstract; subclasses
    implement."""
    raise NotImplementedError()
def get_target(self, grid_names):
    """Return target data for the named grids. Abstract; subclasses
    implement."""
    raise NotImplementedError()
def set_input_lr(self, grid_names, data):
    """Write *data* into the named low-res input grids. Abstract; subclasses
    implement."""
    raise NotImplementedError()
def set_input_hr(self, grid_names, data):
    """Write *data* into the named high-res input grids. Abstract; subclasses
    implement."""
    raise NotImplementedError()
def set_target(self, grid_names, data):
    """Write *data* into the named target grids. Abstract; subclasses
    implement."""
    raise NotImplementedError()
def grids(self):
    """Enumerate the available grids. Abstract; subclasses implement."""
    raise NotImplementedError()
def samples(self):
    """Enumerate the available samples. Abstract; subclasses implement."""
    raise NotImplementedError()
| [
"numpy.ones_like",
"numpy.abs",
"numpy.reshape",
"numpy.argpartition",
"numpy.sort",
"numpy.sum",
"numpy.array",
"numpy.stack",
"numpy.concatenate",
"numpy.arange",
"numpy.random.shuffle"
] | [((1569, 1588), 'numpy.sum', 'np.sum', (['(1 - mask_hr)'], {}), '(1 - mask_hr)\n', (1575, 1588), True, 'import numpy as np\n'), ((3718, 3746), 'numpy.array', 'np.array', (['input_index_lon_lr'], {}), '(input_index_lon_lr)\n', (3726, 3746), True, 'import numpy as np\n'), ((3781, 3809), 'numpy.array', 'np.array', (['input_index_lat_lr'], {}), '(input_index_lat_lr)\n', (3789, 3809), True, 'import numpy as np\n'), ((4318, 4346), 'numpy.array', 'np.array', (['input_index_lon_hr'], {}), '(input_index_lon_hr)\n', (4326, 4346), True, 'import numpy as np\n'), ((4381, 4409), 'numpy.array', 'np.array', (['input_index_lat_hr'], {}), '(input_index_lat_hr)\n', (4389, 4409), True, 'import numpy as np\n'), ((8266, 8301), 'numpy.ones_like', 'np.ones_like', (['self.geometry_hr.mask'], {}), '(self.geometry_hr.mask)\n', (8278, 8301), True, 'import numpy as np\n'), ((8754, 8784), 'numpy.concatenate', 'np.concatenate', (['target'], {'axis': '(0)'}), '(target, axis=0)\n', (8768, 8784), True, 'import numpy as np\n'), ((2516, 2543), 'numpy.arange', 'np.arange', (['self.shape_lr[1]'], {}), '(self.shape_lr[1])\n', (2525, 2543), True, 'import numpy as np\n'), ((2545, 2572), 'numpy.arange', 'np.arange', (['self.shape_lr[0]'], {}), '(self.shape_lr[0])\n', (2554, 2572), True, 'import numpy as np\n'), ((2623, 2650), 'numpy.arange', 'np.arange', (['self.shape_hr[1]'], {}), '(self.shape_hr[1])\n', (2632, 2650), True, 'import numpy as np\n'), ((2652, 2679), 'numpy.arange', 'np.arange', (['self.shape_hr[0]'], {}), '(self.shape_hr[0])\n', (2661, 2679), True, 'import numpy as np\n'), ((4715, 4734), 'numpy.abs', 'np.abs', (['(lon - lon_0)'], {}), '(lon - lon_0)\n', (4721, 4734), True, 'import numpy as np\n'), ((4737, 4756), 'numpy.abs', 'np.abs', (['(lat - lat_0)'], {}), '(lat - lat_0)\n', (4743, 4756), True, 'import numpy as np\n'), ((8994, 9027), 'numpy.concatenate', 'np.concatenate', (['input_lr'], {'axis': '(-1)'}), '(input_lr, axis=-1)\n', (9008, 9027), True, 'import numpy as np\n'), ((9356, 
9389), 'numpy.concatenate', 'np.concatenate', (['input_hr'], {'axis': '(-1)'}), '(input_hr, axis=-1)\n', (9370, 9389), True, 'import numpy as np\n'), ((3449, 3504), 'numpy.argpartition', 'np.argpartition', (['nn_dist', 'self.num_nearest_neighbors_lr'], {}), '(nn_dist, self.num_nearest_neighbors_lr)\n', (3464, 3504), True, 'import numpy as np\n'), ((4049, 4104), 'numpy.argpartition', 'np.argpartition', (['nn_dist', 'self.num_nearest_neighbors_hr'], {}), '(nn_dist, self.num_nearest_neighbors_hr)\n', (4064, 4104), True, 'import numpy as np\n'), ((2147, 2177), 'numpy.random.shuffle', 'np.random.shuffle', (['model_index'], {}), '(model_index)\n', (2164, 2177), True, 'import numpy as np\n'), ((2212, 2245), 'numpy.sort', 'np.sort', (['model_index[:num_models]'], {}), '(model_index[:num_models])\n', (2219, 2245), True, 'import numpy as np\n'), ((5132, 5202), 'numpy.reshape', 'np.reshape', (['data[:, :, idx_lat, idx_lon]'], {'newshape': '(data.shape[0], -1)'}), '(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1))\n', (5142, 5202), True, 'import numpy as np\n'), ((5275, 5317), 'numpy.stack', 'np.stack', (["self.input_lr['dynamic']"], {'axis': '(1)'}), "(self.input_lr['dynamic'], axis=1)\n", (5283, 5317), True, 'import numpy as np\n'), ((5662, 5732), 'numpy.reshape', 'np.reshape', (['data[:, :, idx_lat, idx_lon]'], {'newshape': '(data.shape[0], -1)'}), '(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1))\n', (5672, 5732), True, 'import numpy as np\n'), ((5804, 5845), 'numpy.stack', 'np.stack', (["self.input_lr['static']"], {'axis': '(1)'}), "(self.input_lr['static'], axis=1)\n", (5812, 5845), True, 'import numpy as np\n'), ((6246, 6316), 'numpy.reshape', 'np.reshape', (['data[:, :, idx_lat, idx_lon]'], {'newshape': '(data.shape[0], -1)'}), '(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1))\n', (6256, 6316), True, 'import numpy as np\n'), ((6389, 6431), 'numpy.stack', 'np.stack', (["self.input_hr['dynamic']"], {'axis': '(1)'}), 
"(self.input_hr['dynamic'], axis=1)\n", (6397, 6431), True, 'import numpy as np\n'), ((6776, 6846), 'numpy.reshape', 'np.reshape', (['data[:, :, idx_lat, idx_lon]'], {'newshape': '(data.shape[0], -1)'}), '(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1))\n', (6786, 6846), True, 'import numpy as np\n'), ((6918, 6959), 'numpy.stack', 'np.stack', (["self.input_hr['static']"], {'axis': '(1)'}), "(self.input_hr['static'], axis=1)\n", (6926, 6959), True, 'import numpy as np\n'), ((7356, 7426), 'numpy.reshape', 'np.reshape', (['data[:, :, idx_lat, idx_lon]'], {'newshape': '(data.shape[0], -1)'}), '(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1))\n', (7366, 7426), True, 'import numpy as np\n'), ((7899, 7969), 'numpy.reshape', 'np.reshape', (['data[:, :, idx_lat, idx_lon]'], {'newshape': '(data.shape[0], -1)'}), '(data[:, :, idx_lat, idx_lon], newshape=(data.shape[0], -1))\n', (7909, 7969), True, 'import numpy as np\n'), ((2041, 2066), 'numpy.arange', 'np.arange', (['max_num_models'], {}), '(max_num_models)\n', (2050, 2066), True, 'import numpy as np\n'), ((7497, 7537), 'numpy.stack', 'np.stack', (["self.target['dynamic']"], {'axis': '(1)'}), "(self.target['dynamic'], axis=1)\n", (7505, 7537), True, 'import numpy as np\n'), ((8039, 8078), 'numpy.stack', 'np.stack', (["self.target['static']"], {'axis': '(1)'}), "(self.target['static'], axis=1)\n", (8047, 8078), True, 'import numpy as np\n')] |
import unittest
import logging
import sys
import numpy
from intervals.number import Interval as I
from intervals.methods import (intervalise,lo,hi)
from .interval_generator import pick_endpoints_at_random_uniform
class TestIntervalArithmetic(unittest.TestCase):
    """Randomised endpoint-analysis checks for interval arithmetic.

    For intervals whose operands do not straddle a singularity, the exact
    bounds of ``x op y`` are the min and max of ``op`` applied to the four
    endpoint combinations; every binary operation is cross-checked against
    that brute-force reference. Divisors are generated with a positive
    ``left_bound`` to keep them away from zero.

    Fix over the previous revision: ``parsing_degenerate_intervals`` lacked
    the ``test_`` prefix, so unittest never ran it; it is renamed to
    ``test_parsing_degenerate_intervals``.
    """

    PLACES = 7  # decimal places for all almost-equal comparisons

    # ------------------------------------------------------------------ #
    # helpers                                                            #
    # ------------------------------------------------------------------ #
    def _assert_interval_almost_equal(self, z, expected_lo, expected_hi):
        """Check interval *z* has the given bounds to PLACES decimals."""
        self.assertAlmostEqual(lo(z), expected_lo, places=self.PLACES)
        self.assertAlmostEqual(hi(z), expected_hi, places=self.PLACES)

    def _check_op_by_endpoints_analysis(self, x, y, op):
        """Compare element-wise ``x op y`` against endpoint analysis."""
        for xi, yi in zip(x, y):
            a = [lo(xi), hi(xi)]
            b = [lo(yi), hi(yi)]
            c = [op(ai, bj) for ai in a for bj in b]  # endpoints analysis
            self._assert_interval_almost_equal(op(xi, yi), min(c), max(c))

    def _check_elementwise_ops(self, x, y):
        """Array-level +,-,*,/ must agree element-wise with scalar ops."""
        for xi, yi, z1, z2, z3, z4 in zip(x, y, x + y, x - y, x * y, x / y):
            for z, ref in ((z1, xi + yi), (z2, xi - yi),
                           (z3, xi * yi), (z4, xi / yi)):
                self._assert_interval_almost_equal(z, lo(ref), hi(ref))

    # ------------------------------------------------------------------ #
    # binary operations vs. endpoint analysis                            #
    # ------------------------------------------------------------------ #
    def test_addition_by_endpoints_analysis(self):
        """Test addition 100 times between random intervals."""
        x = intervalise(pick_endpoints_at_random_uniform(n=100))
        y = intervalise(pick_endpoints_at_random_uniform(n=100))
        self._check_op_by_endpoints_analysis(x, y, lambda u, v: u + v)

    def test_subtraction_by_endpoints_analysis(self):
        """Test subtraction 100 times between random intervals."""
        x = intervalise(pick_endpoints_at_random_uniform(n=100))
        y = intervalise(pick_endpoints_at_random_uniform(n=100))
        self._check_op_by_endpoints_analysis(x, y, lambda u, v: u - v)

    def test_multiplication_by_endpoints_analysis(self):
        """Test multiplication 100 times between random intervals."""
        x = intervalise(pick_endpoints_at_random_uniform(n=100))
        y = intervalise(pick_endpoints_at_random_uniform(n=100))
        self._check_op_by_endpoints_analysis(x, y, lambda u, v: u * v)

    def test_division_by_endpoints_analysis(self):
        """Test division 100 times between random intervals."""
        x = intervalise(pick_endpoints_at_random_uniform(n=100))
        # left_bound > 0 keeps the divisor strictly away from zero.
        y = intervalise(pick_endpoints_at_random_uniform(n=100, left_bound=0.001))
        self._check_op_by_endpoints_analysis(x, y, lambda u, v: u / v)

    # ------------------------------------------------------------------ #
    # array broadcasting                                                 #
    # ------------------------------------------------------------------ #
    def test_four_operations_between_interval_2darrays(self):
        """Element-wise ops between two-dimensional interval arrays."""
        x = intervalise(pick_endpoints_at_random_uniform(shape=(100, 4)))
        y = intervalise(pick_endpoints_at_random_uniform(shape=(100, 4), left_bound=0.001))
        self._check_elementwise_ops(x, y)

    def test_four_operations_between_interval_3darrays(self):
        """Element-wise ops between three-dimensional interval arrays."""
        x = intervalise(pick_endpoints_at_random_uniform(shape=(10, 3, 3)))
        y = intervalise(pick_endpoints_at_random_uniform(shape=(10, 3, 3), left_bound=0.001))
        self._check_elementwise_ops(x, y)

    def test_four_operations_between_scalar_and_arrays(self):
        """Broadcasting a scalar interval over an interval array (scalar op array)."""
        a = intervalise(pick_endpoints_at_random_uniform(n=1, left_bound=-1, right_bound=1))
        y = intervalise(pick_endpoints_at_random_uniform(shape=(10, 3, 3), left_bound=0.001))
        for yi, z1, z2, z3, z4 in zip(y, a + y, a - y, a * y, a / y):
            for z, ref in ((z1, a + yi), (z2, a - yi),
                           (z3, a * yi), (z4, a / yi)):
                self._assert_interval_almost_equal(z, lo(ref), hi(ref))

    def test_four_operations_between_arrays_and_scalars(self):
        """Broadcasting an interval array over a scalar interval (array op scalar)."""
        a = intervalise(pick_endpoints_at_random_uniform(n=1, left_bound=0.001, right_bound=1))
        y = intervalise(pick_endpoints_at_random_uniform(shape=(10, 3, 3), left_bound=0.001))
        for yi, z1, z2, z3, z4 in zip(y, y + a, y - a, y * a, y / a):
            for z, ref in ((z1, yi + a), (z2, yi - a),
                           (z3, yi * a), (z4, yi / a)):
                self._assert_interval_almost_equal(z, lo(ref), hi(ref))

    # ------------------------------------------------------------------ #
    # mixing intervals with plain numbers                                #
    # ------------------------------------------------------------------ #
    def test_four_operations_between_interval_and_numeric(self):
        """Ops between intervals and a non-interval float, vs endpoint analysis."""
        a = -10 + numpy.random.rand() * 20  # random float in (-10, 10)
        y = intervalise(pick_endpoints_at_random_uniform(n=100, left_bound=0.001))
        for yi in y:
            endpoints = (lo(yi), hi(yi))
            for op in (lambda u, v: u + v, lambda u, v: u - v,
                       lambda u, v: u * v, lambda u, v: u / v):
                c = [op(e, a) for e in endpoints]  # endpoints analysis
                self._assert_interval_almost_equal(op(yi, a), min(c), max(c))

    def test_four_operations_between_arrays_and_numeric(self):
        """Ops between an interval array and a non-interval float."""
        a = -10 + numpy.random.rand() * 20
        y = intervalise(pick_endpoints_at_random_uniform(shape=(7, 4, 3), left_bound=0.001))
        for yi, z1, z2, z3, z4 in zip(y, y + a, y - a, y * a, y / a):
            for z, ref in ((z1, yi + a), (z2, yi - a),
                           (z3, yi * a), (z4, yi / a)):
                self._assert_interval_almost_equal(z, lo(ref), hi(ref))

    # ------------------------------------------------------------------ #
    # construction                                                       #
    # ------------------------------------------------------------------ #
    def test_parsing_degenerate_intervals(self):
        """A plain scalar constructs the degenerate interval [a, a]."""
        a = numpy.random.rand()
        x = I(a)
        self.assertEqual(lo(x), a)
        self.assertEqual(hi(x), a)
# Running this file directly executes the whole suite via unittest's CLI.
if __name__ == '__main__':
    unittest.main()
"intervals.methods.lo",
"numpy.random.rand",
"intervals.methods.hi",
"unittest.main",
"intervals.number.Interval"
] | [((10586, 10601), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10599, 10601), False, 'import unittest\n'), ((10194, 10213), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (10211, 10213), False, 'import numpy\n'), ((10226, 10230), 'intervals.number.Interval', 'I', (['a'], {}), '(a)\n', (10227, 10230), True, 'from intervals.number import Interval as I\n'), ((10256, 10261), 'intervals.methods.lo', 'lo', (['x'], {}), '(x)\n', (10258, 10261), False, 'from intervals.methods import intervalise, lo, hi\n'), ((10291, 10296), 'intervals.methods.hi', 'hi', (['x'], {}), '(x)\n', (10293, 10296), False, 'from intervals.methods import intervalise, lo, hi\n'), ((904, 916), 'intervals.methods.lo', 'lo', (['xi_op_yi'], {}), '(xi_op_yi)\n', (906, 916), False, 'from intervals.methods import intervalise, lo, hi\n'), ((970, 982), 'intervals.methods.hi', 'hi', (['xi_op_yi'], {}), '(xi_op_yi)\n', (972, 982), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1521, 1533), 'intervals.methods.lo', 'lo', (['xi_op_yi'], {}), '(xi_op_yi)\n', (1523, 1533), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1587, 1599), 'intervals.methods.hi', 'hi', (['xi_op_yi'], {}), '(xi_op_yi)\n', (1589, 1599), False, 'from intervals.methods import intervalise, lo, hi\n'), ((2144, 2156), 'intervals.methods.lo', 'lo', (['xi_op_yi'], {}), '(xi_op_yi)\n', (2146, 2156), False, 'from intervals.methods import intervalise, lo, hi\n'), ((2210, 2222), 'intervals.methods.hi', 'hi', (['xi_op_yi'], {}), '(xi_op_yi)\n', (2212, 2222), False, 'from intervals.methods import intervalise, lo, hi\n'), ((2774, 2788), 'intervals.methods.lo', 'lo', (['xi_plus_yi'], {}), '(xi_plus_yi)\n', (2776, 2788), False, 'from intervals.methods import intervalise, lo, hi\n'), ((2842, 2856), 'intervals.methods.hi', 'hi', (['xi_plus_yi'], {}), '(xi_plus_yi)\n', (2844, 2856), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3524, 3530), 'intervals.methods.lo', 'lo', (['z1'], 
{}), '(z1)\n', (3526, 3530), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3532, 3545), 'intervals.methods.lo', 'lo', (['xi_add_yi'], {}), '(xi_add_yi)\n', (3534, 3545), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3592, 3598), 'intervals.methods.hi', 'hi', (['z1'], {}), '(z1)\n', (3594, 3598), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3600, 3613), 'intervals.methods.hi', 'hi', (['xi_add_yi'], {}), '(xi_add_yi)\n', (3602, 3613), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3660, 3666), 'intervals.methods.lo', 'lo', (['z2'], {}), '(z2)\n', (3662, 3666), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3668, 3681), 'intervals.methods.lo', 'lo', (['xi_sub_yi'], {}), '(xi_sub_yi)\n', (3670, 3681), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3728, 3734), 'intervals.methods.hi', 'hi', (['z2'], {}), '(z2)\n', (3730, 3734), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3736, 3749), 'intervals.methods.hi', 'hi', (['xi_sub_yi'], {}), '(xi_sub_yi)\n', (3738, 3749), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3796, 3802), 'intervals.methods.lo', 'lo', (['z3'], {}), '(z3)\n', (3798, 3802), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3804, 3817), 'intervals.methods.lo', 'lo', (['xi_mul_yi'], {}), '(xi_mul_yi)\n', (3806, 3817), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3864, 3870), 'intervals.methods.hi', 'hi', (['z3'], {}), '(z3)\n', (3866, 3870), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3872, 3885), 'intervals.methods.hi', 'hi', (['xi_mul_yi'], {}), '(xi_mul_yi)\n', (3874, 3885), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3932, 3938), 'intervals.methods.lo', 'lo', (['z4'], {}), '(z4)\n', (3934, 3938), False, 'from intervals.methods import intervalise, lo, hi\n'), ((3940, 3953), 'intervals.methods.lo', 'lo', (['xi_div_yi'], {}), 
'(xi_div_yi)\n', (3942, 3953), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4000, 4006), 'intervals.methods.hi', 'hi', (['z4'], {}), '(z4)\n', (4002, 4006), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4008, 4021), 'intervals.methods.hi', 'hi', (['xi_div_yi'], {}), '(xi_div_yi)\n', (4010, 4021), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4686, 4692), 'intervals.methods.lo', 'lo', (['z1'], {}), '(z1)\n', (4688, 4692), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4694, 4707), 'intervals.methods.lo', 'lo', (['xi_add_yi'], {}), '(xi_add_yi)\n', (4696, 4707), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4754, 4760), 'intervals.methods.hi', 'hi', (['z1'], {}), '(z1)\n', (4756, 4760), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4762, 4775), 'intervals.methods.hi', 'hi', (['xi_add_yi'], {}), '(xi_add_yi)\n', (4764, 4775), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4822, 4828), 'intervals.methods.lo', 'lo', (['z2'], {}), '(z2)\n', (4824, 4828), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4830, 4843), 'intervals.methods.lo', 'lo', (['xi_sub_yi'], {}), '(xi_sub_yi)\n', (4832, 4843), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4890, 4896), 'intervals.methods.hi', 'hi', (['z2'], {}), '(z2)\n', (4892, 4896), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4898, 4911), 'intervals.methods.hi', 'hi', (['xi_sub_yi'], {}), '(xi_sub_yi)\n', (4900, 4911), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4958, 4964), 'intervals.methods.lo', 'lo', (['z3'], {}), '(z3)\n', (4960, 4964), False, 'from intervals.methods import intervalise, lo, hi\n'), ((4966, 4979), 'intervals.methods.lo', 'lo', (['xi_mul_yi'], {}), '(xi_mul_yi)\n', (4968, 4979), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5026, 5032), 'intervals.methods.hi', 'hi', (['z3'], {}), '(z3)\n', 
(5028, 5032), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5034, 5047), 'intervals.methods.hi', 'hi', (['xi_mul_yi'], {}), '(xi_mul_yi)\n', (5036, 5047), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5094, 5100), 'intervals.methods.lo', 'lo', (['z4'], {}), '(z4)\n', (5096, 5100), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5102, 5115), 'intervals.methods.lo', 'lo', (['xi_div_yi'], {}), '(xi_div_yi)\n', (5104, 5115), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5162, 5168), 'intervals.methods.hi', 'hi', (['z4'], {}), '(z4)\n', (5164, 5168), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5170, 5183), 'intervals.methods.hi', 'hi', (['xi_div_yi'], {}), '(xi_div_yi)\n', (5172, 5183), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5850, 5856), 'intervals.methods.lo', 'lo', (['z1'], {}), '(z1)\n', (5852, 5856), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5858, 5871), 'intervals.methods.lo', 'lo', (['ai_add_yi'], {}), '(ai_add_yi)\n', (5860, 5871), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5918, 5924), 'intervals.methods.hi', 'hi', (['z1'], {}), '(z1)\n', (5920, 5924), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5926, 5939), 'intervals.methods.hi', 'hi', (['ai_add_yi'], {}), '(ai_add_yi)\n', (5928, 5939), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5986, 5992), 'intervals.methods.lo', 'lo', (['z2'], {}), '(z2)\n', (5988, 5992), False, 'from intervals.methods import intervalise, lo, hi\n'), ((5994, 6007), 'intervals.methods.lo', 'lo', (['ai_sub_yi'], {}), '(ai_sub_yi)\n', (5996, 6007), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6054, 6060), 'intervals.methods.hi', 'hi', (['z2'], {}), '(z2)\n', (6056, 6060), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6062, 6075), 'intervals.methods.hi', 'hi', (['ai_sub_yi'], {}), '(ai_sub_yi)\n', 
(6064, 6075), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6122, 6128), 'intervals.methods.lo', 'lo', (['z3'], {}), '(z3)\n', (6124, 6128), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6130, 6143), 'intervals.methods.lo', 'lo', (['ai_mul_yi'], {}), '(ai_mul_yi)\n', (6132, 6143), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6190, 6196), 'intervals.methods.hi', 'hi', (['z3'], {}), '(z3)\n', (6192, 6196), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6198, 6211), 'intervals.methods.hi', 'hi', (['ai_mul_yi'], {}), '(ai_mul_yi)\n', (6200, 6211), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6258, 6264), 'intervals.methods.lo', 'lo', (['z4'], {}), '(z4)\n', (6260, 6264), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6266, 6279), 'intervals.methods.lo', 'lo', (['ai_div_yi'], {}), '(ai_div_yi)\n', (6268, 6279), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6326, 6332), 'intervals.methods.hi', 'hi', (['z4'], {}), '(z4)\n', (6328, 6332), False, 'from intervals.methods import intervalise, lo, hi\n'), ((6334, 6347), 'intervals.methods.hi', 'hi', (['ai_div_yi'], {}), '(ai_div_yi)\n', (6336, 6347), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7018, 7024), 'intervals.methods.lo', 'lo', (['z1'], {}), '(z1)\n', (7020, 7024), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7026, 7039), 'intervals.methods.lo', 'lo', (['yi_add_ai'], {}), '(yi_add_ai)\n', (7028, 7039), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7086, 7092), 'intervals.methods.hi', 'hi', (['z1'], {}), '(z1)\n', (7088, 7092), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7094, 7107), 'intervals.methods.hi', 'hi', (['yi_add_ai'], {}), '(yi_add_ai)\n', (7096, 7107), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7154, 7160), 'intervals.methods.lo', 'lo', (['z2'], {}), '(z2)\n', (7156, 7160), 
False, 'from intervals.methods import intervalise, lo, hi\n'), ((7162, 7175), 'intervals.methods.lo', 'lo', (['yi_sub_ai'], {}), '(yi_sub_ai)\n', (7164, 7175), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7222, 7228), 'intervals.methods.hi', 'hi', (['z2'], {}), '(z2)\n', (7224, 7228), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7230, 7243), 'intervals.methods.hi', 'hi', (['yi_sub_ai'], {}), '(yi_sub_ai)\n', (7232, 7243), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7290, 7296), 'intervals.methods.lo', 'lo', (['z3'], {}), '(z3)\n', (7292, 7296), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7298, 7311), 'intervals.methods.lo', 'lo', (['yi_mul_ai'], {}), '(yi_mul_ai)\n', (7300, 7311), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7358, 7364), 'intervals.methods.hi', 'hi', (['z3'], {}), '(z3)\n', (7360, 7364), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7366, 7379), 'intervals.methods.hi', 'hi', (['yi_mul_ai'], {}), '(yi_mul_ai)\n', (7368, 7379), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7426, 7432), 'intervals.methods.lo', 'lo', (['z4'], {}), '(z4)\n', (7428, 7432), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7434, 7447), 'intervals.methods.lo', 'lo', (['yi_div_ai'], {}), '(yi_div_ai)\n', (7436, 7447), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7494, 7500), 'intervals.methods.hi', 'hi', (['z4'], {}), '(z4)\n', (7496, 7500), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7502, 7515), 'intervals.methods.hi', 'hi', (['yi_div_ai'], {}), '(yi_div_ai)\n', (7504, 7515), False, 'from intervals.methods import intervalise, lo, hi\n'), ((7716, 7735), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (7733, 7735), False, 'import numpy\n'), ((8011, 8017), 'intervals.methods.lo', 'lo', (['yi'], {}), '(yi)\n', (8013, 8017), False, 'from intervals.methods import 
intervalise, lo, hi\n'), ((8018, 8024), 'intervals.methods.hi', 'hi', (['yi'], {}), '(yi)\n', (8020, 8024), False, 'from intervals.methods import intervalise, lo, hi\n'), ((8493, 8505), 'intervals.methods.lo', 'lo', (['yi_add_a'], {}), '(yi_add_a)\n', (8495, 8505), False, 'from intervals.methods import intervalise, lo, hi\n'), ((8563, 8575), 'intervals.methods.hi', 'hi', (['yi_add_a'], {}), '(yi_add_a)\n', (8565, 8575), False, 'from intervals.methods import intervalise, lo, hi\n'), ((8633, 8645), 'intervals.methods.lo', 'lo', (['yi_sub_a'], {}), '(yi_sub_a)\n', (8635, 8645), False, 'from intervals.methods import intervalise, lo, hi\n'), ((8703, 8715), 'intervals.methods.hi', 'hi', (['yi_sub_a'], {}), '(yi_sub_a)\n', (8705, 8715), False, 'from intervals.methods import intervalise, lo, hi\n'), ((8773, 8785), 'intervals.methods.lo', 'lo', (['yi_mul_a'], {}), '(yi_mul_a)\n', (8775, 8785), False, 'from intervals.methods import intervalise, lo, hi\n'), ((8843, 8855), 'intervals.methods.hi', 'hi', (['yi_mul_a'], {}), '(yi_mul_a)\n', (8845, 8855), False, 'from intervals.methods import intervalise, lo, hi\n'), ((8913, 8925), 'intervals.methods.lo', 'lo', (['yi_div_a'], {}), '(yi_div_a)\n', (8915, 8925), False, 'from intervals.methods import intervalise, lo, hi\n'), ((8983, 8995), 'intervals.methods.hi', 'hi', (['yi_div_a'], {}), '(yi_div_a)\n', (8985, 8995), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9205, 9224), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (9222, 9224), False, 'import numpy\n'), ((9629, 9635), 'intervals.methods.lo', 'lo', (['z1'], {}), '(z1)\n', (9631, 9635), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9637, 9650), 'intervals.methods.lo', 'lo', (['yi_add_ai'], {}), '(yi_add_ai)\n', (9639, 9650), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9697, 9703), 'intervals.methods.hi', 'hi', (['z1'], {}), '(z1)\n', (9699, 9703), False, 'from intervals.methods import intervalise, lo, 
hi\n'), ((9705, 9718), 'intervals.methods.hi', 'hi', (['yi_add_ai'], {}), '(yi_add_ai)\n', (9707, 9718), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9765, 9771), 'intervals.methods.lo', 'lo', (['z2'], {}), '(z2)\n', (9767, 9771), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9773, 9786), 'intervals.methods.lo', 'lo', (['yi_sub_ai'], {}), '(yi_sub_ai)\n', (9775, 9786), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9833, 9839), 'intervals.methods.hi', 'hi', (['z2'], {}), '(z2)\n', (9835, 9839), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9841, 9854), 'intervals.methods.hi', 'hi', (['yi_sub_ai'], {}), '(yi_sub_ai)\n', (9843, 9854), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9901, 9907), 'intervals.methods.lo', 'lo', (['z3'], {}), '(z3)\n', (9903, 9907), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9909, 9922), 'intervals.methods.lo', 'lo', (['yi_mul_ai'], {}), '(yi_mul_ai)\n', (9911, 9922), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9969, 9975), 'intervals.methods.hi', 'hi', (['z3'], {}), '(z3)\n', (9971, 9975), False, 'from intervals.methods import intervalise, lo, hi\n'), ((9977, 9990), 'intervals.methods.hi', 'hi', (['yi_mul_ai'], {}), '(yi_mul_ai)\n', (9979, 9990), False, 'from intervals.methods import intervalise, lo, hi\n'), ((10037, 10043), 'intervals.methods.lo', 'lo', (['z4'], {}), '(z4)\n', (10039, 10043), False, 'from intervals.methods import intervalise, lo, hi\n'), ((10045, 10058), 'intervals.methods.lo', 'lo', (['yi_div_ai'], {}), '(yi_div_ai)\n', (10047, 10058), False, 'from intervals.methods import intervalise, lo, hi\n'), ((10105, 10111), 'intervals.methods.hi', 'hi', (['z4'], {}), '(z4)\n', (10107, 10111), False, 'from intervals.methods import intervalise, lo, hi\n'), ((10113, 10126), 'intervals.methods.hi', 'hi', (['yi_div_ai'], {}), '(yi_div_ai)\n', (10115, 10126), False, 'from intervals.methods import 
intervalise, lo, hi\n'), ((732, 738), 'intervals.methods.lo', 'lo', (['xi'], {}), '(xi)\n', (734, 738), False, 'from intervals.methods import intervalise, lo, hi\n'), ((739, 745), 'intervals.methods.hi', 'hi', (['xi'], {}), '(xi)\n', (741, 745), False, 'from intervals.methods import intervalise, lo, hi\n'), ((749, 755), 'intervals.methods.lo', 'lo', (['yi'], {}), '(yi)\n', (751, 755), False, 'from intervals.methods import intervalise, lo, hi\n'), ((756, 762), 'intervals.methods.hi', 'hi', (['yi'], {}), '(yi)\n', (758, 762), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1349, 1355), 'intervals.methods.lo', 'lo', (['xi'], {}), '(xi)\n', (1351, 1355), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1356, 1362), 'intervals.methods.hi', 'hi', (['xi'], {}), '(xi)\n', (1358, 1362), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1366, 1372), 'intervals.methods.lo', 'lo', (['yi'], {}), '(yi)\n', (1368, 1372), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1373, 1379), 'intervals.methods.hi', 'hi', (['yi'], {}), '(yi)\n', (1375, 1379), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1972, 1978), 'intervals.methods.lo', 'lo', (['xi'], {}), '(xi)\n', (1974, 1978), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1979, 1985), 'intervals.methods.hi', 'hi', (['xi'], {}), '(xi)\n', (1981, 1985), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1989, 1995), 'intervals.methods.lo', 'lo', (['yi'], {}), '(yi)\n', (1991, 1995), False, 'from intervals.methods import intervalise, lo, hi\n'), ((1996, 2002), 'intervals.methods.hi', 'hi', (['yi'], {}), '(yi)\n', (1998, 2002), False, 'from intervals.methods import intervalise, lo, hi\n'), ((2602, 2608), 'intervals.methods.lo', 'lo', (['xi'], {}), '(xi)\n', (2604, 2608), False, 'from intervals.methods import intervalise, lo, hi\n'), ((2609, 2615), 'intervals.methods.hi', 'hi', (['xi'], {}), '(xi)\n', (2611, 2615), False, 
'from intervals.methods import intervalise, lo, hi\n'), ((2619, 2625), 'intervals.methods.lo', 'lo', (['yi'], {}), '(yi)\n', (2621, 2625), False, 'from intervals.methods import intervalise, lo, hi\n'), ((2626, 2632), 'intervals.methods.hi', 'hi', (['yi'], {}), '(yi)\n', (2628, 2632), False, 'from intervals.methods import intervalise, lo, hi\n')] |
# MNIST image and label reader & dataset provider for tensorflow networks
# <NAME>
# <EMAIL>
import numpy as np
import struct
from PIL import Image
# read MNIST dataset - image
def read_image(filename):
raw = open(filename, 'rb')
magic_num = struct.unpack("i", raw.read(4)[::-1])[0]
if magic_num != 2051:
print(filename, ' could be wrong file, failed to read')
return
item_num = struct.unpack("i", raw.read(4)[::-1])[0]
row_num = struct.unpack("i", raw.read(4)[::-1])[0]
col_num = struct.unpack("i", raw.read(4)[::-1])[0]
image = np.zeros([item_num, row_num * col_num])
for i in range(0, item_num):
image[i, :] = np.fromstring(raw.read(row_num * col_num), dtype=np.uint8)
image = image.reshape(-1, row_num, col_num, 1)
return image
# read MNIST dataset - label
def read_label(filename):
raw = open(filename, 'rb')
magic_num = struct.unpack("i", raw.read(4)[::-1])[0]
if magic_num != 2049:
print(filename, ' could be wrong file, failed to read')
return
item_num = struct.unpack("i", raw.read(4)[::-1])[0]
label = np.zeros([item_num, 10])
class_ = []
for i in range(0, item_num):
temp = np.fromstring(raw.read(1), dtype=np.uint8)
label[i][temp] = 1
if (temp in class_) == False:
class_.append(temp)
class_num = len(np.unique(class_))
return label
def saveas_image(image_dir, label_dir, output_dir):
image = read_image(image_dir)
label = read_label(label_dir)
for i in range(image.shape[0]):
Image.fromarray(image[i]).save(output_dir + '/images/{0}.png'.format(str(i + 1)))
f = open(output_dir + '/labels/{0}.txt'.format(str(i+1)), 'w')
f.write(str(np.argmax(label[i])))
f.close()
if __name__ == '__main__':
image_dir = './train-images-idx3-ubyte'
label_dir = './train-labels-idx1-ubyte'
saveas_image(image_dir, label_dir, './train') | [
"numpy.argmax",
"numpy.zeros",
"numpy.unique",
"PIL.Image.fromarray"
] | [((594, 633), 'numpy.zeros', 'np.zeros', (['[item_num, row_num * col_num]'], {}), '([item_num, row_num * col_num])\n', (602, 633), True, 'import numpy as np\n'), ((1147, 1171), 'numpy.zeros', 'np.zeros', (['[item_num, 10]'], {}), '([item_num, 10])\n', (1155, 1171), True, 'import numpy as np\n'), ((1403, 1420), 'numpy.unique', 'np.unique', (['class_'], {}), '(class_)\n', (1412, 1420), True, 'import numpy as np\n'), ((1613, 1638), 'PIL.Image.fromarray', 'Image.fromarray', (['image[i]'], {}), '(image[i])\n', (1628, 1638), False, 'from PIL import Image\n'), ((1788, 1807), 'numpy.argmax', 'np.argmax', (['label[i]'], {}), '(label[i])\n', (1797, 1807), True, 'import numpy as np\n')] |
import os
import sys
from difflib import SequenceMatcher
from pyproj import Proj, transform
import numpy as np
import pandas as pd
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
def extract_loc(t_array, gps_data):
_xy = gps_data[[0, -1]]
pct = ((t_array * 1.0) / np.max(t_array))[:, np.newaxis]
pos = pct * (_xy[1] - _xy[0])[np.newaxis, :] + _xy[0]
return pos
def proc_detection(c_angle, view_extend_dist, f_lpr, f_traj, f_gps):
# get walking trajectory
WGS = Proj(init="epsg:4326")
utm_11 = Proj(init="epsg:26911")
df = pd.read_csv(f_lpr)
df_loc = pd.read_csv(f_gps, names=['lat', 'lon'])
xy = df_loc.apply(lambda r: transform(WGS, utm_11, r['lon'], r['lat']),
axis=1)
df_loc['x'] = xy.apply(lambda x: x[0])
df_loc['y'] = xy.apply(lambda x: x[1])
gps = df_loc[['x', 'y']].values
ts = df['time'].values
pos_intp = extract_loc(ts, gps)
# get camera location for each video frame image
traj = np.apply_along_axis(lambda x: transform(utm_11, WGS, x[0], x[1]),
1, np.unique(pos_intp, axis=0))
traj[:, [0, 1]] = traj[:, [1, 0]]
traj = traj[::-1]
traj = np.append(traj, np.unique(ts)[:, np.newaxis], axis=1)
traj = pd.DataFrame(traj, columns=['lat', 'lon', 't_vid'])
with open(f_traj, 'w') as wrt:
str_l = []
for row in traj.values:
str_l.append("%f,%f" % (row[1], row[0]))
wrt.write("%s" % ';'.join(str_l))
# get vehical location for each image
unit = pos_intp[-1] - pos_intp[0]
unit = unit[:, np.newaxis]
unit = unit / np.linalg.norm(unit)
dist = df['dist'].values
dist[dist == -1.0] = np.nan
dist += view_extend_dist
th = np.radians(c_angle)
rot_M = np.array([[np.cos(th), -np.sin(th)], [np.sin(th), np.cos(th)]])
rot_unit = np.dot(rot_M, unit)
pos_car = np.dot(dist[:, np.newaxis], rot_unit.T) + pos_intp
df_car = pd.DataFrame(pos_car, columns=['x', 'y'])
# update license plate by finding connect component in a similarity graph
df = pd.concat([df, df_car], axis=1)
df = df[~df['plate'].isna()]
plates = df['plate'].unique()
plate_count = df.groupby(['plate'])['time'].count()
# build graph as edge list
lp_rel = []
for lp_1 in plates:
m_score = []
for lp_2 in plates:
if lp_1 == lp_2:
m_score.append(0.0)
else:
scr = similar(lp_1, lp_2)
if scr > 0.5:
m_score.append(scr)
else:
m_score.append(-1.0)
lp_m = plates[np.argmax(m_score)]
if ((lp_1, lp_m) not in lp_rel) and ((lp_m, lp_1) not in lp_rel):
lp_rel.append((lp_1, lp_m))
# find connected components
con_comps = []
for lp_con in lp_rel:
new_comp = set()
new_con_comps = []
for comp in con_comps:
if (lp_con[0] in comp) or (lp_con[1] in comp):
new_comp |= comp
else:
new_con_comps.append(comp)
new_comp |= set(lp_con)
new_con_comps.append(new_comp)
con_comps = new_con_comps
# license plate with maximum occurence is the plate
# for the component (a.k.a. vehicle)
map_lp = {}
for comp in con_comps:
comp = list(comp)
counts = plate_count.loc[comp].values
lp_comp = comp[np.argmax(counts)]
for lp in comp:
map_lp[lp] = lp_comp
for index, row in df.iterrows():
p = df.loc[index, 'plate']
df.loc[index, 'plate'] = map_lp[p]
print("Raw VS. cleaned: %d VS. %d" % (plates.shape[0],
df['plate'].unique().shape[0]))
print("Raw")
print(plates,'\n')
print("Cleaned")
print(df['plate'].unique())
# update vehicle location based on updated plate number detection result
gp = df.groupby(['plate'])
result = gp['time'].agg(['first', 'last'])
result['plate'] = gp['plate'].agg(lambda x: x.value_counts().index[0])
result['x'] = gp['x'].mean()
result['y'] = gp['y'].mean()
ll = result.apply(lambda r: transform(utm_11, WGS, r['x'], r['y']), axis=1)
result['lat'] = ll.apply(lambda x: x[1])
result['lon'] = ll.apply(lambda x: x[0])
result = result.sort_values('first')
# return final result
result['img'] = gp['img'].first()
result = result[['plate', 'first', 'last', 'lat', 'lon', 'img']]
result.columns = ['plate', 'start_vid_t', 'end_vid_t',
'lat', 'lon', 'img_path']
result = result.sort_values('start_vid_t')
return result
if __name__ == "__main__":
if len(sys.argv) == 1:
camera_angle = 290 #70
view_dist_ext = 1.5
char_sim = 0.5
f_lprs = 'tmp2/all_frames_lps.csv'
f_gps = 'test3.gps'
lpr_out_f = 'test3.lpr'
traj_out_f = 'test3.traj'
else:
camera_angle = int(sys.argv[1])
view_dist_ext = int(sys.argv[2])
char_sim = float(sys.argv[3])
f_lprs = sys.argv[4]
f_gps = sys.argv[5]
lpr_out_f = sys.argv[6]
traj_out_f = sys.argv[7]
root_dir = os.path.normpath(os.path.join(__file__, '../' * 2))
f_lprs = os.path.join(root_dir, f_lprs)
f_gps = os.path.join(root_dir, f_gps)
lpr_out_f = os.path.join(root_dir, lpr_out_f)
traj_out_f = os.path.join(root_dir, traj_out_f)
res = proc_detection(
camera_angle, view_dist_ext, f_lprs, traj_out_f, f_gps)
res.to_csv(lpr_out_f, index=False)
| [
"numpy.radians",
"numpy.unique",
"pandas.read_csv",
"numpy.sin",
"difflib.SequenceMatcher",
"os.path.join",
"pyproj.transform",
"numpy.argmax",
"numpy.max",
"numpy.dot",
"numpy.cos",
"pyproj.Proj",
"numpy.linalg.norm",
"pandas.DataFrame",
"pandas.concat"
] | [((509, 531), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (513, 531), False, 'from pyproj import Proj, transform\n'), ((545, 568), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:26911"""'}), "(init='epsg:26911')\n", (549, 568), False, 'from pyproj import Proj, transform\n'), ((578, 596), 'pandas.read_csv', 'pd.read_csv', (['f_lpr'], {}), '(f_lpr)\n', (589, 596), True, 'import pandas as pd\n'), ((610, 650), 'pandas.read_csv', 'pd.read_csv', (['f_gps'], {'names': "['lat', 'lon']"}), "(f_gps, names=['lat', 'lon'])\n", (621, 650), True, 'import pandas as pd\n'), ((1272, 1323), 'pandas.DataFrame', 'pd.DataFrame', (['traj'], {'columns': "['lat', 'lon', 't_vid']"}), "(traj, columns=['lat', 'lon', 't_vid'])\n", (1284, 1323), True, 'import pandas as pd\n'), ((1755, 1774), 'numpy.radians', 'np.radians', (['c_angle'], {}), '(c_angle)\n', (1765, 1774), True, 'import numpy as np\n'), ((1866, 1885), 'numpy.dot', 'np.dot', (['rot_M', 'unit'], {}), '(rot_M, unit)\n', (1872, 1885), True, 'import numpy as np\n'), ((1964, 2005), 'pandas.DataFrame', 'pd.DataFrame', (['pos_car'], {'columns': "['x', 'y']"}), "(pos_car, columns=['x', 'y'])\n", (1976, 2005), True, 'import pandas as pd\n'), ((2094, 2125), 'pandas.concat', 'pd.concat', (['[df, df_car]'], {'axis': '(1)'}), '([df, df_car], axis=1)\n', (2103, 2125), True, 'import pandas as pd\n'), ((5274, 5304), 'os.path.join', 'os.path.join', (['root_dir', 'f_lprs'], {}), '(root_dir, f_lprs)\n', (5286, 5304), False, 'import os\n'), ((5317, 5346), 'os.path.join', 'os.path.join', (['root_dir', 'f_gps'], {}), '(root_dir, f_gps)\n', (5329, 5346), False, 'import os\n'), ((5363, 5396), 'os.path.join', 'os.path.join', (['root_dir', 'lpr_out_f'], {}), '(root_dir, lpr_out_f)\n', (5375, 5396), False, 'import os\n'), ((5414, 5448), 'os.path.join', 'os.path.join', (['root_dir', 'traj_out_f'], {}), '(root_dir, traj_out_f)\n', (5426, 5448), False, 'import os\n'), ((1107, 1134), 'numpy.unique', 'np.unique', 
(['pos_intp'], {'axis': '(0)'}), '(pos_intp, axis=0)\n', (1116, 1134), True, 'import numpy as np\n'), ((1635, 1655), 'numpy.linalg.norm', 'np.linalg.norm', (['unit'], {}), '(unit)\n', (1649, 1655), True, 'import numpy as np\n'), ((1900, 1939), 'numpy.dot', 'np.dot', (['dist[:, np.newaxis]', 'rot_unit.T'], {}), '(dist[:, np.newaxis], rot_unit.T)\n', (1906, 1939), True, 'import numpy as np\n'), ((5226, 5259), 'os.path.join', 'os.path.join', (['__file__', "('../' * 2)"], {}), "(__file__, '../' * 2)\n", (5238, 5259), False, 'import os\n'), ((163, 190), 'difflib.SequenceMatcher', 'SequenceMatcher', (['None', 'a', 'b'], {}), '(None, a, b)\n', (178, 190), False, 'from difflib import SequenceMatcher\n'), ((294, 309), 'numpy.max', 'np.max', (['t_array'], {}), '(t_array)\n', (300, 309), True, 'import numpy as np\n'), ((683, 725), 'pyproj.transform', 'transform', (['WGS', 'utm_11', "r['lon']", "r['lat']"], {}), "(WGS, utm_11, r['lon'], r['lat'])\n", (692, 725), False, 'from pyproj import Proj, transform\n'), ((1037, 1071), 'pyproj.transform', 'transform', (['utm_11', 'WGS', 'x[0]', 'x[1]'], {}), '(utm_11, WGS, x[0], x[1])\n', (1046, 1071), False, 'from pyproj import Proj, transform\n'), ((1223, 1236), 'numpy.unique', 'np.unique', (['ts'], {}), '(ts)\n', (1232, 1236), True, 'import numpy as np\n'), ((2649, 2667), 'numpy.argmax', 'np.argmax', (['m_score'], {}), '(m_score)\n', (2658, 2667), True, 'import numpy as np\n'), ((3436, 3453), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (3445, 3453), True, 'import numpy as np\n'), ((4182, 4220), 'pyproj.transform', 'transform', (['utm_11', 'WGS', "r['x']", "r['y']"], {}), "(utm_11, WGS, r['x'], r['y'])\n", (4191, 4220), False, 'from pyproj import Proj, transform\n'), ((1798, 1808), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (1804, 1808), True, 'import numpy as np\n'), ((1825, 1835), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (1831, 1835), True, 'import numpy as np\n'), ((1837, 1847), 'numpy.cos', 'np.cos', 
(['th'], {}), '(th)\n', (1843, 1847), True, 'import numpy as np\n'), ((1811, 1821), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (1817, 1821), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import truncnorm, norm
def soft_threshold(r, gamma):
"""
soft-thresholding function
"""
return np.maximum(np.abs(r) - gamma, 0.0) * np.sign(r)
def df(r, gamma):
"""
divergence-free function
"""
eta = soft_threshold(r, gamma)
return eta - np.mean(eta != 0) * r
def GCAMP(w, beta, log=False):
shita = 0.7
communication_cost = 0
P, N, _ = w.shape
T = beta * shita / (P-1)
R = np.zeros((P, N, 1))
z = np.zeros((N, 1))
#STEP1
for p in range(1, P):
R[p] = np.abs(w[p]) > T
candidate = np.where(R[p])[0]
for n in candidate:
communication_cost += 1
send_to1(n, w[p, n])
#STEP2
S = [np.where(R[:, n])[0] for n in range(N)]
m = np.sum(R, axis=0)
U = np.empty((N, 1))
for n in range(N):
upper = (P - 1 - m[n]) * T
z[n] = w[0, n] + np.sum([w[p, n] for p in S[n]])
U[n] = np.abs(z[n]) + upper
F = (U > beta) * (m < (P-1))
candidate = np.where(F)[0]
for n in candidate:
communication_cost += 1
broadcast_others(n)
#STEP3
F_R = F * np.logical_not(R)
for p in range(1, P):
#print("p: {}".format(p))
candidate = np.where(F_R[p])[0]
for n in candidate:
communication_cost += 1
send_to1(n ,w[p, n])
if log:
print("Rp: {} \t F: {} \t F\\Rp: {}".format(np.sum(R), np.sum(F), np.sum(F_R)-np.sum(F)))
print("Total Communication Cost: {}".format(communication_cost))
print("="*50)
#STEP4
s = np.zeros((N, 1))
b = np.zeros((N, 1))
V = np.where(U > beta)[0].tolist()
for n in V:
b[n] = np.sum(w[:, n])
s[n] = soft_threshold(b[n], beta)
return s.real, communication_cost
def GCAMP_exp(w, tau_p, log=False):
shita = 0.7
tau = np.sum(tau_p)
communication_cost = 0
P, N, _ = w.shape
R = np.zeros((P, N, 1))
#STEP1
for p in range(1, P):
R[p] = np.square(w[p]) > tau_p[p] * shita
candidate = np.where(R[p])[0]
for n in candidate:
communication_cost += 1
send_to1(n, w[p, n])
#STEP2
S = [np.where(R[:, n])[0] for n in range(N)]
m = np.sum(R, axis=0)
U = np.empty((N, 1))
for n in range(N):
upper = np.sum([tau_p[p] for p in range(1, P) if p not in S[p]])
U[n] = (w[0, n] + np.sum(w[p, n] for p in S[n]))**2 + upper * shita
F = (U > tau) * (m < (P-1))
candidate = np.where(F)[0]
for n in candidate:
communication_cost += 1
broadcast_others(n)
#STEP3
F_R = F * np.logical_not(R)
for p in range(1, P):
#print("p: {}".format(p))
candidate = np.where(F_R[p])[0]
for n in candidate:
communication_cost += 1
send_to1(n ,w[p, n])
if log:
print("Rp: {} \t F: {} \t F\\Rp: {}".format(np.sum(R), np.sum(F), np.sum(F_R)-np.sum(F)))
print("Total Communication Cost: {}".format(communication_cost))
print("="*50)
#STEP4
s = np.zeros((N, 1))
V = np.where(U > tau)[0].tolist()
for n in V:
w_sum = np.sum(w[:, n])
s[n] = soft_threshold(w_sum, tau**0.5)
return s.real, communication_cost
def send_to1(n, w):
#print("n: {}, w: {}".format(n, w))
pass
def broadcast_others(n):
#print("n: {}".format(n))
pass
def GCOAMP(w, tau_p, log=False):
shita = 0.7
tau = np.sum(tau_p)
communication_cost = 0
P, N, _ = w.shape
R = np.zeros((P, N, 1))
z = [0] * N
#STEP1
for p in range(1, P):
R[p] = np.square(w[p]) > tau_p[p] * shita
candidate = np.where(R[p])[0]
for n in candidate:
communication_cost += 1
send_to1(n, w[p, n])
#STEP2
S = [np.where(R[:, n])[0] for n in range(N)]
m = np.sum(R, axis=0)
U = np.empty((N, 1))
for n in range(N):
upper = np.sum([tau_p[p] for p in range(1, P) if p not in S[p]])
z[n] = w[0, n] + np.sum([w[p, n] for p in S[n]])
U[n] = z[n]**2 + upper * shita
F = (U > tau) * (m < (P-1))
candidate = np.where(F)[0]
for n in candidate:
communication_cost += 1
broadcast_others(n)
#STEP3
F_R = F * np.logical_not(R)
for p in range(1, P):
#print("p: {}".format(p))
candidate = np.where(F_R[p])[0]
for n in candidate:
communication_cost += 1
send_to1(n ,w[p, n])
if log:
print("Rp: {} \t F: {} \t F\\Rp: {}".format(np.sum(R), np.sum(F), np.sum(F_R)-np.sum(F)))
print("Total Communication Cost: {}".format(communication_cost))
print("="*50)
#STEP4
u = np.zeros((N, 1))
b = np.zeros((N, 1))
V = np.where(U > tau)[0].tolist()
for n in V:
b[n] = np.sum(w[:, n])
u[n] = soft_threshold(b[n], tau**0.5)
#STEP5
#if approx: rand = beta * truncnorm.rvs(-1, 1, loc=0, scale=1, size=N-K)
#else : rand = Rrandom(u, beta, K)#(tau - tau_p[0])**0.5 * truncnorm.rvs(-1, 1, loc=0, scale=1, size=N-K)
Vc = [n for n in range(N) if n not in V]
for n in Vc:
b[n] = z[n]
b[n] += np.sum([rand(shita * tau_p[p]) for p in range(1, P) if p not in S[n]])
s = u - np.mean(u != 0)*b
return s.real, communication_cost
def rand(tau):
return tau**0.5 * truncnorm.rvs(-1, 1, loc=0, scale=1, size=1)
def Rrandom(u, t, K):
N = u.shape[0]
u0 = np.histogram(u, bins=N)
Pu = u0[0]/N
Pu = np.append(Pu, 0)
u1 = u0[1]
phi = lambda x: norm.pdf((x-u1)/t)/t
maxu = np.argmax(Pu)
phi_x = phi(u1[maxu])
max = np.max(np.sum(Pu * phi_x))
rand = np.empty(N-K)
for i in range(N-K):
while True:
x, y = np.random.rand(2)
a = -t + 2*t*x
phi_a = phi(a)
A = np.sum(Pu * phi_a)
if max*y <= A:
rand[i] = a
break
return rand | [
"numpy.mean",
"numpy.histogram",
"numpy.abs",
"numpy.random.rand",
"numpy.where",
"numpy.logical_not",
"numpy.argmax",
"numpy.square",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"numpy.empty",
"numpy.sign",
"scipy.stats.norm.pdf",
"scipy.stats.truncnorm.rvs"
] | [((495, 514), 'numpy.zeros', 'np.zeros', (['(P, N, 1)'], {}), '((P, N, 1))\n', (503, 514), True, 'import numpy as np\n'), ((524, 540), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (532, 540), True, 'import numpy as np\n'), ((835, 852), 'numpy.sum', 'np.sum', (['R'], {'axis': '(0)'}), '(R, axis=0)\n', (841, 852), True, 'import numpy as np\n'), ((862, 878), 'numpy.empty', 'np.empty', (['(N, 1)'], {}), '((N, 1))\n', (870, 878), True, 'import numpy as np\n'), ((1678, 1694), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (1686, 1694), True, 'import numpy as np\n'), ((1704, 1720), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (1712, 1720), True, 'import numpy as np\n'), ((1967, 1980), 'numpy.sum', 'np.sum', (['tau_p'], {}), '(tau_p)\n', (1973, 1980), True, 'import numpy as np\n'), ((2041, 2060), 'numpy.zeros', 'np.zeros', (['(P, N, 1)'], {}), '((P, N, 1))\n', (2049, 2060), True, 'import numpy as np\n'), ((2373, 2390), 'numpy.sum', 'np.sum', (['R'], {'axis': '(0)'}), '(R, axis=0)\n', (2379, 2390), True, 'import numpy as np\n'), ((2400, 2416), 'numpy.empty', 'np.empty', (['(N, 1)'], {}), '((N, 1))\n', (2408, 2416), True, 'import numpy as np\n'), ((3231, 3247), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (3239, 3247), True, 'import numpy as np\n'), ((3637, 3650), 'numpy.sum', 'np.sum', (['tau_p'], {}), '(tau_p)\n', (3643, 3650), True, 'import numpy as np\n'), ((3711, 3730), 'numpy.zeros', 'np.zeros', (['(P, N, 1)'], {}), '((P, N, 1))\n', (3719, 3730), True, 'import numpy as np\n'), ((4060, 4077), 'numpy.sum', 'np.sum', (['R'], {'axis': '(0)'}), '(R, axis=0)\n', (4066, 4077), True, 'import numpy as np\n'), ((4087, 4103), 'numpy.empty', 'np.empty', (['(N, 1)'], {}), '((N, 1))\n', (4095, 4103), True, 'import numpy as np\n'), ((4943, 4959), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (4951, 4959), True, 'import numpy as np\n'), ((4969, 4985), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', 
(4977, 4985), True, 'import numpy as np\n'), ((5732, 5755), 'numpy.histogram', 'np.histogram', (['u'], {'bins': 'N'}), '(u, bins=N)\n', (5744, 5755), True, 'import numpy as np\n'), ((5784, 5800), 'numpy.append', 'np.append', (['Pu', '(0)'], {}), '(Pu, 0)\n', (5793, 5800), True, 'import numpy as np\n'), ((5875, 5888), 'numpy.argmax', 'np.argmax', (['Pu'], {}), '(Pu)\n', (5884, 5888), True, 'import numpy as np\n'), ((5966, 5981), 'numpy.empty', 'np.empty', (['(N - K)'], {}), '(N - K)\n', (5974, 5981), True, 'import numpy as np\n'), ((194, 204), 'numpy.sign', 'np.sign', (['r'], {}), '(r)\n', (201, 204), True, 'import numpy as np\n'), ((1085, 1096), 'numpy.where', 'np.where', (['F'], {}), '(F)\n', (1093, 1096), True, 'import numpy as np\n'), ((1220, 1237), 'numpy.logical_not', 'np.logical_not', (['R'], {}), '(R)\n', (1234, 1237), True, 'import numpy as np\n'), ((1794, 1809), 'numpy.sum', 'np.sum', (['w[:, n]'], {}), '(w[:, n])\n', (1800, 1809), True, 'import numpy as np\n'), ((2642, 2653), 'numpy.where', 'np.where', (['F'], {}), '(F)\n', (2650, 2653), True, 'import numpy as np\n'), ((2777, 2794), 'numpy.logical_not', 'np.logical_not', (['R'], {}), '(R)\n', (2791, 2794), True, 'import numpy as np\n'), ((3321, 3336), 'numpy.sum', 'np.sum', (['w[:, n]'], {}), '(w[:, n])\n', (3327, 3336), True, 'import numpy as np\n'), ((4350, 4361), 'numpy.where', 'np.where', (['F'], {}), '(F)\n', (4358, 4361), True, 'import numpy as np\n'), ((4485, 4502), 'numpy.logical_not', 'np.logical_not', (['R'], {}), '(R)\n', (4499, 4502), True, 'import numpy as np\n'), ((5058, 5073), 'numpy.sum', 'np.sum', (['w[:, n]'], {}), '(w[:, n])\n', (5064, 5073), True, 'import numpy as np\n'), ((5624, 5668), 'scipy.stats.truncnorm.rvs', 'truncnorm.rvs', (['(-1)', '(1)'], {'loc': '(0)', 'scale': '(1)', 'size': '(1)'}), '(-1, 1, loc=0, scale=1, size=1)\n', (5637, 5668), False, 'from scipy.stats import truncnorm, norm\n'), ((5934, 5952), 'numpy.sum', 'np.sum', (['(Pu * phi_x)'], {}), '(Pu * phi_x)\n', (5940, 
5952), True, 'import numpy as np\n'), ((330, 347), 'numpy.mean', 'np.mean', (['(eta != 0)'], {}), '(eta != 0)\n', (337, 347), True, 'import numpy as np\n'), ((602, 614), 'numpy.abs', 'np.abs', (['w[p]'], {}), '(w[p])\n', (608, 614), True, 'import numpy as np\n'), ((640, 654), 'numpy.where', 'np.where', (['R[p]'], {}), '(R[p])\n', (648, 654), True, 'import numpy as np\n'), ((786, 803), 'numpy.where', 'np.where', (['R[:, n]'], {}), '(R[:, n])\n', (794, 803), True, 'import numpy as np\n'), ((965, 996), 'numpy.sum', 'np.sum', (['[w[p, n] for p in S[n]]'], {}), '([w[p, n] for p in S[n]])\n', (971, 996), True, 'import numpy as np\n'), ((1013, 1025), 'numpy.abs', 'np.abs', (['z[n]'], {}), '(z[n])\n', (1019, 1025), True, 'import numpy as np\n'), ((1321, 1337), 'numpy.where', 'np.where', (['F_R[p]'], {}), '(F_R[p])\n', (1329, 1337), True, 'import numpy as np\n'), ((2122, 2137), 'numpy.square', 'np.square', (['w[p]'], {}), '(w[p])\n', (2131, 2137), True, 'import numpy as np\n'), ((2178, 2192), 'numpy.where', 'np.where', (['R[p]'], {}), '(R[p])\n', (2186, 2192), True, 'import numpy as np\n'), ((2324, 2341), 'numpy.where', 'np.where', (['R[:, n]'], {}), '(R[:, n])\n', (2332, 2341), True, 'import numpy as np\n'), ((2878, 2894), 'numpy.where', 'np.where', (['F_R[p]'], {}), '(F_R[p])\n', (2886, 2894), True, 'import numpy as np\n'), ((3809, 3824), 'numpy.square', 'np.square', (['w[p]'], {}), '(w[p])\n', (3818, 3824), True, 'import numpy as np\n'), ((3865, 3879), 'numpy.where', 'np.where', (['R[p]'], {}), '(R[p])\n', (3873, 3879), True, 'import numpy as np\n'), ((4011, 4028), 'numpy.where', 'np.where', (['R[:, n]'], {}), '(R[:, n])\n', (4019, 4028), True, 'import numpy as np\n'), ((4228, 4259), 'numpy.sum', 'np.sum', (['[w[p, n] for p in S[n]]'], {}), '([w[p, n] for p in S[n]])\n', (4234, 4259), True, 'import numpy as np\n'), ((4586, 4602), 'numpy.where', 'np.where', (['F_R[p]'], {}), '(F_R[p])\n', (4594, 4602), True, 'import numpy as np\n'), ((5524, 5539), 'numpy.mean', 'np.mean', 
(['(u != 0)'], {}), '(u != 0)\n', (5531, 5539), True, 'import numpy as np\n'), ((5840, 5862), 'scipy.stats.norm.pdf', 'norm.pdf', (['((x - u1) / t)'], {}), '((x - u1) / t)\n', (5848, 5862), False, 'from scipy.stats import truncnorm, norm\n'), ((6049, 6066), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (6063, 6066), True, 'import numpy as np\n'), ((6140, 6158), 'numpy.sum', 'np.sum', (['(Pu * phi_a)'], {}), '(Pu * phi_a)\n', (6146, 6158), True, 'import numpy as np\n'), ((168, 177), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (174, 177), True, 'import numpy as np\n'), ((1508, 1517), 'numpy.sum', 'np.sum', (['R'], {}), '(R)\n', (1514, 1517), True, 'import numpy as np\n'), ((1519, 1528), 'numpy.sum', 'np.sum', (['F'], {}), '(F)\n', (1525, 1528), True, 'import numpy as np\n'), ((1730, 1748), 'numpy.where', 'np.where', (['(U > beta)'], {}), '(U > beta)\n', (1738, 1748), True, 'import numpy as np\n'), ((3065, 3074), 'numpy.sum', 'np.sum', (['R'], {}), '(R)\n', (3071, 3074), True, 'import numpy as np\n'), ((3076, 3085), 'numpy.sum', 'np.sum', (['F'], {}), '(F)\n', (3082, 3085), True, 'import numpy as np\n'), ((3257, 3274), 'numpy.where', 'np.where', (['(U > tau)'], {}), '(U > tau)\n', (3265, 3274), True, 'import numpy as np\n'), ((4773, 4782), 'numpy.sum', 'np.sum', (['R'], {}), '(R)\n', (4779, 4782), True, 'import numpy as np\n'), ((4784, 4793), 'numpy.sum', 'np.sum', (['F'], {}), '(F)\n', (4790, 4793), True, 'import numpy as np\n'), ((4995, 5012), 'numpy.where', 'np.where', (['(U > tau)'], {}), '(U > tau)\n', (5003, 5012), True, 'import numpy as np\n'), ((1530, 1541), 'numpy.sum', 'np.sum', (['F_R'], {}), '(F_R)\n', (1536, 1541), True, 'import numpy as np\n'), ((1542, 1551), 'numpy.sum', 'np.sum', (['F'], {}), '(F)\n', (1548, 1551), True, 'import numpy as np\n'), ((2542, 2571), 'numpy.sum', 'np.sum', (['(w[p, n] for p in S[n])'], {}), '(w[p, n] for p in S[n])\n', (2548, 2571), True, 'import numpy as np\n'), ((3087, 3098), 'numpy.sum', 'np.sum', 
(['F_R'], {}), '(F_R)\n', (3093, 3098), True, 'import numpy as np\n'), ((3099, 3108), 'numpy.sum', 'np.sum', (['F'], {}), '(F)\n', (3105, 3108), True, 'import numpy as np\n'), ((4795, 4806), 'numpy.sum', 'np.sum', (['F_R'], {}), '(F_R)\n', (4801, 4806), True, 'import numpy as np\n'), ((4807, 4816), 'numpy.sum', 'np.sum', (['F'], {}), '(F)\n', (4813, 4816), True, 'import numpy as np\n')] |
#! crding = utf8
from pandas import *
import numpy as np
import os, sys, subprocess
import netCDF4
import twd97
import datetime
from calendar import monthrange
from scipy.io import FortranFile
from ptse_sub import CORRECT, add_PMS, check_nan, check_landsea, FillNan, WGS_TWD, Elev_YPM
#Main
P=subprocess.check_output('pwd',shell=True).decode('utf8').strip('\n')+'/'
teds=int(P.split('/')[3][-2:])
yr=2016+(teds-10)*3
ndays=365
if yr%4==0:ndays=366
s365=set([i*24 for i in range(ndays)])
nhrs=ndays*24
Hs=10 #cutting height of stacks
#Input the TEDS csv file
try:
df = read_csv('point.csv', encoding='big5')
except:
df = read_csv('point.csv')
# check_NOPandSCC(0)
df = check_nan(df)
# check and correct the X coordinates for isolated islands
df = check_landsea(df)
df = WGS_TWD(df)
df = Elev_YPM(df)
df=df.loc[(df.HEI>=Hs) & (df.NO_S.map(lambda x:x[0]=='P'))].reset_index(drop=True)
df['SUM']=[i+j+k+l+m for i,j,k,l,m in zip(df.SOX_EMI,df.NOX_EMI,df.CO_EMI,df.PM_EMI,df.NMHC_EMI)]
df=df.loc[df.SUM>0].reset_index(drop=True)
df['CP_NO'] = [i + j for i, j in zip(list(df['C_NO']), list(df['NO_S']))]
df['DY1']=[i*j for i,j in zip(df.DW1,df.WY1)]
df['HY1']=[i*j for i,j in zip(df.HD1,df.DY1)]
#71 factories with CEMS will emit (at ground) when stacks are operating
fname=P+'point_cems.csv'
cems=read_csv(fname)
val='SOX PM NOX FLOW X_BLANK1 X_BLANK2'.split()
nval=len(val)
if 'CP_NO' not in cems.columns: #pre-process
cems=cems.drop(cems.loc[cems.C_NO=='C_NO'].index).reset_index(drop=True)
cems['CP_NO'] = [i + j for i, j in zip(list(cems['C_NO']), list(cems['NO_S']))]
cems['PM']=[(i+j)/2 for i,j in zip(cems.SOX,cems.NOX)]
if max(cems.HOUR)>100:
cems['MDH']=[int(i*10000+j*100+k/100) for i,j,k in zip(cems.MONTH,cems.DATE,cems.HOUR)]
else:
cems['MDH']=[int(i*10000+j*100+k) for i,j,k in zip(cems.MONTH,cems.DATE,cems.HOUR)]
cems=pivot_table(cems,index=['CP_NO','MDH'],values=val,aggfunc=sum).reset_index()
#cems(df) convert to cemsM(matrix)
for MC in ['CP_NO','MDH']:
mc=MC.lower()
exec(mc+'=list(set(cems.'+MC+'))');exec(mc+'.sort()')
exec('n'+MC+'=len('+mc+')')
exec('d'+MC+'={'+mc+'[i]:i for i in range(n'+MC+')}')
exec('cems["i'+MC+'"]=[d'+MC+'[i] for i in cems.'+MC+']')
if len(mdh)!=ndays*24:sys.exit('mdh coverage not enough!')
cemsM=np.zeros(shape=(nMDH,nCP_NO,nval))
for i in range(nval):
cemsM[cems.iMDH[:],cems.iCP_NO[:],i]=cems[val[i]]
DD={}
for i in range(nval):
DD[val[i]]=cemsM[:,:,i].flatten()
DD['MDH'] =[i for i in mdh for j in cp_no]
DD['CP_NO']=[j for i in mdh for j in cp_no]
cems=DataFrame(DD)
cems['C_NO']=[i[:8] for i in cems.CP_NO]
cems['MD']=[i//100 for i in cems.MDH]
cems.set_index('CP_NO').to_csv(fname)
for MC in ['CP_NO','MDH','MD','C_NO']:
mc=MC.lower()
exec(mc+'=list(set(cems.'+MC+'))');exec(mc+'.sort()')
exec('n'+MC+'=len('+mc+')')
#Hour of Day pattern
cems['HR']=[i%100 for i in cems.MDH]
pv_cems1=pivot_table(cems,index=['C_NO','HR'],values='SOX',aggfunc=sum).reset_index()
cems_HROD=DataFrame({'C_NO':c_no})
cems_HROD['SOX_HR_ODER']=0
for ic in cems_HROD.index:
pv1=pv_cems1.loc[pv_cems1.C_NO==c_no[ic]]
pv3=pv1.sort_values('SOX',ascending=False).reset_index(drop=True)
cems_HROD.loc[ic,'SOX_HR_ODER']=''.join(['{:d} '.format(i) for i in pv3.HR])
#orders for DY1
pv_cems2=pivot_table(cems,index=['C_NO','MD'],values='FLOW',aggfunc=sum).reset_index()
#Indexing is an exhaustive process.
iMD=[mdh.index(i*100) for i in pv_cems2.MD] #change the MMDD into index sequence among MMDD00's
pv_cems2.MD=iMD
cems_DAOD=DataFrame({'C_NO':c_no})
cems_DAOD['FLOW_DA_ODER']=0
for ic in cems_DAOD.index:
pv1=pv_cems2.loc[pv_cems2.C_NO==c_no[ic]]
pv3=pv1.sort_values('FLOW',ascending=False).reset_index(drop=True)
cems_DAOD.loc[ic,'FLOW_DA_ODER']=''.join(['{:d} '.format(i) for i in pv3.MD])
dfxy=pivot_table(df,index='C_NO',values=['UTM_E','UTM_N'],aggfunc=np.mean).reset_index()
#booleans for pollutant selection
c2v={'NMHC':'PM','SOX':'SOX','NOX':'NOX','PM':'PM','CO':'NOX'} #point.csv vs cems.csv
BLS={c:df[c+'_EMI']>0 for c in c2v}
colT=['HD1','DY1','HY1']
col=['C_NO','CP_NO','HD1','DY1','HY1']+[i for i in df.columns if 'EMI' in i]
for spe in [s for s in [sys.argv[1]] if s in BLS]:
dfV=df[col].loc[BLS[spe]].reset_index(drop=True)
dfV1=pivot_table(dfV,index='CP_NO',values=spe+'_EMI',aggfunc=sum).reset_index()
dfV2=pivot_table(dfV,index='CP_NO',values=colT,aggfunc=np.mean).reset_index()
dfV=merge(dfV1,dfV2,on='CP_NO')
dfV['C_NO']=[i[:8] for i in dfV.CP_NO]
for c in colT:
dfV[c]=np.array(dfV[c],dtype=int)
a,b=list(set(dfV.C_NO)),list(set(cems.C_NO));a.sort();b.sort()
ab=[i for i in a if i in b]
cp=list(set(dfV.CP_NO))
cp.sort()
ons=np.zeros(shape=(len(cp),nMDH))#,dtype=int)
#other fatories without CEMS, take the nearest one
b1=set(b)-set(dfxy.C_NO) #cems factory but without UTM location
c1=[c for c in b if c not in b1 and c in a] #cems plant with X,Y
cemsX=np.array([list(dfxy.loc[dfxy.C_NO==c,'UTM_E'])[0] for c in c1])
cemsY=np.array([list(dfxy.loc[dfxy.C_NO==c,'UTM_N'])[0] for c in c1])
#loop for every factories
for c in [i for i in a if i not in b1]:
c_cems=c
if c not in ab:
x0,y0=list(dfxy.loc[dfxy.C_NO==c,'UTM_E'])[0],list(dfxy.loc[dfxy.C_NO==c,'UTM_N'])[0]
dist=(cemsX-x0)**2+(cemsY-y0)**2
idx=list(dist).index(min(dist))
c_cems=c1[idx]
pv2MD=np.array(list(cems_DAOD.loc[cems_DAOD.C_NO==c_cems,'FLOW_DA_ODER'])[0].split(),dtype=int)
if len(pv2MD)<ndays: pv2MD=np.array(list(pv2MD)+list(s365-set(pv2MD)))
df_cp=dfV.loc[dfV.C_NO==c].reset_index(drop=True)
#loop for every NO_S in this factory
for p in set(df_cp.CP_NO):
ip=cp.index(p)
if p in set(cems.CP_NO):
ons[ip,:]=cems.loc[cems.CP_NO==p,c2v[spe]]*nhrs
else:
dy1=dfV.DY1[ip]
hd1=dfV.HD1[ip]
md3=pv2MD[:dy1]
days=np.zeros(shape=(dy1,hd1),dtype=int)
if hd1==24:
hrs=np.array([ih for ih in range(24)],dtype=int)
else:
first=np.array(list(cems_HROD.loc[cems_HROD.C_NO==c_cems,'SOX_HR_ODER'])[0].split(),dtype=int)[0]
hrs=np.array([(first+ih)%24 for ih in range(hd1)])
for id in range(dy1):
days[id,:]=md3[id]+hrs[:]
idx=days.flatten()
ons[ip,idx]=1.
#other sources
fnameO=spe+'_ECP'+str(len(cp))+'_MDH'+str(len(mdh))+'_ONS.bin'
with FortranFile(fnameO, 'w') as f:
f.write_record(cp)
f.write_record(mdh)
f.write_record(ons)
| [
"subprocess.check_output",
"ptse_sub.check_nan",
"ptse_sub.check_landsea",
"scipy.io.FortranFile",
"numpy.array",
"numpy.zeros",
"ptse_sub.Elev_YPM",
"ptse_sub.WGS_TWD",
"sys.exit"
] | [((676, 689), 'ptse_sub.check_nan', 'check_nan', (['df'], {}), '(df)\n', (685, 689), False, 'from ptse_sub import CORRECT, add_PMS, check_nan, check_landsea, FillNan, WGS_TWD, Elev_YPM\n'), ((754, 771), 'ptse_sub.check_landsea', 'check_landsea', (['df'], {}), '(df)\n', (767, 771), False, 'from ptse_sub import CORRECT, add_PMS, check_nan, check_landsea, FillNan, WGS_TWD, Elev_YPM\n'), ((777, 788), 'ptse_sub.WGS_TWD', 'WGS_TWD', (['df'], {}), '(df)\n', (784, 788), False, 'from ptse_sub import CORRECT, add_PMS, check_nan, check_landsea, FillNan, WGS_TWD, Elev_YPM\n'), ((794, 806), 'ptse_sub.Elev_YPM', 'Elev_YPM', (['df'], {}), '(df)\n', (802, 806), False, 'from ptse_sub import CORRECT, add_PMS, check_nan, check_landsea, FillNan, WGS_TWD, Elev_YPM\n'), ((2297, 2333), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nMDH, nCP_NO, nval)'}), '(shape=(nMDH, nCP_NO, nval))\n', (2305, 2333), True, 'import numpy as np\n'), ((2252, 2288), 'sys.exit', 'sys.exit', (['"""mdh coverage not enough!"""'], {}), "('mdh coverage not enough!')\n", (2260, 2288), False, 'import os, sys, subprocess\n'), ((4537, 4564), 'numpy.array', 'np.array', (['dfV[c]'], {'dtype': 'int'}), '(dfV[c], dtype=int)\n', (4545, 4564), True, 'import numpy as np\n'), ((6377, 6401), 'scipy.io.FortranFile', 'FortranFile', (['fnameO', '"""w"""'], {}), "(fnameO, 'w')\n", (6388, 6401), False, 'from scipy.io import FortranFile\n'), ((5876, 5913), 'numpy.zeros', 'np.zeros', ([], {'shape': '(dy1, hd1)', 'dtype': 'int'}), '(shape=(dy1, hd1), dtype=int)\n', (5884, 5913), True, 'import numpy as np\n'), ((296, 338), 'subprocess.check_output', 'subprocess.check_output', (['"""pwd"""'], {'shell': '(True)'}), "('pwd', shell=True)\n", (319, 338), False, 'import os, sys, subprocess\n')] |
import numpy as np
from numpy.linalg import inv
class GeoArray(np.ndarray):
    """numpy.ndarray subclass carrying a CRS tag and a 2x3 affine transform.

    ``mat`` maps pixel (x, y) coordinates to world coordinates; each row is
    laid out as ``[offset, a, b]`` so that ``world = offset + a*x + b*y``
    (layout inferred from ``project``/``imat`` below — confirm with callers).
    """
    def __new__(cls, input_array, crs=4326, mat=None):
        # NOTE(review): despite the ``mat=None`` default, a None would crash on
        # ``mat.reshape`` below — callers (e.g. ``geoarray``) always pass an array.
        obj = np.asarray(input_array).view(cls)
        obj.crs, obj.mat = crs, mat.reshape((2,3))
        return obj
    def __array_finalize__(self, obj):
        # Propagate geo metadata whenever numpy creates a view or copy.
        if obj is None: return
        self.crs = getattr(obj, 'crs', '')
        self.mat = getattr(obj, 'mat', None)
    def __getitem__(self, item):
        # A "geo slice" is a 2- or 3-tuple whose first two items are slices;
        # only for those is the affine matrix updated to match the crop/stride.
        sliced = True & isinstance(item, tuple)
        if sliced:
            sliced &= len(item) in (2, 3)
            sliced &= isinstance(item[1], slice)
            sliced &= isinstance(item[0], slice)
        def gs(s): return (s.start or 0, s.step or 1)  # (start, step) with defaults
        if sliced:
            (s1,d1), (s2,d2) = gs(item[0]), gs(item[1])
            obj = super(GeoArray, self).__getitem__(item)
            if not obj.mat is None:
                m, offset = obj.mat[:,1:], obj.mat[:,:1]
                # New origin = old transform applied to the slice start;
                # the slice steps scale the linear part.
                o = np.dot(m, [[s2],[s1]]) + offset
                t = m * [d2, d1]
                obj.mat = np.hstack((o,t))
            return obj
        if not sliced:
            # Any other indexing loses geo-referencing: return a plain ndarray.
            return super().__getitem__(item).__array__()
    def __array_wrap__(self, out_arr, context=None):
        # ufunc results whose spatial shape changed are demoted to plain ndarray.
        if out_arr.shape[:2] != self.shape[:2]:
            out_arr = out_arr.__array__()
        return out_arr
    @property
    def imat(self):
        # Inverse affine transform (world -> pixel), same [offset | linear] layout.
        imat = np.vstack((self.mat[:,[1,2,0]], [[0,0,1]]))
        return np.linalg.inv(imat)[:2,[2,0,1]]
    @property
    def imat1(self): return self.imat.ravel()[[1,2,4,5,0,3]]  # flattened, reordered inverse transform
    def project(self, x, y):
        # Pixel (x, y) -> world coordinates; +0.5 targets the pixel centre.
        # NOTE(review): ``+=`` mutates array arguments in place — confirm callers expect this.
        x += 0.5; y += 0.5
        m, offset = self.mat[:,1:], self.mat[:,:1]
        xy = np.array([x, y]).reshape((2,-1))
        return np.dot(m, xy) + offset
    def invpro(self, e, n):
        # World (e, n) -> fractional pixel coordinates; inverse of ``project``.
        m, offset = self.mat[:,1:], self.mat[:,:1]
        en = np.array([e, n]).reshape((2,-1))
        return np.dot(inv(m), en - offset) - 0.5
    def channels(self, n=None):
        # Without ``n``: number of bands; with ``n``: that band as a 2-D view.
        if n is None:
            return 1 if self.ndim==2 else self.shape[2]
        else:
            return self if self.ndim==2 else self[:,:,n]
    def lookup(self, lut):
        # Apply a lookup table (e.g. a palette) while keeping the geo metadata.
        return GeoArray(lut[self], self.crs, self.mat)
    def getbox(self):
        # (shape, crs, mat) triple describing this array's geo-box.
        return (self.shape[:2], self.crs, self.mat)
def frombox(shp, crs, mat, chan=1, dtype=np.uint8):
    """Return an all-zero GeoArray for the given geo-box (shape, crs, mat).

    ``chan > 1`` appends a channel axis to ``shp``.
    """
    full_shape = shp if chan <= 1 else shp + (chan,)
    return GeoArray(np.zeros(full_shape, dtype=dtype), crs, mat)
def geoarray(arr, crs=None, mat=None):
    """Wrap *arr* as a GeoArray.

    Parameters
    ----------
    arr : array-like
        Pixel data.
    crs : optional
        Coordinate reference system tag stored on the array.
    mat : array-like, optional
        Affine transform values; defaults to ``[[1, 1, 0], [1, 0, 1]]``
        (the same default the previous version used).

    Notes
    -----
    The previous signature used a module-level ``np.array`` as the default
    argument.  ``GeoArray.__new__`` stores ``mat.reshape((2, 3))``, which is
    a *view* sharing memory with that single default array, so mutating one
    array's ``mat`` silently changed the default for every later call.
    Building a fresh array per call fixes that while keeping the same
    default value for existing callers.
    """
    if mat is None:
        mat = np.array([[1, 1, 0], [1, 0, 1]])
    return GeoArray(arr, crs, mat)
if __name__ == '__main__':
    # Smoke test: build a geo-referenced 5x5 array and check that the
    # metadata survives arithmetic and strided slicing.
    prj = np.array([0,1,0, 0,0,1])
    a = GeoArray(np.ones((5,5)), crs=4326, mat=prj)
    print(a.crs)
    print(a.mat)
    b = a+1  # result unused on purpose — only checking `a`'s metadata is untouched
    print(a.crs)
    print(a.mat)
    c = a[1::2,1::2]  # strided crop: __getitem__ should rescale/offset the mat
    print(c.crs)
    print(c.mat)
| [
"numpy.ones",
"numpy.hstack",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.dot",
"numpy.vstack"
] | [((2436, 2468), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 0, 1]]'], {}), '([[1, 1, 0], [1, 0, 1]])\n', (2444, 2468), True, 'import numpy as np\n'), ((2548, 2576), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 0, 1]'], {}), '([0, 1, 0, 0, 0, 1])\n', (2556, 2576), True, 'import numpy as np\n'), ((1386, 1434), 'numpy.vstack', 'np.vstack', (['(self.mat[:, [1, 2, 0]], [[0, 0, 1]])'], {}), '((self.mat[:, [1, 2, 0]], [[0, 0, 1]]))\n', (1395, 1434), True, 'import numpy as np\n'), ((2365, 2391), 'numpy.zeros', 'np.zeros', (['shp'], {'dtype': 'dtype'}), '(shp, dtype=dtype)\n', (2373, 2391), True, 'import numpy as np\n'), ((2590, 2605), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (2597, 2605), True, 'import numpy as np\n'), ((1445, 1464), 'numpy.linalg.inv', 'np.linalg.inv', (['imat'], {}), '(imat)\n', (1458, 1464), True, 'import numpy as np\n'), ((1722, 1735), 'numpy.dot', 'np.dot', (['m', 'xy'], {}), '(m, xy)\n', (1728, 1735), True, 'import numpy as np\n'), ((146, 169), 'numpy.asarray', 'np.asarray', (['input_array'], {}), '(input_array)\n', (156, 169), True, 'import numpy as np\n'), ((1049, 1066), 'numpy.hstack', 'np.hstack', (['(o, t)'], {}), '((o, t))\n', (1058, 1066), True, 'import numpy as np\n'), ((1674, 1690), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (1682, 1690), True, 'import numpy as np\n'), ((1838, 1854), 'numpy.array', 'np.array', (['[e, n]'], {}), '([e, n])\n', (1846, 1854), True, 'import numpy as np\n'), ((1893, 1899), 'numpy.linalg.inv', 'inv', (['m'], {}), '(m)\n', (1896, 1899), False, 'from numpy.linalg import inv\n'), ((958, 981), 'numpy.dot', 'np.dot', (['m', '[[s2], [s1]]'], {}), '(m, [[s2], [s1]])\n', (964, 981), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.model_selection import train_test_split, StratifiedKFold
import shutil
from src.model_torch import train_model_eegnet
from src.utils import set_seed
from src.utils import single_auc_loging
import codecs
from functools import reduce
import os
import sys
sys.path.append(os.path.join(os.path.split(os.getcwd())[0], 'data_loader'))
# from data import DataBuildClassifier
def prepare_dirs(experiment_res_dir, train_subject):
    """Create a clean per-subject experiment folder with a 'checkpoints' subdir.

    Any previous folder for this subject is removed first, so results from
    earlier runs never leak into the new experiment.  Returns the subject
    folder path.
    """
    subj_dir = os.path.join(experiment_res_dir, str(train_subject))
    if os.path.isdir(subj_dir):
        shutil.rmtree(subj_dir)
    os.makedirs(os.path.join(subj_dir, 'checkpoints'))
    return subj_dir
def write_results_table(subjs_test_stats, path_to_exp):
    """Write a per-subject results table plus a mean±std row to res.txt.

    ``subjs_test_stats`` maps subject id -> dict of metric name -> value;
    every subject dict is expected to have the same keys.  The file layout is
    a header line, one ``subj:<values>`` line per subject, and a final
    ``MEAN:<mean±std>`` line (no trailing newline).
    """
    first_subj = list(subjs_test_stats.keys())[0]
    header = list(subjs_test_stats[first_subj].keys())
    columns = {k: [] for k in header}  # per-metric value lists for the MEAN row
    with codecs.open('%s/res.txt' % path_to_exp, 'w', encoding='utf8') as f:
        f.write('subj' + ''.join(' {}'.format(h) for h in header) + '\n')
        for subj, stats in subjs_test_stats.items():
            for k in header:
                columns[k].append(stats[k])
            row = '{}:'.format(subj) + ''.join('{:.2f}'.format(stats[k]) for k in header)
            f.write('{}\n'.format(row))
        f.write('MEAN:' + ''.join(u'{:.{prec}f}±{:.{prec}f}'.format(np.mean(columns[k]), np.std(columns[k]), prec=2)
                                  for k in header))
def separte_last_block(x, y, test_size=0.2):
    """Chronological train/test split that holds out the *last* block of trials.

    Targets (y == 1) and non-targets (y == 0) are split separately with
    ``shuffle=False`` so the test set is always the most recent ``test_size``
    fraction of each class.  Returns (x_train, y_train, x_test, y_test).
    """
    targets, nontargets = x[y == 1], x[y == 0]
    t_tr, t_tst = train_test_split(targets, test_size=test_size, shuffle=False)
    nt_tr, nt_tst = train_test_split(nontargets, test_size=test_size, shuffle=False)
    x_tr = np.concatenate((t_tr, nt_tr), axis=0)
    y_tr = np.hstack((np.ones(len(t_tr)), np.zeros(len(nt_tr))))
    x_tst = np.concatenate((t_tst, nt_tst), axis=0)
    y_tst = np.hstack((np.ones(len(t_tst)), np.zeros(len(nt_tst))))
    return x_tr, y_tr, x_tst, y_tst
def cv_per_subj_test(x, y, params, path_to_subj, test_on_last_block=False, plot_fold_history=False):
    """Cross-validate EEGNet for one subject, then retrain and test on the held-out last block.

    Each CV fold trains for 200 epochs; the epoch with the best validation AUC
    is recorded, and the final model is retrained on the full training part for
    the mean of those best-epoch counts.
    NOTE(review): with ``test_on_last_block=False``, ``x_tr``/``y_tr`` (and the
    final ``return``'s ``test_history``/``final_model``) are never defined, so
    the call would raise NameError — the function is only ever called with
    ``test_on_last_block=True`` in this file.
    """
    model_path = os.path.join(path_to_subj, 'checkpoints')
    best_val_epochs = []
    best_val_aucs = []
    folds = 4 # To preserve split as 0.6 0.2 0.2
    if test_on_last_block:
        x_tr, y_tr, x_tst, y_tst = separte_last_block(x, y, test_size=0.2)
    cv = StratifiedKFold(n_splits=folds, shuffle=True)
    cv_splits = list(cv.split(x_tr, y_tr))
    for fold, (train_idx, val_idx) in enumerate(cv_splits):
        fold_model_path = os.path.join(model_path, '%d' % fold)
        os.makedirs(fold_model_path)
        x_tr_fold, y_tr_fold = x_tr[train_idx], y_tr[train_idx]
        x_val_fold, y_val_fold = x_tr[val_idx], y_tr[val_idx]
        val_history, fold_model = train_model_eegnet(x_tr_fold, y_tr_fold, params, (x_val_fold, y_val_fold), epochs=200,
                                                     batch_size=32, shuffle=True,
                                                     model_path=os.path.join(fold_model_path, 'model{}'.format(fold)))
        best_val_epochs.append(np.argmax(val_history['val_auc']) + 1) # epochs count from 1 (not from 0)
        best_val_aucs.append(np.max(val_history['val_auc']))
        if plot_fold_history:
            single_auc_loging(val_history, 'fold %d' % fold, fold_model_path)
    if test_on_last_block:
        # Retrain on all training data for the averaged best epoch count,
        # validating on the chronologically-last held-out block.
        test_history, final_model = train_model_eegnet(x_tr, y_tr, params, epochs=int(np.mean(best_val_epochs)),
                                                       validation_data=(x_tst, y_tst), batch_size=32, shuffle=True,
                                                       model_path=os.path.join(path_to_subj, 'naive_model'))
        single_auc_loging(test_history, 'test_history', path_to_save=path_to_subj)
        with codecs.open('%s/res.txt' % path_to_subj, 'w', encoding='utf8') as f:
            f.write(u'Val auc %.02f±%.02f\n' % (np.mean(best_val_aucs), np.std(best_val_aucs)))
            f.write('Test auc naive %.02f\n' % (test_history['val_auc'][-1]))
    return {'val_auc': test_history['val_auc'][-1]}, final_model
if __name__ == '__main__':
    # Per-subject EEGNet experiment: CV on early blocks, test on the last block.
    random_state = 0
    set_seed(seed_value=random_state)
    EEGNET_VERSION = 4
    # Hyperparameters (presumably from an earlier hyperparameter search — unverified).
    params_v4 = {'resample_to': 369,
                 'D': 3,
                 'F1': 12,
                 'dropoutRate1': 0.52,
                 'dropoutRate2': 0.36,
                 'lr': 0.00066,
                 'norm_rate': 0.2756199103746462
                 }
    if EEGNET_VERSION == 4:
        params = params_v4
    # NOTE(review): the `from data import DataBuildClassifier` import at the top
    # of this file is commented out — as written this line raises NameError; confirm.
    data = DataBuildClassifier('/home/likan_blk/BCI/NewData')
    all_subjects = [25, 26, 27, 28, 29, 30, 32, 33, 34, 35, 36, 37, 38]
    # all_subjects = [25,26]
    experiment_res_dir = './res/cv_simple_ebci_torch/EEGNET_v%d/' % EEGNET_VERSION
    subjects = data.get_data(all_subjects, shuffle=False, windows=[(0.2, 0.5)], baseline_window=(0.2, 0.3),
                             resample_to=params['resample_to'])
    subjects_set = all_subjects
    subjs_test_stats = {}
    for train_subject in subjects_set:
        path_to_subj = prepare_dirs(experiment_res_dir, train_subject)
        x = subjects[train_subject][0]
        # Reshape (trials, time, channels) -> (trials, 1, channels, time) for EEGNet.
        x = x.transpose(0, 2, 1)[:, np.newaxis, :, :]
        y = subjects[train_subject][1]
        test_stats, model = cv_per_subj_test(x, y, params, path_to_subj, test_on_last_block=True,
                                             plot_fold_history=True)
        subjs_test_stats[train_subject] = test_stats
    write_results_table(subjs_test_stats, path_to_exp=experiment_res_dir)
| [
"numpy.mean",
"numpy.ones",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"numpy.std",
"os.path.join",
"numpy.argmax",
"src.utils.single_auc_loging",
"numpy.max",
"sklearn.model_selection.StratifiedKFold",
"os.getcwd",
"numpy.zeros",
"os.path.isdir",
"numpy.concatenate",
"sh... | [((623, 664), 'os.path.join', 'os.path.join', (['path_to_subj', '"""checkpoints"""'], {}), "(path_to_subj, 'checkpoints')\n", (635, 664), False, 'import os\n'), ((672, 699), 'os.path.isdir', 'os.path.isdir', (['path_to_subj'], {}), '(path_to_subj)\n', (685, 699), False, 'import os\n'), ((741, 764), 'os.makedirs', 'os.makedirs', (['model_path'], {}), '(model_path)\n', (752, 764), False, 'import os\n'), ((1792, 1849), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_t'], {'test_size': 'test_size', 'shuffle': '(False)'}), '(x_t, test_size=test_size, shuffle=False)\n', (1808, 1849), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((1874, 1932), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_nt'], {'test_size': 'test_size', 'shuffle': '(False)'}), '(x_nt, test_size=test_size, shuffle=False)\n', (1890, 1932), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((1944, 1985), 'numpy.concatenate', 'np.concatenate', (['(x_t_tr, x_nt_tr)'], {'axis': '(0)'}), '((x_t_tr, x_nt_tr), axis=0)\n', (1958, 1985), True, 'import numpy as np\n'), ((2075, 2118), 'numpy.concatenate', 'np.concatenate', (['(x_t_tst, x_nt_tst)'], {'axis': '(0)'}), '((x_t_tst, x_nt_tst), axis=0)\n', (2089, 2118), True, 'import numpy as np\n'), ((2355, 2396), 'os.path.join', 'os.path.join', (['path_to_subj', '"""checkpoints"""'], {}), "(path_to_subj, 'checkpoints')\n", (2367, 2396), False, 'import os\n'), ((2607, 2652), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'folds', 'shuffle': '(True)'}), '(n_splits=folds, shuffle=True)\n', (2622, 2652), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((3951, 4025), 'src.utils.single_auc_loging', 'single_auc_loging', (['test_history', '"""test_history"""'], {'path_to_save': 'path_to_subj'}), "(test_history, 'test_history', path_to_save=path_to_subj)\n", (3968, 4025), False, 'from 
src.utils import single_auc_loging\n'), ((4390, 4423), 'src.utils.set_seed', 'set_seed', ([], {'seed_value': 'random_state'}), '(seed_value=random_state)\n', (4398, 4423), False, 'from src.utils import set_seed\n'), ((709, 736), 'shutil.rmtree', 'shutil.rmtree', (['path_to_subj'], {}), '(path_to_subj)\n', (722, 736), False, 'import shutil\n'), ((970, 1031), 'codecs.open', 'codecs.open', (["('%s/res.txt' % path_to_exp)", '"""w"""'], {'encoding': '"""utf8"""'}), "('%s/res.txt' % path_to_exp, 'w', encoding='utf8')\n", (981, 1031), False, 'import codecs\n'), ((2782, 2819), 'os.path.join', 'os.path.join', (['model_path', "('%d' % fold)"], {}), "(model_path, '%d' % fold)\n", (2794, 2819), False, 'import os\n'), ((2828, 2856), 'os.makedirs', 'os.makedirs', (['fold_model_path'], {}), '(fold_model_path)\n', (2839, 2856), False, 'import os\n'), ((4035, 4097), 'codecs.open', 'codecs.open', (["('%s/res.txt' % path_to_subj)", '"""w"""'], {'encoding': '"""utf8"""'}), "('%s/res.txt' % path_to_subj, 'w', encoding='utf8')\n", (4046, 4097), False, 'import codecs\n'), ((2008, 2032), 'numpy.ones', 'np.ones', (['x_t_tr.shape[0]'], {}), '(x_t_tr.shape[0])\n', (2015, 2032), True, 'import numpy as np\n'), ((2034, 2060), 'numpy.zeros', 'np.zeros', (['x_nt_tr.shape[0]'], {}), '(x_nt_tr.shape[0])\n', (2042, 2060), True, 'import numpy as np\n'), ((2142, 2167), 'numpy.ones', 'np.ones', (['x_t_tst.shape[0]'], {}), '(x_t_tst.shape[0])\n', (2149, 2167), True, 'import numpy as np\n'), ((2169, 2196), 'numpy.zeros', 'np.zeros', (['x_nt_tst.shape[0]'], {}), '(x_nt_tst.shape[0])\n', (2177, 2196), True, 'import numpy as np\n'), ((3440, 3470), 'numpy.max', 'np.max', (["val_history['val_auc']"], {}), "(val_history['val_auc'])\n", (3446, 3470), True, 'import numpy as np\n'), ((3514, 3579), 'src.utils.single_auc_loging', 'single_auc_loging', (['val_history', "('fold %d' % fold)", 'fold_model_path'], {}), "(val_history, 'fold %d' % fold, fold_model_path)\n", (3531, 3579), False, 'from src.utils import 
single_auc_loging\n'), ((329, 340), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (338, 340), False, 'import os\n'), ((3336, 3369), 'numpy.argmax', 'np.argmax', (["val_history['val_auc']"], {}), "(val_history['val_auc'])\n", (3345, 3369), True, 'import numpy as np\n'), ((3903, 3944), 'os.path.join', 'os.path.join', (['path_to_subj', '"""naive_model"""'], {}), "(path_to_subj, 'naive_model')\n", (3915, 3944), False, 'import os\n'), ((3694, 3718), 'numpy.mean', 'np.mean', (['best_val_epochs'], {}), '(best_val_epochs)\n', (3701, 3718), True, 'import numpy as np\n'), ((4149, 4171), 'numpy.mean', 'np.mean', (['best_val_aucs'], {}), '(best_val_aucs)\n', (4156, 4171), True, 'import numpy as np\n'), ((4173, 4194), 'numpy.std', 'np.std', (['best_val_aucs'], {}), '(best_val_aucs)\n', (4179, 4194), True, 'import numpy as np\n'), ((1589, 1599), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1596, 1599), True, 'import numpy as np\n'), ((1601, 1610), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (1607, 1610), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/8/16 17:12
# @Author : zzy824
# @File : emojify_main.py
import numpy as np
from emo_utils import *
import emoji
import matplotlib.pyplot as plt
""" Baseline model: Emojifier-V1 """
X_train, Y_train = read_csv('data/train_emoji.csv')
X_test, Y_test = read_csv('data/tesss.csv')
maxLen = len(max(X_train, key=len).split())
# # test case for data
# index = 1
# print(X_train[index], label_to_emoji(Y_train[index]))
Y_oh_train = convert_to_one_hot(Y_train, C=5)
Y_oh_test = convert_to_one_hot(Y_test, C=5)
# # test case for one-hot data
# index = 50
# print(Y_train[index], "is converted into one hot", Y_oh_train[index])
# implementing emojifier V1
word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt')
# # test case for word to vec map
# word = "cucumber"
# index = 289846
# print("the index of", word, "in the vocabulary is", word_to_index[word])
# print("the", str(index) + "th word in the vocabulary is", index_to_word[index])
# sentence to avg
def sentence_to_avg(sentence, word_to_vec_map):
    """
    Average the word-embedding vectors of all words in a sentence.

    Arguments:
    sentence -- string, one training example from X
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its
                       d-dimensional vector representation

    Returns:
    avg -- numpy array of shape (d,) holding the mean word vector of the
           sentence (all zeros for an empty sentence)
    """
    # Split sentence into a list of lower-case words.
    words = sentence.lower().split()
    # Infer the embedding dimension from the map instead of hard-coding 50,
    # so the function works with any GloVe size (50d/100d/200d/300d).
    any_vector = next(iter(word_to_vec_map.values()))
    avg = np.zeros(np.shape(any_vector), dtype=float)
    if not words:
        # Guard: the original divided by len(words) and crashed on "".
        return avg
    for w in words:
        avg += word_to_vec_map[w]
    return avg / len(words)
# # test case for sentence to avg
# avg = sentence_to_avg("Morrocan couscous is my favorite dish", word_to_vec_map)
# print("avg = ", avg)
# model
def model(X, Y, word_to_vec_map, learning_rate=0.01, num_iterations=400):
    """
    Model to train word vector representations in numpy.
    Arguments:
    X -- input data, numpy array of sentences as strings, of shape (m, 1)
    Y -- labels, numpy array of integers between 0 and 7, numpy-array of shape (m, 1)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
    learning_rate -- learning_rate for the stochastic gradient descent algorithm
    num_iterations -- number of iterations
    Returns:
    pred -- vector of predictions, numpy-array of shape (m, 1)
    W -- weight matrix of the softmax layer, of shape (n_y, n_h)
    b -- bias of the softmax layer, of shape (n_y,)
    """
    np.random.seed(1)
    # Define number of training examples
    m = Y.shape[0]  # number of training examples
    n_y = 5  # number of classes
    n_h = 50  # dimensions of the GloVe vectors
    # Initialize parameters using Xavier initialization
    W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
    b = np.zeros((n_y,))
    # Convert Y to Y_onehot with n_y classes
    Y_oh = convert_to_one_hot(Y, C=n_y)
    # Optimization loop
    for t in range(num_iterations):  # Loop over the number of iterations
        for i in range(m):  # Loop over the training examples
            # Average the word vectors of the words from the j'th training example
            avg = sentence_to_avg(X[i], word_to_vec_map)
            # Forward propagate the avg through the softmax layer
            z = np.dot(W, avg) + b
            a = softmax(z)
            # Compute cost using the j'th training label's one hot representation and "A" (the output of the softmax)
            cost = -np.sum(Y_oh[i] * np.log(a))
            # Compute gradients
            dz = a - Y_oh[i]
            dW = np.dot(dz.reshape(n_y, 1), avg.reshape(1, n_h))
            db = dz
            # Update parameters with Stochastic Gradient Descent
            W = W - learning_rate * dW
            b = b - learning_rate * db
        if t % 100 == 0:
            # NOTE(review): `cost` here is from the *last* training example of
            # this epoch, not an epoch average — confirm this is intentional.
            print("Epoch: " + str(t) + " --- cost = " + str(cost))
    pred = predict(X, Y, W, b, word_to_vec_map)
    return pred, W, b
# # test case for model V1
# print(X_train.shape)
# print(Y_train.shape)
# print(np.eye(5)[Y_train.reshape(-1)].shape)
# print(X_train[0])
# print(type(X_train))
# Y = np.asarray([5, 0, 0, 5, 4, 4, 4, 6, 6, 4, 1, 1, 5, 6, 6, 3, 6, 3, 4, 4])
# print(Y.shape)
#
# X = np.asarray(['I am going to the bar tonight', 'I love you', 'miss you my dear',
# 'Lets go party and drinks','Congrats on the new job','Congratulations',
# 'I am so happy for you', 'Why are you feeling bad', 'What is wrong with you',
# 'You totally deserve this prize', 'Let us go play football',
# 'Are you down for football this afternoon', 'Work hard play harder',
# 'It is suprising how people can be dumb sometimes',
# 'I am very disappointed','It is the best day in my life',
# 'I think I will end up alone','My life is so boring','Good job',
# 'Great so awesome'])
#
# print(X.shape)
# print(np.eye(5)[Y_train.reshape(-1)].shape)
# print(type(X_train))
# # train your model and examining test set performance
# pred, W, b = model(X_train, Y_train, word_to_vec_map)
# print(pred)
# print("Training set:")
# pred_train = predict(X_train, Y_train, W, b, word_to_vec_map)
# print('Test set:')
# pred_test = predict(X_test, Y_test, W, b, word_to_vec_map)
#
#
# X_my_sentences = np.array(["i adore you", "i love you", "funny lol", "lets play with a ball", "food is ready", "not feeling happy"])
# Y_my_labels = np.array([[0], [0], [2], [1], [4],[3]])
#
# pred = predict(X_my_sentences, Y_my_labels , W, b, word_to_vec_map)
# print_predictions(X_my_sentences, pred)
#
# print(Y_test.shape)
# print(' '+ label_to_emoji(0)+ ' ' + label_to_emoji(1) + ' ' + label_to_emoji(2)+ ' ' + label_to_emoji(3)+' ' + label_to_emoji(4))
# print(pd.crosstab(Y_test, pred_test.reshape(56,), rownames=['Actual'], colnames=['Predicted'], margins=True))
# plot_confusion_matrix(Y_test, pred_test) | [
"numpy.sqrt",
"numpy.log",
"numpy.dot",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.randn"
] | [((1827, 1839), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (1835, 1839), True, 'import numpy as np\n'), ((2939, 2956), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2953, 2956), True, 'import numpy as np\n'), ((3244, 3260), 'numpy.zeros', 'np.zeros', (['(n_y,)'], {}), '((n_y,))\n', (3252, 3260), True, 'import numpy as np\n'), ((3195, 3220), 'numpy.random.randn', 'np.random.randn', (['n_y', 'n_h'], {}), '(n_y, n_h)\n', (3210, 3220), True, 'import numpy as np\n'), ((3223, 3235), 'numpy.sqrt', 'np.sqrt', (['n_h'], {}), '(n_h)\n', (3230, 3235), True, 'import numpy as np\n'), ((3732, 3746), 'numpy.dot', 'np.dot', (['W', 'avg'], {}), '(W, avg)\n', (3738, 3746), True, 'import numpy as np\n'), ((3934, 3943), 'numpy.log', 'np.log', (['a'], {}), '(a)\n', (3940, 3943), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot as plt
'''
Function for plotting training and validation curves.
'''
def learning_curves(history, multival=None, model_n=None, filepath=None, plot_from_epoch=0, plot_to_epoch=None):
    """Plot mean +/- std training/validation curves aggregated over several runs.

    history -- list (one per run) of history dicts with 'loss', 'acc',
               'val_loss', 'val_acc' and optionally 'mIoU'/'val_mIoU'; all
               runs must have the same number of epochs.
    multival -- optional list of dicts whose 'val_loss'/'val_acc' contain m
                extra validation curves interleaved per epoch.
    model_n -- name used in plot titles and saved file names.
    filepath -- if given, figures are saved under this prefix (non-interactive
                mode); otherwise figures are shown.
    plot_from_epoch / plot_to_epoch -- epoch window to display.
    Returns mean/std of train/val loss and accuracy (arrays over epochs).
    """
    n = len(history)
    num_epochs = len(history[0]['loss'])
    if plot_to_epoch is None:
        plot_to_epoch = num_epochs
    IoU_in_history = 'mIoU' in history[0].keys()
    train_loss = np.zeros((n, num_epochs))
    train_acc = np.zeros_like(train_loss)
    val_loss = np.zeros_like(train_loss)
    val_acc = np.zeros_like(train_loss)
    if IoU_in_history:
        train_mIoU = np.zeros_like(train_loss)
        val_mIoU = np.zeros_like(train_loss)
    # For the additional validation curves, which are at different scales
    if multival is not None:
        m = len(multival[0]['val_loss']) // num_epochs
        val_loss_scales = np.zeros((n, num_epochs*m))
        val_acc_scales = np.zeros_like(val_loss_scales)
    for i in range(len(history)):
        train_loss[i, :] = history[i]['loss']
        train_acc[i, :] = history[i]['acc']
        val_loss[i, :] = history[i]['val_loss']
        val_acc[i, :] = history[i]['val_acc']
        if IoU_in_history:
            train_mIoU[i, :] = history[i]['mIoU']
            val_mIoU[i, :] = history[i]['val_mIoU']
        if multival is not None:
            val_loss_scales[i, :] = multival[i]['val_loss']
            val_acc_scales[i, :] = multival[i]['val_acc']
    # Extracting mean values and standard deviations
    # NOTE(review): ddof=1 yields NaN when only one run is given (n == 1);
    # the commented-out block below suggests this is a known limitation.
    mean_train_loss = np.mean(train_loss, 0)
    std_train_loss = np.std(train_loss, 0, ddof=1)
    mean_train_acc = np.mean(train_acc, 0)
    std_train_acc = np.std(train_acc, 0, ddof=1)
    mean_val_loss = np.mean(val_loss, 0)
    std_val_loss = np.std(val_loss, 0, ddof=1)
    mean_val_acc = np.mean(val_acc, 0)
    std_val_acc = np.std(val_acc, 0, ddof=1)
    if IoU_in_history:
        mean_train_mIoU = np.mean(train_mIoU, 0)
        std_train_mIoU = np.std(train_mIoU, 0, ddof=1)
        mean_val_mIoU = np.mean(val_mIoU, 0)
        std_val_mIoU = np.std(val_mIoU, 0, ddof=1)
    # Third dimension corresponds to scales
    if multival is not None:
        val_loss_scales = np.reshape(val_loss_scales, [n, num_epochs, m])
        val_acc_scales = np.reshape(val_acc_scales, [n, num_epochs, m])
        mean_val_loss_scales, mean_val_acc_scales = np.mean(val_loss_scales, axis=0), np.mean(val_acc_scales, axis=0)
        std_val_loss_scales, std_val_acc_scales = np.std(val_loss_scales, axis=0, ddof=1), np.std(val_acc_scales, axis=0, ddof=1)
    # if n == 0:
    # std_train_loss = 0
    # std_train_acc = 0
    # std_val_loss = 0
    # std_val_acc = 0
    # std_val_loss_scales = 0
    # std_val_acc_scales = 0
    # else:
    # std_train_loss = np.std(train_loss, 0, ddof=1)
    # std_train_acc = np.std(train_acc, 0, ddof=1)
    # std_val_loss = np.std(val_loss, 0, ddof=1)
    # std_val_acc = np.std(val_acc, 0, ddof=1)
    # std_val_loss_scales = np.std(val_loss_scales, axis=0, ddof=1)
    # std_val_acc_scales = np.std(val_acc_scales, axis=0, ddof=1)
    if filepath is not None:
        plt.ioff()
    # Plotting mean Loss curves with stds
    plt.figure(figsize=(10, 10))
    plt.title(model_n + '_loss')
    plt.plot(range(plot_to_epoch - plot_from_epoch), mean_train_loss[plot_from_epoch:plot_to_epoch], color="g", label='training')
    plt.fill_between(np.arange(plot_to_epoch - plot_from_epoch), mean_train_loss[plot_from_epoch:plot_to_epoch] - std_train_loss[plot_from_epoch:plot_to_epoch],
                     mean_train_loss[plot_from_epoch:plot_to_epoch] + std_train_loss[plot_from_epoch:plot_to_epoch], alpha=0.2, color="g")
    plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_loss[plot_from_epoch:plot_to_epoch], color='r', label='validation')
    plt.fill_between(range(plot_to_epoch - plot_from_epoch), mean_val_loss[plot_from_epoch:plot_to_epoch] - std_val_loss[plot_from_epoch:plot_to_epoch],
                     mean_val_loss[plot_from_epoch:plot_to_epoch] + std_val_loss[plot_from_epoch:plot_to_epoch], alpha=0.2, color='r')
    if multival is not None:
        for i in range(m):
            plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_loss_scales[plot_from_epoch:plot_to_epoch, i], label=f'validation_{i+1}')
            plt.fill_between(range(plot_to_epoch - plot_from_epoch), mean_val_loss_scales[plot_from_epoch:plot_to_epoch, i] - std_val_loss_scales[plot_from_epoch:plot_to_epoch, i],
                             mean_val_loss_scales[plot_from_epoch:plot_to_epoch, i] + std_val_loss_scales[plot_from_epoch:plot_to_epoch, i], alpha=0.2, )
    plt.xlabel("Epoch number")
    plt.ylabel("Loss")
    plt.legend()
    if filepath is not None:
        plt.savefig(filepath + model_n + '_loss' + '.png')
    # Plotting mean Accuracy curves with stds
    plt.figure(figsize=(10, 10))
    plt.title(model_n + '_acc')
    plt.ylim(0, 1.05)
    plt.plot(range(plot_to_epoch - plot_from_epoch), mean_train_acc[plot_from_epoch:plot_to_epoch], color="g", label='training')
    plt.fill_between(np.arange(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_train_acc[plot_from_epoch:plot_to_epoch] - std_train_acc[plot_from_epoch:plot_to_epoch]),
                     np.minimum(1, mean_train_acc[plot_from_epoch:plot_to_epoch] + std_train_acc[plot_from_epoch:plot_to_epoch]), alpha=0.2, color="g")
    plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_acc[plot_from_epoch:plot_to_epoch], color='r', label='validation')
    plt.fill_between(range(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_val_acc[plot_from_epoch:plot_to_epoch] - std_val_acc[plot_from_epoch:plot_to_epoch]),
                     np.minimum(1, mean_val_acc[plot_from_epoch:plot_to_epoch] + std_val_acc[plot_from_epoch:plot_to_epoch]), alpha=0.2, color='r')
    if multival is not None:
        for i in range(m):
            plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i], label=f'validation_{i+1}')
            plt.fill_between(range(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i] - std_val_acc_scales[plot_from_epoch:plot_to_epoch, i]),
                             np.minimum(1, mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i] + std_val_acc_scales[plot_from_epoch:plot_to_epoch, i]), alpha=0.2)
    plt.xlabel("Epoch number")
    plt.ylabel("Accuracy")
    plt.legend()
    if filepath is not None:
        plt.savefig(filepath + model_n + '_acc' + '.png')
    if IoU_in_history:
        plt.figure(figsize=(10, 10))
        plt.title(model_n + '_mIoU')
        plt.ylim(0, 1.05)
        plt.plot(range(plot_to_epoch - plot_from_epoch), mean_train_mIoU[plot_from_epoch:plot_to_epoch], color="g",
                 label='training')
        plt.fill_between(np.arange(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_train_mIoU[plot_from_epoch:plot_to_epoch] - std_train_mIoU[plot_from_epoch:plot_to_epoch]),
                         np.minimum(1, mean_train_mIoU[plot_from_epoch:plot_to_epoch] + std_train_mIoU[plot_from_epoch:plot_to_epoch]),
                         alpha=0.2, color="g")
        plt.plot(range(plot_to_epoch - plot_from_epoch), mean_val_mIoU[plot_from_epoch:plot_to_epoch], color='r',
                 label='validation')
        plt.fill_between(range(plot_to_epoch - plot_from_epoch), np.maximum(0, mean_val_mIoU[plot_from_epoch:plot_to_epoch] - std_val_mIoU[plot_from_epoch:plot_to_epoch]),
                         np.minimum(1, mean_val_mIoU[plot_from_epoch:plot_to_epoch] + std_val_mIoU[plot_from_epoch:plot_to_epoch]),
                         alpha=0.2, color='r')
        plt.xlabel("Epoch number")
        plt.ylabel("mIoU")
        plt.legend()
    # NOTE(review): this `else` pairs with the *outer* `if filepath is not None`,
    # so plt.show() runs only when no filepath was given — confirm that is intended.
    if filepath is not None:
        if IoU_in_history:
            plt.savefig(filepath + model_n + '_mIoU' + '.png')
    else:
        plt.show()
    plt.close('all')
    return mean_train_loss, std_train_loss, mean_train_acc, std_train_acc, mean_val_loss, std_val_loss, mean_val_acc, std_val_acc
"numpy.mean",
"numpy.reshape",
"matplotlib.pyplot.savefig",
"numpy.minimum",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.std",
"matplotlib.pyplot.title",
"nump... | [((429, 454), 'numpy.zeros', 'np.zeros', (['(n, num_epochs)'], {}), '((n, num_epochs))\n', (437, 454), True, 'import numpy as np\n'), ((471, 496), 'numpy.zeros_like', 'np.zeros_like', (['train_loss'], {}), '(train_loss)\n', (484, 496), True, 'import numpy as np\n'), ((512, 537), 'numpy.zeros_like', 'np.zeros_like', (['train_loss'], {}), '(train_loss)\n', (525, 537), True, 'import numpy as np\n'), ((552, 577), 'numpy.zeros_like', 'np.zeros_like', (['train_loss'], {}), '(train_loss)\n', (565, 577), True, 'import numpy as np\n'), ((1539, 1561), 'numpy.mean', 'np.mean', (['train_loss', '(0)'], {}), '(train_loss, 0)\n', (1546, 1561), True, 'import numpy as np\n'), ((1583, 1612), 'numpy.std', 'np.std', (['train_loss', '(0)'], {'ddof': '(1)'}), '(train_loss, 0, ddof=1)\n', (1589, 1612), True, 'import numpy as np\n'), ((1634, 1655), 'numpy.mean', 'np.mean', (['train_acc', '(0)'], {}), '(train_acc, 0)\n', (1641, 1655), True, 'import numpy as np\n'), ((1676, 1704), 'numpy.std', 'np.std', (['train_acc', '(0)'], {'ddof': '(1)'}), '(train_acc, 0, ddof=1)\n', (1682, 1704), True, 'import numpy as np\n'), ((1726, 1746), 'numpy.mean', 'np.mean', (['val_loss', '(0)'], {}), '(val_loss, 0)\n', (1733, 1746), True, 'import numpy as np\n'), ((1766, 1793), 'numpy.std', 'np.std', (['val_loss', '(0)'], {'ddof': '(1)'}), '(val_loss, 0, ddof=1)\n', (1772, 1793), True, 'import numpy as np\n'), ((1813, 1832), 'numpy.mean', 'np.mean', (['val_acc', '(0)'], {}), '(val_acc, 0)\n', (1820, 1832), True, 'import numpy as np\n'), ((1851, 1877), 'numpy.std', 'np.std', (['val_acc', '(0)'], {'ddof': '(1)'}), '(val_acc, 0, ddof=1)\n', (1857, 1877), True, 'import numpy as np\n'), ((3231, 3259), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3241, 3259), True, 'from matplotlib import pyplot as plt\n'), ((3264, 3292), 'matplotlib.pyplot.title', 'plt.title', (["(model_n + '_loss')"], {}), "(model_n + '_loss')\n", (3273, 3292), True, 'from matplotlib 
import pyplot as plt\n'), ((4684, 4710), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch number"""'], {}), "('Epoch number')\n", (4694, 4710), True, 'from matplotlib import pyplot as plt\n'), ((4715, 4733), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (4725, 4733), True, 'from matplotlib import pyplot as plt\n'), ((4738, 4750), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4748, 4750), True, 'from matplotlib import pyplot as plt\n'), ((4891, 4919), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4901, 4919), True, 'from matplotlib import pyplot as plt\n'), ((4924, 4951), 'matplotlib.pyplot.title', 'plt.title', (["(model_n + '_acc')"], {}), "(model_n + '_acc')\n", (4933, 4951), True, 'from matplotlib import pyplot as plt\n'), ((4957, 4974), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.05)'], {}), '(0, 1.05)\n', (4965, 4974), True, 'from matplotlib import pyplot as plt\n'), ((6438, 6464), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch number"""'], {}), "('Epoch number')\n", (6448, 6464), True, 'from matplotlib import pyplot as plt\n'), ((6469, 6491), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (6479, 6491), True, 'from matplotlib import pyplot as plt\n'), ((6496, 6508), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6506, 6508), True, 'from matplotlib import pyplot as plt\n'), ((7977, 7993), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7986, 7993), True, 'from matplotlib import pyplot as plt\n'), ((622, 647), 'numpy.zeros_like', 'np.zeros_like', (['train_loss'], {}), '(train_loss)\n', (635, 647), True, 'import numpy as np\n'), ((667, 692), 'numpy.zeros_like', 'np.zeros_like', (['train_loss'], {}), '(train_loss)\n', (680, 692), True, 'import numpy as np\n'), ((878, 907), 'numpy.zeros', 'np.zeros', (['(n, num_epochs * m)'], {}), '((n, num_epochs * m))\n', (886, 907), 
True, 'import numpy as np\n'), ((931, 961), 'numpy.zeros_like', 'np.zeros_like', (['val_loss_scales'], {}), '(val_loss_scales)\n', (944, 961), True, 'import numpy as np\n'), ((1928, 1950), 'numpy.mean', 'np.mean', (['train_mIoU', '(0)'], {}), '(train_mIoU, 0)\n', (1935, 1950), True, 'import numpy as np\n'), ((1976, 2005), 'numpy.std', 'np.std', (['train_mIoU', '(0)'], {'ddof': '(1)'}), '(train_mIoU, 0, ddof=1)\n', (1982, 2005), True, 'import numpy as np\n'), ((2030, 2050), 'numpy.mean', 'np.mean', (['val_mIoU', '(0)'], {}), '(val_mIoU, 0)\n', (2037, 2050), True, 'import numpy as np\n'), ((2074, 2101), 'numpy.std', 'np.std', (['val_mIoU', '(0)'], {'ddof': '(1)'}), '(val_mIoU, 0, ddof=1)\n', (2080, 2101), True, 'import numpy as np\n'), ((2202, 2249), 'numpy.reshape', 'np.reshape', (['val_loss_scales', '[n, num_epochs, m]'], {}), '(val_loss_scales, [n, num_epochs, m])\n', (2212, 2249), True, 'import numpy as np\n'), ((2275, 2321), 'numpy.reshape', 'np.reshape', (['val_acc_scales', '[n, num_epochs, m]'], {}), '(val_acc_scales, [n, num_epochs, m])\n', (2285, 2321), True, 'import numpy as np\n'), ((3173, 3183), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3181, 3183), True, 'from matplotlib import pyplot as plt\n'), ((3445, 3487), 'numpy.arange', 'np.arange', (['(plot_to_epoch - plot_from_epoch)'], {}), '(plot_to_epoch - plot_from_epoch)\n', (3454, 3487), True, 'import numpy as np\n'), ((4788, 4838), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filepath + model_n + '_loss' + '.png')"], {}), "(filepath + model_n + '_loss' + '.png')\n", (4799, 4838), True, 'from matplotlib import pyplot as plt\n'), ((5125, 5167), 'numpy.arange', 'np.arange', (['(plot_to_epoch - plot_from_epoch)'], {}), '(plot_to_epoch - plot_from_epoch)\n', (5134, 5167), True, 'import numpy as np\n'), ((5169, 5281), 'numpy.maximum', 'np.maximum', (['(0)', '(mean_train_acc[plot_from_epoch:plot_to_epoch] - std_train_acc[\n plot_from_epoch:plot_to_epoch])'], {}), '(0, 
mean_train_acc[plot_from_epoch:plot_to_epoch] - std_train_acc\n [plot_from_epoch:plot_to_epoch])\n', (5179, 5281), True, 'import numpy as np\n'), ((5299, 5411), 'numpy.minimum', 'np.minimum', (['(1)', '(mean_train_acc[plot_from_epoch:plot_to_epoch] + std_train_acc[\n plot_from_epoch:plot_to_epoch])'], {}), '(1, mean_train_acc[plot_from_epoch:plot_to_epoch] + std_train_acc\n [plot_from_epoch:plot_to_epoch])\n', (5309, 5411), True, 'import numpy as np\n'), ((5621, 5729), 'numpy.maximum', 'np.maximum', (['(0)', '(mean_val_acc[plot_from_epoch:plot_to_epoch] - std_val_acc[plot_from_epoch:\n plot_to_epoch])'], {}), '(0, mean_val_acc[plot_from_epoch:plot_to_epoch] - std_val_acc[\n plot_from_epoch:plot_to_epoch])\n', (5631, 5729), True, 'import numpy as np\n'), ((5747, 5855), 'numpy.minimum', 'np.minimum', (['(1)', '(mean_val_acc[plot_from_epoch:plot_to_epoch] + std_val_acc[plot_from_epoch:\n plot_to_epoch])'], {}), '(1, mean_val_acc[plot_from_epoch:plot_to_epoch] + std_val_acc[\n plot_from_epoch:plot_to_epoch])\n', (5757, 5855), True, 'import numpy as np\n'), ((6547, 6596), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filepath + model_n + '_acc' + '.png')"], {}), "(filepath + model_n + '_acc' + '.png')\n", (6558, 6596), True, 'from matplotlib import pyplot as plt\n'), ((6629, 6657), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (6639, 6657), True, 'from matplotlib import pyplot as plt\n'), ((6666, 6694), 'matplotlib.pyplot.title', 'plt.title', (["(model_n + '_mIoU')"], {}), "(model_n + '_mIoU')\n", (6675, 6694), True, 'from matplotlib import pyplot as plt\n'), ((6704, 6721), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.05)'], {}), '(0, 1.05)\n', (6712, 6721), True, 'from matplotlib import pyplot as plt\n'), ((7748, 7774), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch number"""'], {}), "('Epoch number')\n", (7758, 7774), True, 'from matplotlib import pyplot as plt\n'), ((7783, 7801), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mIoU"""'], {}), "('mIoU')\n", (7793, 7801), True, 'from matplotlib import pyplot as plt\n'), ((7810, 7822), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7820, 7822), True, 'from matplotlib import pyplot as plt\n'), ((7961, 7971), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7969, 7971), True, 'from matplotlib import pyplot as plt\n'), ((2374, 2406), 'numpy.mean', 'np.mean', (['val_loss_scales'], {'axis': '(0)'}), '(val_loss_scales, axis=0)\n', (2381, 2406), True, 'import numpy as np\n'), ((2408, 2439), 'numpy.mean', 'np.mean', (['val_acc_scales'], {'axis': '(0)'}), '(val_acc_scales, axis=0)\n', (2415, 2439), True, 'import numpy as np\n'), ((2490, 2529), 'numpy.std', 'np.std', (['val_loss_scales'], {'axis': '(0)', 'ddof': '(1)'}), '(val_loss_scales, axis=0, ddof=1)\n', (2496, 2529), True, 'import numpy as np\n'), ((2531, 2569), 'numpy.std', 'np.std', (['val_acc_scales'], {'axis': '(0)', 'ddof': '(1)'}), '(val_acc_scales, axis=0, ddof=1)\n', (2537, 2569), True, 'import numpy as np\n'), ((6898, 6940), 'numpy.arange', 'np.arange', (['(plot_to_epoch - plot_from_epoch)'], {}), '(plot_to_epoch - plot_from_epoch)\n', (6907, 6940), True, 'import numpy as np\n'), ((6942, 7055), 'numpy.maximum', 'np.maximum', (['(0)', '(mean_train_mIoU[plot_from_epoch:plot_to_epoch] - std_train_mIoU[\n plot_from_epoch:plot_to_epoch])'], {}), '(0, mean_train_mIoU[plot_from_epoch:plot_to_epoch] -\n std_train_mIoU[plot_from_epoch:plot_to_epoch])\n', (6952, 7055), True, 'import numpy as np\n'), ((7078, 7191), 'numpy.minimum', 'np.minimum', (['(1)', '(mean_train_mIoU[plot_from_epoch:plot_to_epoch] + std_train_mIoU[\n plot_from_epoch:plot_to_epoch])'], {}), '(1, mean_train_mIoU[plot_from_epoch:plot_to_epoch] +\n std_train_mIoU[plot_from_epoch:plot_to_epoch])\n', (7088, 7191), True, 'import numpy as np\n'), ((7453, 7563), 'numpy.maximum', 'np.maximum', (['(0)', '(mean_val_mIoU[plot_from_epoch:plot_to_epoch] - std_val_mIoU[\n 
plot_from_epoch:plot_to_epoch])'], {}), '(0, mean_val_mIoU[plot_from_epoch:plot_to_epoch] - std_val_mIoU[\n plot_from_epoch:plot_to_epoch])\n', (7463, 7563), True, 'import numpy as np\n'), ((7585, 7695), 'numpy.minimum', 'np.minimum', (['(1)', '(mean_val_mIoU[plot_from_epoch:plot_to_epoch] + std_val_mIoU[\n plot_from_epoch:plot_to_epoch])'], {}), '(1, mean_val_mIoU[plot_from_epoch:plot_to_epoch] + std_val_mIoU[\n plot_from_epoch:plot_to_epoch])\n', (7595, 7695), True, 'import numpy as np\n'), ((7892, 7942), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filepath + model_n + '_mIoU' + '.png')"], {}), "(filepath + model_n + '_mIoU' + '.png')\n", (7903, 7942), True, 'from matplotlib import pyplot as plt\n'), ((6143, 6270), 'numpy.maximum', 'np.maximum', (['(0)', '(mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i] - std_val_acc_scales\n [plot_from_epoch:plot_to_epoch, i])'], {}), '(0, mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i] -\n std_val_acc_scales[plot_from_epoch:plot_to_epoch, i])\n', (6153, 6270), True, 'import numpy as np\n'), ((6297, 6424), 'numpy.minimum', 'np.minimum', (['(1)', '(mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i] + std_val_acc_scales\n [plot_from_epoch:plot_to_epoch, i])'], {}), '(1, mean_val_acc_scales[plot_from_epoch:plot_to_epoch, i] +\n std_val_acc_scales[plot_from_epoch:plot_to_epoch, i])\n', (6307, 6424), True, 'import numpy as np\n')] |
import numpy as np
# Create the boards: a 1-D world of 30 cells (0.0 = dead, 1.0 = alive).
tablero = np.zeros(30)
tableroFuturo = np.zeros(30)
# Initial state: mark the initially-alive cells.
tablero[1] = 1
tablero[4] = 1
tablero[5] = 1
tablero[7] = 1
tablero[9] = 1
tablero[11] = 1
tablero[13] = 1
tablero[14] = 1
# Global running count of live neighbours for the cell under inspection.
contador = 0
def buscarCelulas(matrizBC):
    """Advance the 30-cell board one generation.

    For every cell, ``buscarVecinos`` counts its live neighbours and writes
    the cell's next state into the global ``tableroFuturo``; afterwards the
    scratch board is copied back into ``matrizBC`` and reset to zeros.

    The board length 30 is fixed because ``buscarVecinos`` and
    ``rellenarZero`` hard-code it as well.
    """
    for j in range(30):
        valor = matrizBC[j]
        buscarVecinos(matrizBC, j, valor)
    global tableroFuturo
    cambio(matrizBC, tableroFuturo)
    rellenarZero()

# NOTE(review): a stray module-level triple-quoted string containing a stale
# debugging snippet (a 4-argument call to buscarVecinos plus a print) used to
# sit here; it was inert dead code and has been removed.
def buscarVecinos(matriz, n, valor):
    """Count the live neighbours of cell ``n`` and store its next state.

    The neighbourhood is the window of up to two cells on each side of
    ``n`` (clamped to the 30-cell board), excluding ``n`` itself.  The
    verdict of ``vidaMuerte`` is written into the global ``tableroFuturo``
    and the global counter ``contador`` is reset afterwards.

    The original five per-boundary branches (n==0, n==1, 2<=n<=27, n==28,
    n==29) all implemented this same clamped window and are collapsed here;
    the debug prints they contained were marked "BORRAR ESTO" ("delete
    this") by the author and have been removed.
    """
    global contador
    # Clamped neighbourhood window: [n-2, n+2] intersected with [0, 29].
    for j in range(max(0, n - 2), min(30, n + 3)):
        if j != n and int(matriz[j]) == 1:
            contador = contador + 1
    vm = vidaMuerte(contador, valor)
    global tableroFuturo
    # vm may be None when `valor` is neither 0 nor 1; the cell is then left
    # untouched, exactly as in the original branch structure.
    if vm == True:
        tableroFuturo[n] = 1
    elif vm == False:
        tableroFuturo[n] = 0
    contador = 0
# Decide whether a cell is alive in the next generation.
def vidaMuerte(c, v):
    """Return the next state of a cell given its neighbour count.

    c -- number of live neighbours
    v -- current state (0 = dead, 1 = alive)

    A dead cell is born exactly when it has 3 live neighbours; a live cell
    survives when it has at least 2.  For any other value of ``v`` the
    function falls through and returns None, which the caller treats as
    "leave the cell unchanged".
    """
    if v == 0:
        return c == 3
    if v == 1:
        return c >= 2
# Reset the scratch board used while computing the next generation.
def rellenarZero():
    """Overwrite the global ``tableroFuturo`` with a fresh 30-cell zero board."""
    global tableroFuturo
    tableroFuturo = np.zeros(30, dtype=float)
# Copy the freshly computed generation back onto the live board.
def cambio(t, tf):
    """Copy every element of ``tf`` into ``t`` in place.

    Generalised from the original hard-coded ``range(30)`` to the actual
    length of ``t``; behaviour is identical for the 30-cell boards used in
    this script, and boards of any other length now work too.
    """
    for j in range(len(t)):
        t[j] = tf[j]
# --- Run the game ---------------------------------------------------------
# Print the initial state, then advance and print three generations,
# stopping early if every cell becomes alive.
print("Estado inicial")
print(tablero)
for i in range(3):
    # BUG FIX: the original printed a literal "/n"; "\n" is the newline.
    print("\nIteración: ", i + 1)
    buscarCelulas(tablero)
    print(tablero)
    # Count live cells; the inner loop variable no longer shadows the
    # outer generation counter `i` as it did originally.
    termino = 0
    for celda in tablero:
        if celda == 1:
            termino = termino + 1
    if termino == 30:
        print("Llego hasta la meta. Sobrepoblación.")
        break
| [
"numpy.zeros"
] | [((52, 64), 'numpy.zeros', 'np.zeros', (['(30)'], {}), '(30)\n', (60, 64), True, 'import numpy as np\n'), ((82, 94), 'numpy.zeros', 'np.zeros', (['(30)'], {}), '(30)\n', (90, 94), True, 'import numpy as np\n'), ((2263, 2275), 'numpy.zeros', 'np.zeros', (['(30)'], {}), '(30)\n', (2271, 2275), True, 'import numpy as np\n')] |
""" String representation for various data objects
"""
from collections import OrderedDict as odict
import numpy as np
import json
import dimarray as da
from dimarray.config import get_option
def str_attrs(meta, indent=4):
    """Render metadata as one indented ``key: repr(value)`` line per entry."""
    pad = " " * indent
    rendered = [pad + "{}: {}".format(k, repr(meta[k])) for k in meta.keys()]
    return "\n".join(rendered)
def repr_attrs(meta):
    """Render metadata as one 4-space-indented ``key: repr(value)`` line per entry.

    Four commented-out alternative implementations (odict/json based) that
    followed the return statement were dead code and have been removed.
    """
    return "\n".join(" "*4+"{}: {}".format(key, repr(meta[key])) for key in meta.keys())
def repr_axis(self, metadata=False):
    """One-line description of an axis: its name, size and value bounds.

    Datetime-like bounds (dtype kind 'M') are rendered via str(), float
    bounds are used as-is, and everything else goes through repr().  With
    ``metadata=True`` the axis attributes are appended, one per line.
    """
    lo, hi = self._bounds()
    kind = self.dtype.kind
    if kind == 'M':
        lo, hi = str(lo), str(hi)
    elif kind != 'f':
        lo, hi = repr(lo), repr(hi)
    out = "{dim} ({size}): {first} to {last}".format(
        dim=self.name, size=self.size, first=lo, last=hi)
    if metadata and len(self.attrs) > 0:
        out += "\n" + str_attrs(self.attrs)
    return out
def repr_axes(self, metadata=False):
    """Multi-line axis listing: one ``<index> / <axis repr>`` line per axis."""
    parts = []
    for idx, axis in enumerate(self):
        parts.append("{i} / {axis}".format(i=idx, axis=repr_axis(axis, metadata=metadata)))
    return "\n".join(parts)

str_axes = repr_axes
def repr_dimarray_inline(self, metadata=False, name=None):
    """Inline representation of a dimarray: ``<name>: <dims-or-scalar>``.

    When no ``name`` is given, the array's own name is used if present.
    With ``metadata=True`` attribute lines are appended.
    """
    if name is None and hasattr(self, 'name'):
        name = self.name
    dims = self.dims
    if len(dims) > 0:
        descr = repr(dims)
    else:
        # 0-dimensional array: describe the scalar value instead of dims.
        val = self.values[()]
        if val.ndim == 1 and val.size == 1:
            val = val[0]
        descr = repr(val)
    text = name + ": " + descr
    if metadata and len(self.attrs) > 0:
        text += "\n" + str_attrs(self.attrs)
    return text
def repr_dimarray(self, metadata=False, lazy=False):
    """Multi-line representation: header, axes, optional attributes, data.

    With ``lazy=True`` (e.g. on-disk variables) the header shows the class
    name, variable name and size, and the data block is omitted entirely.
    Otherwise the header carries descriptive statistics and the data is
    printed unless it exceeds the 'display.max' option.
    """
    cls_name = self.__class__.__name__
    if lazy:
        header = cls_name + ": " + repr(self.name) + " (%i" % self.size + ")"
    else:
        header = cls_name.lower() + ": " + stats_dimarray(self)
    parts = [header]
    # axes
    if self.ndim > 0:
        parts.append(repr_axes(self.axes, metadata=metadata))
    # metadata
    if metadata and len(self.attrs) > 0:
        parts.append("attributes:")
        parts.append(repr_attrs(self.attrs))
    # the data itself: omitted when lazy, abbreviated when too large
    if lazy:
        data_line = ""
    elif self.size > get_option('display.max'):
        data_line = "array(...)"
    else:
        data_line = repr(self.values)
    if data_line:
        parts.append(data_line)
    return "\n".join(parts)

str_dimarray = repr_dimarray
def stats_dimarray(self, backcompatibility=True):
    """Descriptive statistics of an array's values, as a short string.

    With ``backcompatibility=True`` (the default) the historical
    "<n> non-null elements (<m> null)" string is produced; otherwise a
    compact "key:value" listing (nan count, min, max) is built.
    """
    try:
        if self.ndim > 0:
            nonnull = np.size(self.values[~np.isnan(self.values)])
        else:
            nonnull = int(~np.isnan(self.values))
    except TypeError: # e.g. non-numeric (object) dtype: count everything
        nonnull = self.size

    if backcompatibility:
        stats = "{} non-null elements ({} null)".format(nonnull, self.size-nonnull)
    else:
        desc = odict()
        if nonnull < self.size:
            desc['nans'] = self.size - nonnull
        try:
            # numeric types only; silently skip otherwise (best effort)
            desc['min'] = self.min(skipna=True)
            desc['max'] = self.max(skipna=True)
        except Exception:
            pass
        # BUG FIX: desc values are numbers, so they must be converted with
        # str() before concatenation (the original raised TypeError here).
        stats = ", ".join([k + ':' + str(desc[k]) for k in desc])
    return stats
def repr_dataset(self, metadata=False):
    """Multi-line representation of a Dataset: header, dimensions, variables
    and (optionally) global attributes.

    Non-Dataset instances (presumably on-disk datasets; they expose
    ``self.nc.file_format``) also report the file format in the header --
    TODO confirm against the on-disk dataset class.
    """
    # variable names
    nms = [nm for nm in self.keys()]
    # header
    if not isinstance(self, da.Dataset):
        header = self.__class__.__name__+" of %s variables (%s)" % (len(nms), self.nc.file_format)
    else:
        header = "Dataset of %s variables" % len(nms)
    if len(nms) == 1: header = header.replace('variables','variable')
    lines = []
    lines.append(header)
    # display dimensions name, size, first and last value
    if len(self.axes) > 0:
        if metadata:
            lines.append("")
            lines.append("//dimensions:")
        lines.append(repr_axes(self.axes, metadata=metadata))
    # display variables name, shape and dimensions
    # (the original also assigned an unused `dims = self.dims` on every
    # iteration; that dead local has been removed)
    if len(self.keys()) > 0:
        if metadata:
            lines.append("")
            lines.append("//variables:")
        for nm in nms:
            line = repr_dimarray_inline(self[nm], metadata=metadata, name=nm)
            lines.append(line)
    # global metadata
    if metadata and len(self.attrs) > 0:
        lines.append("")
        lines.append("//global attributes:")
        lines.append(repr_attrs(self.attrs))
    return "\n".join(lines)

str_dataset = repr_dataset
| [
"collections.OrderedDict",
"dimarray.config.get_option",
"numpy.isnan"
] | [((3620, 3627), 'collections.OrderedDict', 'odict', ([], {}), '()\n', (3625, 3627), True, 'from collections import OrderedDict as odict\n'), ((2966, 2991), 'dimarray.config.get_option', 'get_option', (['"""display.max"""'], {}), "('display.max')\n", (2976, 2991), False, 'from dimarray.config import get_option\n'), ((3396, 3417), 'numpy.isnan', 'np.isnan', (['self.values'], {}), '(self.values)\n', (3404, 3417), True, 'import numpy as np\n'), ((3331, 3352), 'numpy.isnan', 'np.isnan', (['self.values'], {}), '(self.values)\n', (3339, 3352), True, 'import numpy as np\n')] |
# MIT License
#
# Copyright (c) 2016-2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import alib
import vnep_approx
import os
# The try/except that used to guard this import was a Python-2 leftover
# (a cPickle fallback); under Python 3 both branches imported the very same
# module, so a plain import suffices.
import pickle

import matplotlib
# Select the backend before pyplot is imported so the choice takes effect.
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
import logging

logger = logging.getLogger(__name__)
def extract_parameter_range(scenario_parameter_space_dict, key):
    """Depth-first search for ``key`` inside a nested scenario-parameter space.

    Returns a ``(path, values)`` tuple, where ``path`` is the list of dict
    keys leading to ``key`` (with a literal 0 inserted for every traversed
    single-element list) and ``values`` is whatever is stored under the key.
    Returns None when ``key`` cannot be found.  Lists with more than one
    element are not descended into.
    """
    if not isinstance(scenario_parameter_space_dict, dict):
        return None
    for name, entry in scenario_parameter_space_dict.items():
        if name == key:
            return [key], entry
        if isinstance(entry, list):
            # only single-element lists are searched
            if len(entry) != 1:
                continue
            found = extract_parameter_range(entry[0], key)
            if found is not None:
                sub_path, values = found
                return [name, 0] + sub_path, values
        elif isinstance(entry, dict):
            found = extract_parameter_range(entry, key)
            if found is not None:
                sub_path, values = found
                return [name] + sub_path, values
    return None
def lookup_scenarios_having_specific_values(scenario_parameter_space_dict, path, value):
    """Follow ``path`` through the nested scenario-parameter dict and return
    the entry stored under ``value`` at the end of it.

    String path components select dict keys.  Integer components (the list
    indices produced by extract_parameter_range) are skipped rather than
    indexed -- presumably because those list levels are flattened in the
    scenario-parameter dict; confirm against the container structure.
    The input ``path`` is copied and never mutated.
    """
    current_path = path[:]
    current_dict = scenario_parameter_space_dict
    while len(current_path) > 0:
        if isinstance(current_path[0], str):
            current_dict = current_dict[current_path[0]]
            current_path.pop(0)
        elif isinstance(current_path[0], int):
            # BUG FIX: the original tested `== 0` and therefore looped
            # forever on any other integer index; accept any int, matching
            # the sibling lookup_scenario_parameter_room_dicts_on_path.
            current_path.pop(0)
        else:
            raise RuntimeError("Unexpected path component: {}".format(current_path[0]))
    return current_dict[value]
def lookup_scenario_parameter_room_dicts_on_path(scenario_parameter_space_dict, path):
    """Collect the nested containers visited while following ``path``.

    Returns the list of dicts/lists encountered along the way, starting
    with the root container itself; string components are dict keys and
    integer components are list indices.  Raises RuntimeError on any other
    component type.
    """
    remaining = path[:]
    node = scenario_parameter_space_dict
    visited = []
    while remaining:
        visited.append(node)
        step = remaining.pop(0)
        if isinstance(step, str):
            node = node[step]
        elif isinstance(step, int):
            node = node[step]
        else:
            raise RuntimeError("Could not lookup dicts.")
    return visited
def evaluate_baseline_and_randround(dc_seplp_dynvmp,
                                    seplp_dynvmp_algorithm_id,
                                    seplp_dynvmp_execution_config,
                                    dc_randround,
                                    randround_algorithm_id,
                                    randround_execution_config,
                                    exclude_generation_parameters=None,
                                    parameter_filter_keys=None,
                                    show_plot=False,
                                    save_plot=True,
                                    forbidden_scenario_ids=None,
                                    output_path="./",
                                    output_filetype="png",
                                    request_sets=None):
    """ Main function for evaluation, creating plots and saving them in a specific directory hierarchy.

    :param dc_seplp_dynvmp: unpickled datacontainer of baseline (separation LP / DynVMP) experiments
    :param seplp_dynvmp_algorithm_id: algorithm id of the baseline algorithm
    :param seplp_dynvmp_execution_config: execution config (numeric) of the baseline algorithm execution
    :param dc_randround: unpickled datacontainer of randomized rounding experiments
    :param randround_algorithm_id: algorithm id of the randround algorithm
    :param randround_execution_config: execution config (numeric) of the randround algorithm execution
    :param exclude_generation_parameters: specific generation parameters that shall be excluded from the evaluation.
                                          These won't show in the plots and will also not be shown on axis labels etc.
    :param parameter_filter_keys: name of parameters according to which the results shall be filtered
    :param show_plot: Boolean: shall plots be shown
    :param save_plot: Boolean: shall the plots be saved
    :param forbidden_scenario_ids: list / set of scenario ids that shall not be considered in the evaluation
    :param output_path: path to which the results shall be written
    :param output_filetype: filetype supported by matplotlib to export figures
    :param request_sets: list of lists of request counts to group the ECDF curves by;
                         defaults to [[40, 60], [80, 100]]
    :return: None
    """
    # BUG FIX: `request_sets` previously used a mutable list literal as its
    # default value, which Python shares across calls; use a None sentinel.
    if request_sets is None:
        request_sets = [[40, 60], [80, 100]]

    if forbidden_scenario_ids is None:
        forbidden_scenario_ids = set()

    if exclude_generation_parameters is not None:
        for key, values_to_exclude in exclude_generation_parameters.items():
            # locate the parameter inside the (nested) scenario parameter room
            parameter_filter_path, parameter_values = extract_parameter_range(
                dc_seplp_dynvmp.scenario_parameter_container.scenarioparameter_room, key)

            parameter_dicts_baseline = lookup_scenario_parameter_room_dicts_on_path(
                dc_seplp_dynvmp.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)
            # NOTE(review): renamed from `parameter_dicts_seplpdynvmp` -- this
            # one is built from the randround container; the old name was misleading.
            parameter_dicts_randround = lookup_scenario_parameter_room_dicts_on_path(
                dc_randround.scenario_parameter_container.scenarioparameter_room, parameter_filter_path)

            for value_to_exclude in values_to_exclude:

                if value_to_exclude not in parameter_values:
                    raise RuntimeError("The value {} is not contained in the list of parameter values {} for key {}".format(
                        value_to_exclude, parameter_values, key
                    ))

                # add respective scenario ids to the set of forbidden scenario ids
                forbidden_scenario_ids.update(set(lookup_scenarios_having_specific_values(
                    dc_seplp_dynvmp.scenario_parameter_container.scenario_parameter_dict, parameter_filter_path, value_to_exclude)))

            # remove the respective values from the scenario parameter room such that these
            # are not considered when constructing e.g. axes
            parameter_dicts_baseline[-1][key] = [value for value in parameter_dicts_baseline[-1][key] if
                                                 value not in values_to_exclude]
            parameter_dicts_randround[-1][key] = [value for value in parameter_dicts_randround[-1][key] if
                                                  value not in values_to_exclude]

    # per-scenario solution lookup for the baseline, excluding forbidden scenarios
    sep_lp_dynvmp_data_set = {scenario_index:
                                  dc_seplp_dynvmp.algorithm_scenario_solution_dictionary[seplp_dynvmp_algorithm_id][
                                      scenario_index][seplp_dynvmp_execution_config]
                              for scenario_index in
                              list(dc_seplp_dynvmp.algorithm_scenario_solution_dictionary[
                                       seplp_dynvmp_algorithm_id].keys()) if scenario_index not in forbidden_scenario_ids}

    # per-scenario solution lookup for the randomized rounding algorithm
    randround_data_set = {scenario_index:
                              dc_randround.algorithm_scenario_solution_dictionary[randround_algorithm_id][
                                  scenario_index][randround_execution_config]
                          for scenario_index in
                          list(dc_randround.algorithm_scenario_solution_dictionary[
                                   randround_algorithm_id].keys()) if scenario_index not in forbidden_scenario_ids}

    plot_comparison_separation_dynvmp_vs_lp(sep_lp_dynvmp_data_set=sep_lp_dynvmp_data_set,
                                            randround_data_set=randround_data_set,
                                            dc_seplp_dynvmp=dc_seplp_dynvmp,
                                            request_sets=request_sets,
                                            output_path=output_path,
                                            output_filetype=output_filetype)
def plot_comparison_separation_dynvmp_vs_lp(sep_lp_dynvmp_data_set,
                                            randround_data_set,
                                            dc_seplp_dynvmp,
                                            request_sets,
                                            output_path,
                                            output_filetype):
    """Plot ECDFs of the per-scenario speedup of the separation LP (DynVMP)
    over the cactus LP and save the figure to ``output_path``.

    For each group of request counts in ``request_sets`` two curves are
    drawn: the speedup including the tree-decomposition computation time
    and the speedup excluding it.

    :param sep_lp_dynvmp_data_set: dict scenario_id -> separation LP / DynVMP result
    :param randround_data_set: dict scenario_id -> randomized rounding result
    :param dc_seplp_dynvmp: data container providing the scenario parameter info
    :param request_sets: list of lists of request counts (each of length 1 or 2);
                         each inner list yields one pair of curves
    :param output_path: directory to write the figure into
    :param output_filetype: file extension / matplotlib export format
    """
    logger.info(sep_lp_dynvmp_data_set)

    scenarioparameter_room = dc_seplp_dynvmp.scenario_parameter_container.scenarioparameter_room
    scenario_parameter_dict = dc_seplp_dynvmp.scenario_parameter_container.scenario_parameter_dict

    # path into the parameter room that selects the number_of_requests parameter
    filter_path_number_of_requests, list_number_of_requests = extract_parameter_range(scenarioparameter_room,
                                                                                      "number_of_requests")
    logger.info(list_number_of_requests)

    fix, ax = plt.subplots(figsize=(5, 3.5))

    def get_color(value):
        return plt.cm.inferno(value)

    colors = [get_color(0.5),get_color(0.0), get_color(0.75), get_color(0.25)] #get_color(0.7),
    #colors = [get_color(0.75), get_color(0.55), get_color(0.35), get_color(0.0)]

    linestyles = ['-', ':']

    # proxy artists for the first legend (with / without tree decomposition)
    with_td = matplotlib.lines.Line2D([], [], color='#333333', linestyle=linestyles[0], label=r"incl. $\mathcal{T}_r$ comp.", linewidth=2)
    wo_td = matplotlib.lines.Line2D([], [], color='#333333', linestyle=linestyles[1], label=r"excl. $\mathcal{T}_r$ comp.", linewidth=2.75)

    second_legend_handlers = []

    max_observed_value = 0

    for request_number_index, number_of_requests_ in enumerate(request_sets):
        # collect the scenario ids belonging to this group of request counts
        scenario_ids_to_consider = set()
        for number_of_requests in number_of_requests_:
            #do the code!
            scenario_ids_of_requests = lookup_scenarios_having_specific_values(scenario_parameter_dict, filter_path_number_of_requests, number_of_requests)
            scenario_ids_to_consider = scenario_ids_to_consider.union(scenario_ids_of_requests)

        speedups_real = []
        speedups_wotd = [] # without tree decomposition
        relative_speedup_sep_lp_wo_td = []

        for scenario_id in scenario_ids_to_consider:
            # separation LP runtime, with and without the tree-decomposition part
            seplp_with_decomposition = sep_lp_dynvmp_data_set[scenario_id].lp_time_preprocess + sep_lp_dynvmp_data_set[scenario_id].lp_time_optimization
            seplp_without_decomposition = seplp_with_decomposition - (sep_lp_dynvmp_data_set[scenario_id].lp_time_tree_decomposition.mean * sep_lp_dynvmp_data_set[scenario_id].lp_time_tree_decomposition.value_count)

            # total runtime of the cactus LP of the randomized rounding algorithm
            randround_lp_runtime = randround_data_set[scenario_id].meta_data.time_preprocessing + \
                                   randround_data_set[scenario_id].meta_data.time_optimization + \
                                   randround_data_set[scenario_id].meta_data.time_postprocessing

            relative_speedup_sep_lp_wo_td.append(seplp_with_decomposition / seplp_without_decomposition)
            speedups_real.append(randround_lp_runtime / seplp_with_decomposition)
            speedups_wotd.append(randround_lp_runtime / seplp_without_decomposition)

        speedup_real = sorted(speedups_real)
        speedup_wotd = sorted(speedups_wotd)

        logger.info("Relative when excluding tree decomposition computation {} requests:\n"
                    "mean: {}\n".format(number_of_requests,
                                        np.mean(relative_speedup_sep_lp_wo_td)))

        logger.info("Relative speedup compared to cactus LP for {} requests:\n"
                    "with tree decomposition (mean): {}\n"
                    "without tree decomposition (mean): {}".format(number_of_requests,
                                                                   np.mean(speedups_real),
                                                                   np.mean(speedups_wotd)))

        # ECDF of the speedup including tree-decomposition time; the curve is
        # extended to 0.5 on the left and to the overall maximum on the right
        max_observed_value = np.maximum(max_observed_value, speedup_real[-1])
        yvals = np.arange(1, len(speedup_real) + 1) / float(len(speedup_real))
        yvals = np.insert(yvals, 0, 0.0, axis=0)
        yvals = np.append(yvals, [1.0])
        speedup_real.append(max_observed_value)
        speedup_real.insert(0, 0.5)
        ax.semilogx(speedup_real, yvals, color=colors[request_number_index], linestyle=linestyles[0],
                    linewidth=2.75, alpha=1)

        # ECDF of the speedup excluding tree-decomposition time
        max_observed_value = np.maximum(max_observed_value, speedup_wotd[-1])
        yvals = np.arange(1, len(speedup_wotd) + 1) / float(len(speedup_wotd))
        yvals = np.insert(yvals, 0, 0.0, axis=0)
        yvals = np.append(yvals, [1.0])
        speedup_wotd.append(max_observed_value)
        speedup_wotd.insert(0, 0.5)
        ax.semilogx(speedup_wotd, yvals, color=colors[request_number_index], linestyle=linestyles[1],
                    linewidth=2.75, alpha=1)

        # legend entry for this request-count group
        if len(number_of_requests_) == 2:
            second_legend_handlers.append(matplotlib.lines.Line2D([], [], color=colors[request_number_index], alpha=1, linestyle="-",
                                                     label=("{} & {}".format(number_of_requests_[0], number_of_requests_[1])).ljust(3), linewidth=2.5))
        else:
            second_legend_handlers.append(
                matplotlib.lines.Line2D([], [], color=colors[request_number_index], alpha=1, linestyle="-",
                                        label=("{}".format(number_of_requests_[0])).ljust(
                                            3), linewidth=2.5))

    # first legend: with / without tree-decomposition time
    first_legend = plt.legend(handles=[with_td, wo_td], loc=4, fontsize=14, title="", handletextpad=.35,
                              borderaxespad=0.1, borderpad=0.2, handlelength=1)
    first_legend.get_frame().set_alpha(1.0)
    first_legend.get_frame().set_facecolor("#FFFFFF")
    plt.setp(first_legend.get_title(), fontsize=15)
    plt.gca().add_artist(first_legend)
    # ax.tick_params(labelright=True)

    # print second_legend_handlers

    # second legend: one entry per request-count group
    second_legend = plt.legend(handles=second_legend_handlers, loc=2, fontsize=14, title="#requests", handletextpad=.35,
                               borderaxespad=0.175, borderpad=0.2, handlelength=2)
    #plt.gca().add_artist(second_legend)
    plt.setp(second_legend.get_title(), fontsize=15)
    second_legend.get_frame().set_alpha(1.0)
    second_legend.get_frame().set_facecolor("#FFFFFF")

    # first_legend = plt.legend(title="Bound($\mathrm{MIP}_{\mathrm{MCF}})$", handles=root_legend_handlers, loc=(0.225,0.0125), fontsize=14, handletextpad=0.35, borderaxespad=0.175, borderpad=0.2)
    # plt.setp(first_legend.get_title(), fontsize='15')
    # plt.gca().add_artist(first_legend)

    # plt.setp("TITLE", fontsize='15')

    ax.set_title("Cactus LP Runtime Comparison", fontsize=17)
    ax.set_xlabel(r"Speedup: time($\mathsf{LP}_{\mathsf{Cactus}}$) / time($\mathsf{LP}_{\mathsf{DynVMP}}$)",
                  fontsize=16)
    ax.set_ylabel("ECDF", fontsize=16)

    ax.set_xlim(0.4, max_observed_value * 1.15)

    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(14)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(14)

    ax.set_xticks([0.5, 1, 5, 20, 60, ], minor=False)
    ax.set_xticks([2, 3, 4, 10, 30, 40], minor=True)
    ax.set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], minor=False)
    ax.set_yticks([0.1, 0.3, 0.5, 0.7, 0.9], minor=True)
    # ax.set_yticks([x*0.1 for x in range(1,10)], minor=True)

    # use plain (non-scientific) numbers on the log x-axis
    ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    ax.set_xticklabels([], minor=True)

    ax.grid(True, which="both", linestyle=":", color='k', alpha=0.7, linewidth=0.33)

    plt.tight_layout()
    file_to_write = os.path.join(output_path, "ecdf_speedup_cactus_lp_vs_separation_dynvmp." + output_filetype)
    plt.savefig(file_to_write)
def plot_comparison_separation_dynvmp_vs_lp_orig(sep_lp_dynvmp_data_set,
                                                 randround_data_set,
                                                 dc_seplp_dynvmp):
    """Plot ECDFs of the runtime speedup of the separation-DynVMP LP over the Cactus LP.

    For each value of the ``number_of_requests`` scenario parameter, two ECDF
    curves are drawn on a log-x axis: the speedup including tree-decomposition
    computation (solid) and excluding it (dotted). The figure is written to
    ``ecdf_speedup_cactus_lp_vs_separation_dynvmp.pdf`` in the working directory.

    Parameters:
        sep_lp_dynvmp_data_set: dict mapping scenario id -> separation-LP/DynVMP
            result (provides lp_time_preprocess, lp_time_optimization and
            lp_time_tree_decomposition aggregates).
        randround_data_set: dict mapping scenario id -> randomized-rounding
            result (provides meta_data timing fields of the Cactus LP).
        dc_seplp_dynvmp: data container whose scenario_parameter_container is
            used to group scenario ids by number of requests.
    """
    logger.info(sep_lp_dynvmp_data_set)
    scenarioparameter_room = dc_seplp_dynvmp.scenario_parameter_container.scenarioparameter_room
    scenario_parameter_dict = dc_seplp_dynvmp.scenario_parameter_container.scenario_parameter_dict
    filter_path_number_of_requests, list_number_of_requests = extract_parameter_range(scenarioparameter_room,
                                                                                      "number_of_requests")
    logger.info(list_number_of_requests)
    fix, ax = plt.subplots(figsize=(5, 3.5))
    def get_color(value):
        return plt.cm.inferno(value)
    colors = [get_color(0.75), get_color(0.55),get_color(0.35),get_color(0.0)]
    linestyles = ['-', ':']
    # Legend proxies for the two line styles (with / without tree-decomposition time).
    with_td = matplotlib.lines.Line2D([], [], color='#333333', linestyle=linestyles[0], label=r"incl. $\mathcal{T}_r$ comp.", linewidth=2)
    wo_td = matplotlib.lines.Line2D([], [], color='#333333', linestyle=linestyles[1], label=r"excl. $\mathcal{T}_r$ comp.", linewidth=2.75)
    second_legend_handlers = []
    max_observed_value = 0
    for request_number_index, number_of_requests in enumerate(list_number_of_requests):
        #do the code!
        scenario_ids_of_requests = lookup_scenarios_having_specific_values(scenario_parameter_dict, filter_path_number_of_requests, number_of_requests)
        speedups_real = []
        speedups_wotd = [] # without tree decomposition
        relative_speedup_sep_lp_wo_td = []
        for scenario_id in scenario_ids_of_requests:
            # Total DynVMP LP runtime; the "without decomposition" variant subtracts
            # the accumulated tree-decomposition time (mean * count).
            seplp_with_decomposition = sep_lp_dynvmp_data_set[scenario_id].lp_time_preprocess + sep_lp_dynvmp_data_set[scenario_id].lp_time_optimization
            seplp_without_decomposition = seplp_with_decomposition - (sep_lp_dynvmp_data_set[scenario_id].lp_time_tree_decomposition.mean * sep_lp_dynvmp_data_set[scenario_id].lp_time_tree_decomposition.value_count)
            randround_lp_runtime = randround_data_set[scenario_id].meta_data.time_preprocessing + \
                                   randround_data_set[scenario_id].meta_data.time_optimization + \
                                   randround_data_set[scenario_id].meta_data.time_postprocessing
            relative_speedup_sep_lp_wo_td.append(seplp_with_decomposition / seplp_without_decomposition)
            speedups_real.append(randround_lp_runtime / seplp_with_decomposition)
            speedups_wotd.append(randround_lp_runtime / seplp_without_decomposition)
        speedup_real = sorted(speedups_real)
        speedup_wotd = sorted(speedups_wotd)
        logger.info("Relative when excluding tree decomposition computation {} requests:\n"
                    "mean: {}\n".format(number_of_requests,
                                        np.mean(relative_speedup_sep_lp_wo_td)))
        logger.info("Relative speedup compared to cactus LP for {} requests:\n"
                    "with tree decomposition (mean): {}\n"
                    "without tree decomposition (mean): {}".format(number_of_requests,
                                                                   np.mean(speedups_real),
                                                                   np.mean(speedups_wotd)))
        # ECDF construction: pad y with 0 and 1 and x with 0.5 / current maximum
        # so every curve spans the full axis range.
        max_observed_value = np.maximum(max_observed_value, speedup_real[-1])
        yvals = np.arange(1, len(speedup_real) + 1) / float(len(speedup_real))
        yvals = np.insert(yvals, 0, 0.0, axis=0)
        yvals = np.append(yvals, [1.0])
        speedup_real.append(max_observed_value)
        speedup_real.insert(0, 0.5)
        ax.semilogx(speedup_real, yvals, color=colors[request_number_index], linestyle=linestyles[0],
                    linewidth=2.75, alpha=1)
        max_observed_value = np.maximum(max_observed_value, speedup_wotd[-1])
        yvals = np.arange(1, len(speedup_wotd) + 1) / float(len(speedup_wotd))
        yvals = np.insert(yvals, 0, 0.0, axis=0)
        yvals = np.append(yvals, [1.0])
        speedup_wotd.append(max_observed_value)
        speedup_wotd.insert(0, 0.5)
        ax.semilogx(speedup_wotd, yvals, color=colors[request_number_index], linestyle=linestyles[1],
                    linewidth=2.75, alpha=1)
        second_legend_handlers.append(matplotlib.lines.Line2D([], [], color=colors[request_number_index], alpha=1, linestyle="-",
                                     label=("{}".format(number_of_requests)).ljust(3), linewidth=2.5))
    # First legend: line styles; second legend: one color entry per request count.
    first_legend = plt.legend(handles=[with_td, wo_td], loc=4, fontsize=11, title="", handletextpad=.35,
                              borderaxespad=0.1, borderpad=0.2, handlelength=2.5)
    first_legend.get_frame().set_alpha(1.0)
    first_legend.get_frame().set_facecolor("#FFFFFF")
    plt.setp(first_legend.get_title(), fontsize=12)
    plt.gca().add_artist(first_legend)
    # ax.tick_params(labelright=True)
    # print second_legend_handlers
    second_legend = plt.legend(handles=second_legend_handlers, loc=2, fontsize=11, title="#requests", handletextpad=.35,
                               borderaxespad=0.175, borderpad=0.2, handlelength=2)
    #plt.gca().add_artist(second_legend)
    plt.setp(second_legend.get_title(), fontsize=12)
    second_legend.get_frame().set_alpha(1.0)
    second_legend.get_frame().set_facecolor("#FFFFFF")
    # first_legend = plt.legend(title="Bound($\mathrm{MIP}_{\mathrm{MCF}})$", handles=root_legend_handlers, loc=(0.225,0.0125), fontsize=14, handletextpad=0.35, borderaxespad=0.175, borderpad=0.2)
    # plt.setp(first_legend.get_title(), fontsize='15')
    # plt.gca().add_artist(first_legend)
    # plt.setp("TITLE", fontsize='15')
    ax.set_title("Cactus LP Runtime Comparison", fontsize=17)
    ax.set_xlabel(r"Speedup: Time($\mathsf{LP}_{\mathsf{Cactus}}$) / Time($\mathsf{LP}_{\mathsf{DynVMP}}$)",
                  fontsize=16)
    ax.set_ylabel("ECDF", fontsize=16)
    ax.set_xlim(0.4, max_observed_value * 1.15)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(13)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(13)
    ax.set_xticks([0.5, 1, 5, 20, 60, ], minor=False)
    ax.set_xticks([2, 3, 4, 10, 30, 40], minor=True)
    ax.set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], minor=False)
    ax.set_yticks([0.1, 0.3, 0.5, 0.7, 0.9], minor=True)
    # ax.set_yticks([x*0.1 for x in range(1,10)], minor=True)
    ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    ax.set_xticklabels([], minor=True)
    ax.grid(True, which="both", linestyle=":", color='k', alpha=0.7, linewidth=0.33)
    plt.tight_layout()
    plt.savefig("ecdf_speedup_cactus_lp_vs_separation_dynvmp.pdf")
| [
"logging.getLogger",
"numpy.insert",
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"matplotlib.pyplot.gca",
"os.path.join",
"matplotlib.pyplot.cm.inferno",
"numpy.append",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.pyplot.tight_layout",
"numpy.maximum",
"matplotlib.lines... | [((1261, 1284), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (1275, 1284), False, 'import matplotlib\n'), ((1348, 1375), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1365, 1375), False, 'import logging\n'), ((10199, 10229), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 3.5)'}), '(figsize=(5, 3.5))\n', (10211, 10229), True, 'from matplotlib import pyplot as plt\n'), ((10516, 10645), 'matplotlib.lines.Line2D', 'matplotlib.lines.Line2D', (['[]', '[]'], {'color': '"""#333333"""', 'linestyle': 'linestyles[0]', 'label': '"""incl. $\\\\mathcal{T}_r$ comp."""', 'linewidth': '(2)'}), "([], [], color='#333333', linestyle=linestyles[0],\n label='incl. $\\\\mathcal{T}_r$ comp.', linewidth=2)\n", (10539, 10645), False, 'import matplotlib\n'), ((10654, 10785), 'matplotlib.lines.Line2D', 'matplotlib.lines.Line2D', (['[]', '[]'], {'color': '"""#333333"""', 'linestyle': 'linestyles[1]', 'label': '"""excl. $\\\\mathcal{T}_r$ comp."""', 'linewidth': '(2.75)'}), "([], [], color='#333333', linestyle=linestyles[1],\n label='excl. 
$\\\\mathcal{T}_r$ comp.', linewidth=2.75)\n", (10677, 10785), False, 'import matplotlib\n'), ((14796, 14936), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[with_td, wo_td]', 'loc': '(4)', 'fontsize': '(14)', 'title': '""""""', 'handletextpad': '(0.35)', 'borderaxespad': '(0.1)', 'borderpad': '(0.2)', 'handlelength': '(1)'}), "(handles=[with_td, wo_td], loc=4, fontsize=14, title='',\n handletextpad=0.35, borderaxespad=0.1, borderpad=0.2, handlelength=1)\n", (14806, 14936), True, 'from matplotlib import pyplot as plt\n'), ((15246, 15408), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'second_legend_handlers', 'loc': '(2)', 'fontsize': '(14)', 'title': '"""#requests"""', 'handletextpad': '(0.35)', 'borderaxespad': '(0.175)', 'borderpad': '(0.2)', 'handlelength': '(2)'}), "(handles=second_legend_handlers, loc=2, fontsize=14, title=\n '#requests', handletextpad=0.35, borderaxespad=0.175, borderpad=0.2,\n handlelength=2)\n", (15256, 15408), True, 'from matplotlib import pyplot as plt\n'), ((16909, 16927), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16925, 16927), True, 'from matplotlib import pyplot as plt\n'), ((16948, 17043), 'os.path.join', 'os.path.join', (['output_path', "('ecdf_speedup_cactus_lp_vs_separation_dynvmp.' + output_filetype)"], {}), "(output_path, 'ecdf_speedup_cactus_lp_vs_separation_dynvmp.' +\n output_filetype)\n", (16960, 17043), False, 'import os\n'), ((17044, 17070), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_to_write'], {}), '(file_to_write)\n', (17055, 17070), True, 'from matplotlib import pyplot as plt\n'), ((17788, 17818), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 3.5)'}), '(figsize=(5, 3.5))\n', (17800, 17818), True, 'from matplotlib import pyplot as plt\n'), ((18006, 18135), 'matplotlib.lines.Line2D', 'matplotlib.lines.Line2D', (['[]', '[]'], {'color': '"""#333333"""', 'linestyle': 'linestyles[0]', 'label': '"""incl. 
$\\\\mathcal{T}_r$ comp."""', 'linewidth': '(2)'}), "([], [], color='#333333', linestyle=linestyles[0],\n label='incl. $\\\\mathcal{T}_r$ comp.', linewidth=2)\n", (18029, 18135), False, 'import matplotlib\n'), ((18144, 18275), 'matplotlib.lines.Line2D', 'matplotlib.lines.Line2D', (['[]', '[]'], {'color': '"""#333333"""', 'linestyle': 'linestyles[1]', 'label': '"""excl. $\\\\mathcal{T}_r$ comp."""', 'linewidth': '(2.75)'}), "([], [], color='#333333', linestyle=linestyles[1],\n label='excl. $\\\\mathcal{T}_r$ comp.', linewidth=2.75)\n", (18167, 18275), False, 'import matplotlib\n'), ((21693, 21835), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[with_td, wo_td]', 'loc': '(4)', 'fontsize': '(11)', 'title': '""""""', 'handletextpad': '(0.35)', 'borderaxespad': '(0.1)', 'borderpad': '(0.2)', 'handlelength': '(2.5)'}), "(handles=[with_td, wo_td], loc=4, fontsize=11, title='',\n handletextpad=0.35, borderaxespad=0.1, borderpad=0.2, handlelength=2.5)\n", (21703, 21835), True, 'from matplotlib import pyplot as plt\n'), ((22145, 22307), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'second_legend_handlers', 'loc': '(2)', 'fontsize': '(11)', 'title': '"""#requests"""', 'handletextpad': '(0.35)', 'borderaxespad': '(0.175)', 'borderpad': '(0.2)', 'handlelength': '(2)'}), "(handles=second_legend_handlers, loc=2, fontsize=11, title=\n '#requests', handletextpad=0.35, borderaxespad=0.175, borderpad=0.2,\n handlelength=2)\n", (22155, 22307), True, 'from matplotlib import pyplot as plt\n'), ((23808, 23826), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23824, 23826), True, 'from matplotlib import pyplot as plt\n'), ((23831, 23893), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ecdf_speedup_cactus_lp_vs_separation_dynvmp.pdf"""'], {}), "('ecdf_speedup_cactus_lp_vs_separation_dynvmp.pdf')\n", (23842, 23893), True, 'from matplotlib import pyplot as plt\n'), ((10272, 10293), 'matplotlib.pyplot.cm.inferno', 'plt.cm.inferno', 
(['value'], {}), '(value)\n', (10286, 10293), True, 'from matplotlib import pyplot as plt\n'), ((13188, 13236), 'numpy.maximum', 'np.maximum', (['max_observed_value', 'speedup_real[-1]'], {}), '(max_observed_value, speedup_real[-1])\n', (13198, 13236), True, 'import numpy as np\n'), ((13332, 13364), 'numpy.insert', 'np.insert', (['yvals', '(0)', '(0.0)'], {'axis': '(0)'}), '(yvals, 0, 0.0, axis=0)\n', (13341, 13364), True, 'import numpy as np\n'), ((13381, 13404), 'numpy.append', 'np.append', (['yvals', '[1.0]'], {}), '(yvals, [1.0])\n', (13390, 13404), True, 'import numpy as np\n'), ((13666, 13714), 'numpy.maximum', 'np.maximum', (['max_observed_value', 'speedup_wotd[-1]'], {}), '(max_observed_value, speedup_wotd[-1])\n', (13676, 13714), True, 'import numpy as np\n'), ((13810, 13842), 'numpy.insert', 'np.insert', (['yvals', '(0)', '(0.0)'], {'axis': '(0)'}), '(yvals, 0, 0.0, axis=0)\n', (13819, 13842), True, 'import numpy as np\n'), ((13859, 13882), 'numpy.append', 'np.append', (['yvals', '[1.0]'], {}), '(yvals, [1.0])\n', (13868, 13882), True, 'import numpy as np\n'), ((16742, 16777), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (16775, 16777), False, 'import matplotlib\n'), ((17861, 17882), 'matplotlib.pyplot.cm.inferno', 'plt.cm.inferno', (['value'], {}), '(value)\n', (17875, 17882), True, 'from matplotlib import pyplot as plt\n'), ((20488, 20536), 'numpy.maximum', 'np.maximum', (['max_observed_value', 'speedup_real[-1]'], {}), '(max_observed_value, speedup_real[-1])\n', (20498, 20536), True, 'import numpy as np\n'), ((20632, 20664), 'numpy.insert', 'np.insert', (['yvals', '(0)', '(0.0)'], {'axis': '(0)'}), '(yvals, 0, 0.0, axis=0)\n', (20641, 20664), True, 'import numpy as np\n'), ((20681, 20704), 'numpy.append', 'np.append', (['yvals', '[1.0]'], {}), '(yvals, [1.0])\n', (20690, 20704), True, 'import numpy as np\n'), ((20966, 21014), 'numpy.maximum', 'np.maximum', (['max_observed_value', 'speedup_wotd[-1]'], {}), 
'(max_observed_value, speedup_wotd[-1])\n', (20976, 21014), True, 'import numpy as np\n'), ((21110, 21142), 'numpy.insert', 'np.insert', (['yvals', '(0)', '(0.0)'], {'axis': '(0)'}), '(yvals, 0, 0.0, axis=0)\n', (21119, 21142), True, 'import numpy as np\n'), ((21159, 21182), 'numpy.append', 'np.append', (['yvals', '[1.0]'], {}), '(yvals, [1.0])\n', (21168, 21182), True, 'import numpy as np\n'), ((23641, 23676), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (23674, 23676), False, 'import matplotlib\n'), ((15116, 15125), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15123, 15125), True, 'from matplotlib import pyplot as plt\n'), ((22015, 22024), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (22022, 22024), True, 'from matplotlib import pyplot as plt\n'), ((12704, 12742), 'numpy.mean', 'np.mean', (['relative_speedup_sep_lp_wo_td'], {}), '(relative_speedup_sep_lp_wo_td)\n', (12711, 12742), True, 'import numpy as np\n'), ((13040, 13062), 'numpy.mean', 'np.mean', (['speedups_real'], {}), '(speedups_real)\n', (13047, 13062), True, 'import numpy as np\n'), ((13131, 13153), 'numpy.mean', 'np.mean', (['speedups_wotd'], {}), '(speedups_wotd)\n', (13138, 13153), True, 'import numpy as np\n'), ((20004, 20042), 'numpy.mean', 'np.mean', (['relative_speedup_sep_lp_wo_td'], {}), '(relative_speedup_sep_lp_wo_td)\n', (20011, 20042), True, 'import numpy as np\n'), ((20340, 20362), 'numpy.mean', 'np.mean', (['speedups_real'], {}), '(speedups_real)\n', (20347, 20362), True, 'import numpy as np\n'), ((20431, 20453), 'numpy.mean', 'np.mean', (['speedups_wotd'], {}), '(speedups_wotd)\n', (20438, 20453), True, 'import numpy as np\n')] |
import cv2,os
import numpy as np
from keras.applications.vgg16 import decode_predictions
from keras.applications import ResNet50, Xception, InceptionV3, VGG16, VGG19
from keras.preprocessing import image as Image
from keras.applications.vgg16 import preprocess_input
from tqdm import tqdm
from skimage import feature
#LBP feature
class LocalBinaryPatterns:
    """Uniform Local Binary Pattern (LBP) texture descriptor for grayscale images."""

    def __init__(self, numPoints, radius):
        # Number of circularly symmetric sampling points and the circle radius
        # used by the LBP operator.
        self.numPoints = numPoints
        self.radius = radius

    def describe(self, image, eps=1e-7):
        """Return the L1-normalized histogram of 'nri_uniform' LBP codes.

        eps avoids division by zero when the histogram is empty.
        """
        codes = feature.local_binary_pattern(image, self.numPoints,
                                             self.radius, method="nri_uniform")
        # 'nri_uniform' yields numPoints*(numPoints-1) + 2 distinct code values.
        n_bins = self.numPoints * (self.numPoints - 1) + 2
        hist, _ = np.histogram(codes.ravel(),
                                  bins=np.arange(0, n_bins + 1),
                                  range=(0, n_bins))
        hist = hist.astype("float")
        hist /= hist.sum() + eps
        return hist
def embedding_load(embedding_path):
    """Load a word-embedding text file into a dictionary.

    The file is expected in GloVe/word2vec text format: one token per line,
    followed by its space-separated float components.

    Parameters
    ----------
    embedding_path : str
        Path to the embedding text file (UTF-8 encoded).

    Returns
    -------
    dict
        Maps each word to its embedding as a float32 numpy array.
    """
    embedding_vector = {}
    # 'with' guarantees the file handle is closed even if a line fails to
    # parse (the original open()/close() pair leaked the handle on error).
    with open(embedding_path, 'r', encoding='utf8') as f:
        for line in tqdm(f):
            value = line.split(' ')
            word = value[0]
            coef = np.array(value[1:], dtype='float32')
            embedding_vector[word] = coef
    return embedding_vector
#tag embedding feature
def im_tag_embedding_feature_extraction(img_path, model, embedding_vector, im_size):
    """Predict the top-10 ImageNet tags of an image and average their word embeddings.

    Parameters
    ----------
    img_path : str
        Path of the image to describe.
    model : keras model
        ImageNet classifier whose output ``decode_predictions`` understands.
    embedding_vector : dict
        Word -> 300-d embedding mapping (see ``embedding_load``).
    im_size : int
        Side length the image is resized to before prediction (model dependent,
        e.g. 224 for VGG/ResNet, 299 for Inception/Xception).

    Returns
    -------
    list
        300-element mean embedding over the predicted tags that have an
        embedding; an all-zero vector if none of the tags is known
        (the original raised ZeroDivisionError in that case).
    """
    img = Image.load_img(img_path, target_size=(im_size, im_size))
    x = Image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    yhat = model.predict(x)
    labels = decode_predictions(yhat, top=10)
    # Each label is a (class_id, class_name, probability) triple; keep the names.
    words = [label[1] for label in labels[0]]
    embedding_sum = np.zeros(300)
    matched = 0
    for tag in words:
        if tag in embedding_vector:
            embedding_sum = embedding_sum + np.array(embedding_vector[tag])
            matched += 1
    # Guard against division by zero when no predicted tag has an embedding.
    if matched == 0:
        return list(embedding_sum)
    return list(embedding_sum / matched)
if __name__ == '__main__':
    # Word embeddings used to turn predicted ImageNet tags into 300-d vectors.
    embedding_path = '../data/GoogleNews_vectors_negative_300d.txt'
    embedding_vector = embedding_load(embedding_path)
    im_path = '../data/politifact_images/' #../data/gossipcop_images/
    # (short name, pretrained classifier, input side length) per backbone.
    model_specs = [
        ('vgg16', VGG16(weights='imagenet', include_top=True), 224),
        ('vgg19', VGG19(weights='imagenet', include_top=True), 224),
        ('resnet', ResNet50(weights='imagenet', include_top=True), 224),
        ('inception', InceptionV3(weights='imagenet', include_top=True), 299),
        ('xception', Xception(weights='imagenet', include_top=True), 299),
    ]
    lbp_feature_dict = {}
    tag_feature_dicts = {name: {} for name, _, _ in model_specs}
    descriptor = LocalBinaryPatterns(8, 1.0)
    processed = 0
    for im in os.listdir(im_path):
        try:
            print(im)
            processed += 1
            if processed % 100 == 0:
                print(processed)
            # Read the image and derive the texture feature from its grayscale version.
            bgr = cv2.imread(im_path + im)
            gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
            hist_LBP = descriptor.describe(gray)
            # Compute every tag-embedding feature before storing anything so a
            # failure mid-way leaves no partial entries (matches original behavior).
            tag_features = [
                im_tag_embedding_feature_extraction(im_path + im, model, embedding_vector, size)
                for _, model, size in model_specs
            ]
            lbp_feature_dict[im] = list(hist_LBP)
            for (name, _, _), feat in zip(model_specs, tag_features):
                tag_feature_dicts[name][im] = feat
        except Exception:
            print('error image', im)
            continue
    # save features
    np.save('politifact_image_LBP_feature.npy', lbp_feature_dict)
    for name, _, _ in model_specs:
        np.save('politifact_image_tag_%s_feature.npy' % name, tag_feature_dicts[name])
"keras.preprocessing.image.img_to_array",
"numpy.array",
"numpy.arange",
"numpy.save",
"keras.applications.Xception",
"os.listdir",
"keras.applications.vgg16.preprocess_input",
"keras.applications.VGG16",
"keras.applications.VGG19",
"keras.applications.InceptionV3",
"cv2.cvtColor",
"keras.appl... | [((1350, 1357), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (1354, 1357), False, 'from tqdm import tqdm\n'), ((1674, 1730), 'keras.preprocessing.image.load_img', 'Image.load_img', (['img_path'], {'target_size': '(im_size, im_size)'}), '(img_path, target_size=(im_size, im_size))\n', (1688, 1730), True, 'from keras.preprocessing import image as Image\n'), ((1740, 1763), 'keras.preprocessing.image.img_to_array', 'Image.img_to_array', (['img'], {}), '(img)\n', (1758, 1763), True, 'from keras.preprocessing import image as Image\n'), ((1773, 1798), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1787, 1798), True, 'import numpy as np\n'), ((1808, 1827), 'keras.applications.vgg16.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (1824, 1827), False, 'from keras.applications.vgg16 import preprocess_input\n'), ((1873, 1905), 'keras.applications.vgg16.decode_predictions', 'decode_predictions', (['yhat'], {'top': '(10)'}), '(yhat, top=10)\n', (1891, 1905), False, 'from keras.applications.vgg16 import decode_predictions\n'), ((2088, 2101), 'numpy.zeros', 'np.zeros', (['(300)'], {}), '(300)\n', (2096, 2101), True, 'import numpy as np\n'), ((2667, 2710), 'keras.applications.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (2672, 2710), False, 'from keras.applications import ResNet50, Xception, InceptionV3, VGG16, VGG19\n'), ((2730, 2773), 'keras.applications.VGG19', 'VGG19', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (2735, 2773), False, 'from keras.applications import ResNet50, Xception, InceptionV3, VGG16, VGG19\n'), ((2794, 2840), 'keras.applications.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (2802, 2840), False, 'from keras.applications import ResNet50, Xception, InceptionV3, VGG16, VGG19\n'), 
((2864, 2913), 'keras.applications.InceptionV3', 'InceptionV3', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (2875, 2913), False, 'from keras.applications import ResNet50, Xception, InceptionV3, VGG16, VGG19\n'), ((2936, 2982), 'keras.applications.Xception', 'Xception', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (2944, 2982), False, 'from keras.applications import ResNet50, Xception, InceptionV3, VGG16, VGG19\n'), ((3268, 3287), 'os.listdir', 'os.listdir', (['im_path'], {}), '(im_path)\n', (3278, 3287), False, 'import cv2, os\n'), ((4924, 4985), 'numpy.save', 'np.save', (['"""politifact_image_LBP_feature.npy"""', 'lbp_feature_dict'], {}), "('politifact_image_LBP_feature.npy', lbp_feature_dict)\n", (4931, 4985), True, 'import numpy as np\n'), ((4991, 5079), 'numpy.save', 'np.save', (['"""politifact_image_tag_vgg16_feature.npy"""', 'vgg16_tags_embedding_feature_dict'], {}), "('politifact_image_tag_vgg16_feature.npy',\n vgg16_tags_embedding_feature_dict)\n", (4998, 5079), True, 'import numpy as np\n'), ((5081, 5169), 'numpy.save', 'np.save', (['"""politifact_image_tag_vgg19_feature.npy"""', 'vgg19_tags_embedding_feature_dict'], {}), "('politifact_image_tag_vgg19_feature.npy',\n vgg19_tags_embedding_feature_dict)\n", (5088, 5169), True, 'import numpy as np\n'), ((5171, 5261), 'numpy.save', 'np.save', (['"""politifact_image_tag_resnet_feature.npy"""', 'resnet_tags_embedding_feature_dict'], {}), "('politifact_image_tag_resnet_feature.npy',\n resnet_tags_embedding_feature_dict)\n", (5178, 5261), True, 'import numpy as np\n'), ((5263, 5359), 'numpy.save', 'np.save', (['"""politifact_image_tag_inception_feature.npy"""', 'inception_tags_embedding_feature_dict'], {}), "('politifact_image_tag_inception_feature.npy',\n inception_tags_embedding_feature_dict)\n", (5270, 5359), True, 'import numpy as np\n'), ((5361, 5455), 'numpy.save', 'np.save', 
(['"""politifact_image_tag_xception_feature.npy"""', 'xception_tags_embedding_feature_dict'], {}), "('politifact_image_tag_xception_feature.npy',\n xception_tags_embedding_feature_dict)\n", (5368, 5455), True, 'import numpy as np\n'), ((750, 841), 'skimage.feature.local_binary_pattern', 'feature.local_binary_pattern', (['image', 'self.numPoints', 'self.radius'], {'method': '"""nri_uniform"""'}), "(image, self.numPoints, self.radius, method=\n 'nri_uniform')\n", (778, 841), False, 'from skimage import feature\n'), ((1433, 1469), 'numpy.array', 'np.array', (['value[1:]'], {'dtype': '"""float32"""'}), "(value[1:], dtype='float32')\n", (1441, 1469), True, 'import numpy as np\n'), ((3451, 3475), 'cv2.imread', 'cv2.imread', (['(im_path + im)'], {}), '(im_path + im)\n', (3461, 3475), False, 'import cv2, os\n'), ((3494, 3533), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3506, 3533), False, 'import cv2, os\n'), ((915, 970), 'numpy.arange', 'np.arange', (['(0)', '(self.numPoints * (self.numPoints - 1) + 3)'], {}), '(0, self.numPoints * (self.numPoints - 1) + 3)\n', (924, 970), True, 'import numpy as np\n'), ((2252, 2275), 'numpy.array', 'np.array', (['tag_embedding'], {}), '(tag_embedding)\n', (2260, 2275), True, 'import numpy as np\n'), ((2314, 2337), 'numpy.array', 'np.array', (['tag_embedding'], {}), '(tag_embedding)\n', (2322, 2337), True, 'import numpy as np\n')] |
import copy
import os
import shutil
import sys
import time
import PyTango
import numpy
import p05.common.PyTangoProxyConstants as proxies
import p05.tools.misc as misc
from p05.nanoCameras import FLIeh2_nanoCam, Hamamatsu_nanoCam, KIT_nanoCam, Lambda_nanoCam, PCO_nanoCam, \
PixelLink_nanoCam, Zyla_nanoCam
from p05.scripts.OptimizePitch import OptimizePitch
# TODO very long, have to split, e.g. scanscripts, GetPETRA, Beam, Image
class NanoScriptHelper():
"""Class to help scripting for nanotomography measurement.
Class initialization parameters:
<pmac>: PMACdict instance
<group>: abbreviation for group name (e.g. 'hzg')
<beamtime>: name of the current beamtime (e.g. 'nanoXTM_201302')
<prefix>: prefix for the scan (logfile and images)
assumptions made in class:
- usage of EH1 FLI camera
- usage of hzgpp05ct2 for data storage
"""
def __init__(self, pmac, currScript, group, beamtime, prefix, exptime= None, \
usePCO = False, useSmarAct = True, \
useDiode = False, closeShutter = True, useStatusServer = True, \
usePCOcamware = False, useEHD = False, useASAP = True, useASAPcomm = False, \
DCMdetune = 0.0, useEnviroLog = False, useNGM = False, disableSideBunchReacquisition = True, \
useHamamatsu=False, usePixelLink=False, useZyla=False, useKIT=False, useLambda=False,
useHamaTrigger=False, logRotPos=False):
"""
Class initialization:
<pmac>: PMACdict instance
<group>: abbreviation for group name (e.g. 'hzg')
<beamtime>: name of the current beamtime (e.g. 'nanoXTM_201302')
<prefix>: prefix for the scan (logfile and images)
"""
self.sGroup = group
self.sBeamtime = beamtime
self.sPrefix = prefix
# TODO remove all hardcoded lins (e.g.t:/current/ or d:/hzg/) and set all links in one place at the beginning, then use only aliases
if useASAP:
#self.sPath = 't:/current/scratch_bl/%s/' %(self.sPrefix)
self.sPath = 't:/current/raw/%s/' %(self.sPrefix)
elif useASAP == False and useASAPcomm == True:
self.sPath = 't:/current/scratch_bl/%s/' %(self.sPrefix)
else:
#self.sPath = 'd:/%s/%s/%s/' %(self.sGroup, self.sBeamtime, self.sPrefix)
self.sPath = 'd:/hzg/' + str(beamtime) + '/%s/' %(self.sPrefix)
if useZyla or useKIT or usePCO or useLambda:
if useASAP:
self.sPath_Cam = '/gpfs/current/raw/%s/' %(self.sPrefix)
elif useASAP == False and useASAPcomm == True:
self.sPath_Cam = '/gpfs/current/scratch_bl/%s/' %(self.sPrefix)
else:
self.sPath_Cam = '/home/p05user/data/' + str(beamtime) + '/%s/' %(self.sPrefix)
self.sPathBeamLogs = self.sPath+'beamLogs/'
self.sLogfile = self.sPath + '%s__LogScan.log' %(self.sPrefix)
self.sCameraLogfile = self.sPath + '%s__LogCamera.log' %(self.sPrefix)
self.sMotorLogFile = self.sPath + '%s__LogMotors.log' %(self.sPrefix)
self.sBeamLogFile = self.sPath + '%s__LogBeam.log' %(self.sPrefix)
if not os.path.exists(os.path.split(self.sLogfile)[0]):
os.makedirs(os.path.split(self.sLogfile)[0])
shutil.copy2(currScript, self.sPath + '%s__LogScript.py.log' %(self.sPrefix))
self.starttime = time.time()
self.exptime = exptime
#Boolean variables:
self.useKIT = useKIT
self.useSmarAct = useSmarAct
self.usePCO = usePCO
self.usePCOcamware = usePCOcamware
self.useEHD = useEHD
self.useStatusServer = useStatusServer
self.closeShutter = closeShutter
self.useDiode = useDiode
self.DCMdetune = DCMdetune
self.useEnviroLog = useEnviroLog
self.disableSideBunchReacquisition = disableSideBunchReacquisition
self.useHamamatsu = useHamamatsu
self.useHamaTrigger = useHamaTrigger
self.usePixelLink = usePixelLink
self.useLambda = useLambda
self.logRotPos = logRotPos
self.useZyla = useZyla
self.useNGM = useNGM
#########################################
######## initialize camera ##############
#########################################
self.camera = None
if self.useEHD:
self.camera = FLIeh2_nanoCam(imageDir = self.sPath, exptime = self.exptime)
# TODO move all PyTango.DeviceProxy into dedicated file. Here use only links to that file
# TODO reorganize if statements
if self.useHamamatsu:
self.camera = 'Hamamatsu'
self.hamamatsu = Hamamatsu_nanoCam(imageDir = self.sPath, exptime = self.exptime)
self.tTrigger = PyTango.DeviceProxy(proxies.register_eh2_out03) # proxies.dac_eh1_02
self.tTriggerOut = PyTango.DeviceProxy(proxies.register_eh2_in08)
# self.tTrigger = PyTango.DeviceProxy(proxies.register_eh2_out01)
self.currimage = None
#self.hamamatsu.sendCommand('StartVideoAcq')
#Hamamatsu_nanoCam(exptime=self.exptime)
#print('~~~~~~~~~ !!!! ~~~~~~~~~ Attention: Hamamatsu image application running?')
#tmp = raw_input('~~~~~~~~~ !!!! ~~~~~~~~~ Continue? y / n: ')
#if tmp not in ['yes', 'Y', 'y']:
#print('Aborting...')
#sys.exit()
if self.useHamaTrigger:
self.tTrigger = PyTango.DeviceProxy(proxies.register_eh2_out03) # (proxies.dac_eh1_01)
self.tTriggerOut = PyTango.DeviceProxy(proxies.register_eh2_in08)
if self.usePixelLink:
self.camera = 'PixelLink'
self.hamamatsu = PixelLink_nanoCam(imageDir = self.sPath, exptime = 1.0)
self.tTrigger = PyTango.DeviceProxy(proxies.dac_eh1_01) # proxies.dac_eh1_02
self.currimage = None
if self.useZyla:
self.camera = 'Zyla'
self.hamamatsu = Zyla_nanoCam(imageDir = self.sPath, exptime = self.exptime)
self.tTrigger = PyTango.DeviceProxy(proxies.register_eh2_out03) # proxies.dac_eh1_02
#self.tTrigger = PyTango.DeviceProxy(proxies.register_eh2_out01)
self.currimage = None
if self.useKIT:
self.camera = 'KIT'
self.hamamatsu = KIT_nanoCam(imageDir = self.sPath_Cam, exptime = self.exptime)
self.tTrigger = PyTango.DeviceProxy(proxies.register_eh1_out01)
self.currimage = None
if self.useLambda:
self.camera = 'Lambda'
self.hamamatsu = Lambda_nanoCam(imageDir=self.sPath_Cam, exptime=self.exptime)
self.tTrigger = PyTango.DeviceProxy(proxies.register_eh2_out03)
self.currimage = None
if self.usePCO:
self.camera = 'PCO'
self.hamamatsu = PCO_nanoCam(imageDir = self.sPath_Cam, exptime = self.exptime)
self.tTrigger = PyTango.DeviceProxy(proxies.dac_eh1_01) # proxies.dac_eh1_02
# self.tTrigger = PyTango.DeviceProxy(proxies.register_eh2_out01)
self.currimage = None
# for Hergens rotations stage
if self.useNGM:
self.rot_Stage_NGM = PyTango.DeviceProxy(proxies.labmotion_exp01)
#########################################
######## initialize TANGO ###############
#########################################
self.tPMAC = pmac
self.tQBPM = PyTango.DeviceProxy(proxies.tQBPM_i404_exp02)
self.tPETRAinfo = PyTango.DeviceProxy(proxies.tPETRA_globals)
self.tPETRAcell4 = PyTango.DeviceProxy(proxies.tPETRA_undbpos_cell4)
self.tPETRAnbCleaning = PyTango.DeviceProxy(proxies.tPETRAnbCleaning_umschaltmanager)
self.tBeamShutter = PyTango.DeviceProxy(proxies.tBeam_shutter)
self.tPitch = PyTango.DeviceProxy(proxies.motor_mono_01_tPitch)
self.tRoll = PyTango.DeviceProxy(proxies.motor_mono_02_tRoll)
self.tUndulator = PyTango.DeviceProxy(proxies.tUndulator_1)
self.tScintiY = PyTango.DeviceProxy(proxies.motor_eh1_05_tScintiY)
self.tLensY = PyTango.DeviceProxy(proxies.motor_eh1_06_tLensY)
self.tCamRot = PyTango.DeviceProxy(proxies.motor_eh1_07_tCamRot)
self.tDCMenergy = PyTango.DeviceProxy(proxies.dcmener_s01_01_tDCMenergy)
if self.useStatusServer:
self.tStatusServer = PyTango.DeviceProxy(proxies.tStatusServer_p05ct)
#self.tStatusServer = PyTango.DeviceProxy('//hzgpp05ct1:10000/p05/status_server/p05beamline') #gives wrong values for current?
#self.tStatusServer.command_inout('createAttributesGroup',['beamcurrent','/PETRA/Idc/Buffer-0/I.SCH'])
#self.tStatusServer.command_inout('stopCollectData')
#self.tStatusServer.command_inout('eraseData')
#self.tStatusServer.command_inout('startCollectData')
self.StatusServerSendCommand('stopCollectData')
self.StatusServerSendCommand('eraseData')
#self.StatusServerSendCommand('startCollectData')
if self.useSmarAct:
self.SM = numpy.zeros(4, dtype = object)
self.SM[0] = PyTango.DeviceProxy(proxies.smaract_eh1_cha0)
self.SM[1] = PyTango.DeviceProxy(proxies.smaract_eh1_cha1)
self.SM[2] = PyTango.DeviceProxy(proxies.smaract_eh1_cha3)
self.SM[3] = PyTango.DeviceProxy(proxies.smaract_eh1_cha4)
# self.SM[4] = PyTango.DeviceProxy(proxies.smaract_eh1_cha2)
if useDiode:
self.tDiode = PyTango.DeviceProxy(proxies.tDiode_adc_eh1_01)
if self.useEnviroLog:
self.Environ = numpy.zeros(6, dtype=object)
self.Environ[0] = PyTango.DeviceProxy(proxies.adc_eh1_01)
self.Environ[1] = PyTango.DeviceProxy(proxies.adc_eh1_02)
self.Environ[2] = PyTango.DeviceProxy(proxies.adc_eh1_03)
self.Environ[3] = PyTango.DeviceProxy(proxies.adc_eh1_04)
self.Environ[4] = PyTango.DeviceProxy(proxies.adc_eh1_05)
self.Environ[5] = PyTango.DeviceProxy(proxies.adc_eh1_06)
#########################################
######## initialize logging #############
#########################################
if os.path.exists(self.sLogfile):
print('~~~~~~~~~ !!!! ~~~~~~~~~ Warning: Logfile exists')
tmp = input(r'~~~~~~~~~ !!!! ~~~~~~~~~ Continue? y / n: ')
if tmp not in ['yes', 'Y', 'y']:
print('Aborting...')
sys.exit()
# Prepare _LogScan
self.fLog = open(self.sLogfile, 'w')
self.fLog.write('#00: image identifier\n#01: infostr\n#02: image number I\n#03: image number II\n#04: current number\n#04: file number\
\n#06: timestamp\n#07: PETRA Beam Current\n#08: Position of rotation stage \n')
if self.useEnviroLog: # Not implemented at the moment
self.fLog.write('#24: Air temperature 01\n#25: Air humidity 01\n#26: Air temperature 02\
\n#27:Air humidity 02\n#28: Air temperature 03\n#29: Air humidity 03\n')
self.fLog.write('#starttime = %e\n' %self.starttime)
tmp = '%s\t%s\t%s\t%s\t%s\t' %(self.__FormattedString('#00', 18), self.__FormattedString('#01', 5), self.__FormattedString('#02', 5),\
self.__FormattedString('#03', 5), self.__FormattedString('#04', 5))+ \
'#05:\t#06:\t\t#07:\t\t#08::'
if self.useEnviroLog: tmp += '\t\t#22:\t#23:\t\t#24:\t\t#25:\t\t#26:\t\t#27:\t\t#28:\t\t#29:'
self.fLog.write(tmp + '\n')
# Prepare _LogMotor
self.fMotorLog = open(self.sMotorLogFile, 'w')
__list = ['VacuumSF_x', 'VacuumSF_y', 'VacuumSF_z', 'VacuumSF_Rx', 'VacuumSF_Ry', 'VacuumSF_Rz', 'VacuumTrans_y',\
'GraniteSlab_1', 'GraniteSlab_2', 'GraniteSlab_3','GraniteSlab_4', 'Aperture_x', 'Aperture_y', \
'Aperture_z', 'OpticsSF1_x', 'OpticsSF1_y', 'OpticsSF1_z', 'OpticsSF1_Rx', 'OpticsSF1_Ry', 'OpticsSF1_Rz', \
'OpticsStage1_y', 'OpticsSF2_x', 'OpticsSF2_y', 'OpticsSF2_z', 'OpticsSF2_Rx', 'OpticsSF2_Ry', 'OpticsSF2_Rz', \
'Diode1_z','Diode2_z', 'SampleStage_x', 'SampleStage_z', 'SampleStage_Rx', 'SampleStage_Ry', \
'Sample_Rot', 'Sample_x', 'Sample_y', 'Sample_z', 'Sample_Rx', 'Sample_Ry', 'Sample_Rz', \
'Detector_x', 'Detector_z', 'Slits_xLeft', 'Slits_xRight', 'Slits_zLow', 'Slits_zHigh', 'ScintillatorY', 'CamLensY', 'CameraRot',\
'UndulatorPos', 'Pitch', 'DCM']
if self.useSmarAct:
__list += ['SmarAct ch. 0 (x left)', 'SmarAct ch. 1 (z top)', 'SmarAct ch. 3 (x right)', 'SmarAct ch. 4 (z bottom)']
self.fMotorLog.write('#00: Identifier\n#01: Infostring\n#02: Index number I\n#03: Index number II\n#04: Current number\n')
__ii = 5 # start counter after first block of text
for __item in __list:
self.fMotorLog.write('#%02i: %s\n' %(__ii, __item))
__ii += 1
tmp = '%s\t%s\t%s\t%s\t%s\t' %(self.__FormattedString('#00', 18), \
self.__FormattedString('#01', 5), self.__FormattedString('#02', 5),\
self.__FormattedString('#03', 5), self.__FormattedString('#04', 5))
__ii = 5
for i1 in range(len(__list)):
tmp += '#%02i\t\t' %(i1 + 5)
self.fMotorLog.write(tmp+ '\n')
time.sleep(0.1)
# Prepare _LogBeam
self.fBeamLog = open(self.sBeamLogFile, 'w')
self.fBeamLog.write('#00: Identifier\n#01: Infostring\n#02: Index number I\n#03: Index number II\n#04: Current number\
\n@Timestamp[PETRA current@Timestamp]\n')
self.sIdentifier = ''
self.iNumber = None
self.iNumber2 = None
self.iCurr = 0
self.imgNumber = 0
tmp = self.GetCurrentMotorPosString("Before Scan", "Start Scan")
self.fMotorLog.write(tmp)
return None
#end __init__
def __FormattedString(self, __string, __length = 15):
if len(__string) >= __length:
return __string[:__length]
else:
return __string + (__length-len(__string))*' '
#end __FormattedString
def BeamshutterOpen(self, SilentMode = False):
try:
self.tBeamShutter.command_inout('CloseOpen_BS_1', 1)
if not SilentMode:
print(misc.GetTimeString()+': Beamshutter opened')
except:
pass
return None
#end BeamshutterOpen
def BeamshutterClose(self, SilentMode = False):
try:
self.tBeamShutter.command_inout('CloseOpen_BS_1', 0)
if not SilentMode:
print(misc.GetTimeString()+': Beamshutter closed')
except:
pass
return None
#end BeamshutterClose
def TakeDarkImages(self, num = 10,imgNumber=0):
self.BeamshutterClose()
time.sleep(40)
for i1 in range(num):
#self.SetCurrentName('dark', iNumber= i1, iNumber2= None, imgNumber=imgNumber+i1)
self.SetCurrentName('dark', iNumber= i1, iNumber2= None, imgNumber=None)
self.TakeImage()
print(i1)
self.BeamshutterOpen()
time.sleep(20)
return None
#end TakeDarkImages
# TODO iNumber with if-else is used in many places
def SetCurrentName(self, _identifier, iNumber = None, iNumber2 = None, currNum = None, imgNumber=None):
"""
Method to set the current identifier and image number
"""
if currNum != None:
self.iCurr = currNum
if self.useHamaTrigger != True:
if self.camera == 'Hamamatsu' or "Zyla" or "PCO" or "Lambda":
self.hamamatsu.setImageName(_identifier)
if imgNumber != None:
self.hamamatsu.setImgNumber(imgNumber)
self.imgNumber = imgNumber
self.sIdentifier = _identifier
if iNumber == None:
self.iNumber = None
self.curname = '%s_%s_%04i' %(self.sPrefix, self.sIdentifier, self.iCurr)
elif iNumber != None:
self.iNumber = iNumber
if iNumber2 == None:
self.iNumber2 = None
self.curname = '%s_%s_%04i_%04i' %(self.sPrefix, self.sIdentifier, self.iNumber, self.iCurr)
elif iNumber2 != None:
self.iNumber2 = iNumber2
self.curname = '%s_%s_%04i_%04i_%04i' %(self.sPrefix, self.sIdentifier, self.iNumber, self.iNumber2, self.iCurr)
#end SetCurrentName
def SetCurrentNumber(self, iNumber = None, iNumber2 = None, currNum = None):
"""
Method to set the current image number
"""
if currNum != None:
self.iCurr = currNum
if iNumber != None:
if iNumber2 == None:
self.iNumber = iNumber
self.iNumber2 = None
elif iNumber2 != None:
self.iNumber = iNumber
self.iNumber2 = iNumber2
if currNum != None:
self.iCurr = currNum
return None
#end SetCurrentName
def SetExposureTime(self,exptime):
self.exptime = exptime
self.hamamatsu.setExptime(self.exptime)
def StatusServerSendCommand(self, _command):
i0 = 0
while True:
try:
self.tStatusServer.command_inout(_command)
break
except:
print(misc.GetTimeString() + ': StatusServer not responding while executing command "%s"...' % _command)
time.sleep(10)
i0 += 1
if i0 > 5: break
return
#end StatusServerSendCommand
def StatusServerReadData(self):
i0 = 0
while True:
#try:
#self.StatusServerSendCommand('getLatestSnapshot').value
tmp = self.tStatusServer.read_attribute('data').value[1]
tmp = tmp.split('\n')[1:]
if tmp[0].split('[')[1] == '0.0@0]': tmp.pop(0)
if tmp[-1] == '': tmp.pop(-1)
# tmp = string.join(tmp, '\n')+ '\n'
# break
# except:
# print(misc.GetTimeString() + ': StatusServer not responding while reading data...')
# time.sleep(10)
# i0 += 1
# if i0 > 5:
# tmp = 'StatusServer error'
# break
return tmp
#end StatusServerReadData
def PrepareCamera(self):
self.hamamatsu.startLive()
def StopCamera(self):
self.hamamatsu.finishScan()
    def TakeImage(self, verbose = False, writeLogs = True,inum=None,iname=None):
        """Acquire one image and write beam parameters to the logfiles.

        Loops until an acquisition is accepted: self.reacquire is cleared at
        the start of every attempt and the loop exits unless it was set again
        in the meantime (presumably by external side-bunch-sweep handling --
        TODO confirm where reacquire is raised) while
        disableSideBunchReacquisition is False.

        verbose    -- print a timestamped message before acquiring
        writeLogs  -- write start/end entries to the scan and motor logfiles
        inum,iname -- currently unused; kept for call compatibility
        """
        if verbose: print('%s: Acquiring image %s'% (misc.GetTimeString(),'bla'))#, self.curname
        while True:
            # Sweep counter before the exposure (leftover diagnostic; unused).
            tmp_count = self.tPETRAnbCleaning.read_attribute('SweepCounter').value
            self.reacquire = False
            # Snapshot of the log line taken at exposure START; written later.
            if writeLogs: _logdata = self.GetCurrentDataString(self.sIdentifier, 'start')
            self.hamamatsu.acquireImage()
            if self.useStatusServer:
                # Restart beam-current collection for the duration of the exposure.
                self.StatusServerSendCommand('eraseData')
                self.StatusServerSendCommand('startCollectData')
#
#            elif self.camera == 'Hamamatsu':
#                self.lastimage = copy.copy(self.currimage)
#
#                self.currimage = self.hamamatsu.acquireImage()
#                if self.useStatusServer: self.StatusServerSendCommand('stopCollectData')
#
#            elif self.camera == 'Zyla':
#                self.hamamatsu.acquireImage()
#                if self.useStatusServer: self.StatusServerSendCommand('stopCollectData')
#
#            elif self.camera == 'KIT':
#                self.hamamatsu.acquireImage()
#
#            elif self.camera == 'PCO':
#                self.hamamatsu.acquireImage()
#
#
#                if self.useStatusServer: self.StatusServerSendCommand('stopCollectData')
#
#            elif self.camera == 'PixelLink':
#                self.lastimage = copy.copy(self.currimage)
#                self.tTrigger.write_attribute('Voltage', 5)
#                time.sleep(self.exptime)
#                self.tTrigger.write_attribute('Voltage', 0)
#                if self.useStatusServer: self.StatusServerSendCommand('stopCollectData')
#
#            elif self.camera == None:
#                time.sleep(self.exptime)
#                if self.useStatusServer: self.StatusServerSendCommand('stopCollectData')
            if self.useStatusServer and writeLogs:
                # Dump the beam current collected during the exposure.
                self.BeamLogWriteData(self.StatusServerReadData())
                self.StatusServerSendCommand('eraseData')
            # Sweep counter after the exposure (leftover diagnostic; unused).
            tmp_count2 = self.tPETRAnbCleaning.read_attribute('SweepCounter').value
            ###comment out from here #### WHy__
            if self.reacquire and self.useHamamatsu:
                self.iCurr += 1
            ### comment out until here
            if (not self.reacquire) or self.disableSideBunchReacquisition:
                break
        if writeLogs:
            self.fLog.write(_logdata) # writes logdata from start of image aquisition
            self.LogWriteCurrentData(self.sIdentifier, 'end', logMotors= True) # writes logdata from end of image aquisition
        self.iCurr += 1
        #self.SetCurrentName(self.sIdentifier, iNumber= self.iNumber, iNumber2 = self.iNumber2, currNum = self.iCurr)
        return None
    #end TakeImage
def TakeOneDummyImage(self):
self.tTrigger.write_attribute('Value', 1)
time.sleep(0.010)
self.tTrigger.write_attribute('Value', 0)
# TODO split depending on cameras using polymorphism
    def TakeFastImage(self,writeLogs = True,inum=None,iname=None, WaitForCamera= False):
        """Trigger one fast exposure on the current camera (PCO, Hamamatsu or
        Lambda) and log the rotation-stage angle sampled during the exposure.

        NOTE(review): for any other value of self.camera, rot_pos is never
        assigned and the logging line at the bottom raises NameError --
        confirm this method is only called with one of the three cameras.
        inum,iname -- currently unused; kept for call compatibility
        """
        if writeLogs: _logdata = self.GetCurrentDataString(self.sIdentifier, 'start')
        ##### for PCO camera ####
        if self.camera == 'PCO':
            # Busy-wait until the server is idle, then make sure an
            # acquisition is running before gating the exposure.
            while not self.hamamatsu.state() == PyTango.DevState.ON:
                continue
            if not self.hamamatsu.state() == PyTango.DevState.RUNNING:
                self.hamamatsu.startPCOacquisition()
                while not self.hamamatsu.state() == PyTango.DevState.RUNNING:
                    continue
            #time.sleep(0.01)
            # Gate the exposure with a 3.5 V pulse on the DAC trigger line.
            self.tTrigger.write_attribute('Voltage', 3.5)
            rot_pos = self.tPMAC.ReadMotorPos('Sample_Rot')
            time.sleep(self.exptime + 0.01)
            self.tTrigger.write_attribute('Voltage', 0)
            # Decode the frame: the first two uint16 values carry the shape.
            # NOTE(review): numpy.fromstring is deprecated (use frombuffer).
            tmp = numpy.fromstring(self.hamamatsu.readAttribute('Image').value[1], dtype=numpy.uint16).byteswap()
            self.image = (tmp[2:]).reshape(tmp[0], tmp[1])
            self.image = numpy.float32(self.image)
            # im2 = Image.fromarray(self.image.transpose(), mode="F" ) # float32
            # im2.save(self.sPath + iname +'_%03i' % inum + '.tiff' , "TIFF" )
        ##### for Hamamatsu camera #######
        elif self.camera == 'Hamamatsu':
            # Busy-wait for the camera's trigger-ready line before pulsing.
            out = self.tTriggerOut.read_attribute('Value')
            while out.value != 1:
                out = self.tTriggerOut.read_attribute('Value')
            # while not self.hamamatsu.state() == PyTango.DevState.ON:
            #    continue
            # while not self.hamamatsu.state() == PyTango.DevState.EXTRACT:
            #    continue
            self.tTrigger.write_attribute('Value', 1)
            rot_pos = self.tPMAC.ReadMotorPos('Sample_Rot')
            time.sleep(0.005) # will be 10-13 ms
            self.tTrigger.write_attribute('Value', 0)
            if WaitForCamera:
                # Optionally wait until the camera reports ready again.
                out = self.tTriggerOut.read_attribute('Value')
                while out.value != 1:
                    out = self.tTriggerOut.read_attribute('Value')
        elif self.camera == "Lambda":
            self.tTrigger.write_attribute('Value', 1)
            rot_pos = self.tPMAC.ReadMotorPos('Sample_Rot')
            time.sleep(0.005) # will be 10-13 ms
            self.tTrigger.write_attribute('Value', 0)
        if writeLogs:
            # First column block of the log line + the sampled angle.
            self.fLog.write(_logdata.split('\n')[0]+str(rot_pos)+'\n')
            self.iCurr += 1
        return None
    def SendTriggerInOut(self,writeLogs = True,name = 'tomo',WaitForCamera= False):
        """Send one trigger pulse as soon as the camera signals trigger-ready,
        logging the rotation-stage angle sampled between the pulse edges.
        *name* is currently unused (the assignment below is commented out)."""
        #self.sIdentifier = name
        if writeLogs: _logdata = self.GetCurrentDataString(self.sIdentifier, 'start')
        # Wait for Trigger Ready from Camera
        out = self.tTriggerOut.read_attribute('Value')
        while out.value == 0:
            out = self.tTriggerOut.read_attribute('Value')
        # Send trigger
        self.tTrigger.write_attribute('Value', 1)
        # Read rotation stage position
        rot_pos = self.tPMAC.ReadMotorPos('Sample_Rot')
        self.tTrigger.write_attribute('Value', 0)
        if writeLogs:
            # First column block of the log line + the sampled angle.
            self.fLog.write(_logdata.split('\n')[0]+str(rot_pos)+'\n')
            self.iCurr += 1
        if WaitForCamera:
            # Busy-wait until the camera reports trigger-ready again.
            out = self.tTriggerOut.read_attribute('Value')
            while out.value == 0:
                out = self.tTriggerOut.read_attribute('Value')
        return None
def SendTrigger(self, writeLogs=True):
if writeLogs: _logdata = self.GetCurrentDataString(self.sIdentifier, 'start')
self.hamamatsu.sendTrigger()
rot_pos = self.tPMAC.ReadMotorPos('Sample_Rot')
self.tTrigger.write_attribute('Value', 0)
if writeLogs:
self.fLog.write(_logdata.split('\n')[0] + str(rot_pos) + '\n')
self.iCurr += 1
def TakeImageSeries(self, num_images, waitForCamera=False):
self.hamamatsu.writeAttribute('TriggerMode', 0)
self.hamamatsu.setFrameNumbers(num_images)
self.hamamatsu.acquireImage()
if waitForCamera:
self.hamamatsu.waitForCamera()
    def TakeFlatfieldCorrectedImage(self, pmac, inpos = None, refpos = None, motor = None, verbose = False):
        """Acquire a flatfield-corrected image: move *motor* to *refpos* for a
        reference frame, to *inpos* for the sample frame, divide the two and
        write the float32 ratio to <sPath>/abs/<curname>.raw.

        NOTE(review): self.currimage is only updated by the PCO code path --
        confirm the other cameras populate it before relying on this method.
        """
        if verbose: print('%s: Acquiring image %s'% (misc.GetTimeString(), self.curname))
        # Create the output folder for the corrected images on first use.
        if not os.path.exists(self.sPath + os.sep + 'abs'): os.mkdir(self.sPath + os.sep + 'abs')
        #try:
        # --- reference (flatfield) frame at refpos ---
        pmac.Move(motor, refpos)
        time.sleep(0.1)
        self.sIdentifier = 'ref'
        self.SetCurrentName(self.sIdentifier, iNumber= self.iNumber, iNumber2 = self.iNumber2, currNum = self.iCurr)
        self.LogWriteCurrentData(self.sIdentifier, 'start', logMotors = True)
        self.TakeImage(writeLogs = False)
        ref = copy.copy(self.currimage)
        self.LogWriteCurrentData(self.sIdentifier, 'end')
        # --- sample frame at inpos ---
        pmac.Move(motor, inpos)
        time.sleep(0.1)
        self.sIdentifier = 'img'
        self.SetCurrentName(self.sIdentifier, iNumber= self.iNumber, iNumber2 = self.iNumber2, currNum = self.iCurr)
        self.LogWriteCurrentData(self.sIdentifier, 'start', logMotors = True)
        self.TakeImage(writeLogs = False)
        img = copy.copy(self.currimage)
        self.LogWriteCurrentData(self.sIdentifier, 'end')
        # Pixel-wise ratio = flatfield-corrected absorption image.
        _abs = (1.0* img /ref).astype(numpy.float32)
        self.sIdentifier = 'abs'
        self.SetCurrentName(self.sIdentifier, iNumber= self.iNumber, iNumber2 = self.iNumber2, currNum = self.iCurr)
        _abs.tofile(self.sPath + '/abs/%s.raw' %self.curname)
        # except:
        #     print('%s: Error acquiring flatfield corrected image.'% (misc.GetTimeString()))
        #     return None
        self.iCurr += 1
        return None
    def HamaTakeRef(self, num_img=20):
        """Record *num_img* reference (flatfield) frames in fast-trigger mode,
        then wait for the camera to report ready and stop the live view."""
        time.sleep(0.1)
        i=-1
        self.hamamatsu.startLive()
        time.sleep(0.1)
        # One dummy pulse to arm the camera before the real sequence.
        self.TakeOneDummyImage()
        for i2 in range(num_img):
            i = i + 1
            time.sleep(0.01)
            self.TakeFastImage()
            print(i)
        #time.sleep(0.5)
        # Busy-wait until the camera's trigger-ready line goes high again.
        out = self.tTriggerOut.read_attribute('Value')
        while out.value != 1:
            out = self.tTriggerOut.read_attribute('Value')
        #time.sleep(0.1)
        self.hamamatsu.finishScan()
        return None
    def HamaTakeTomo(self,target_pos,rot="pos"):
        """On-the-fly tomography: start a non-blocking rotation to
        *target_pos* and fire fast exposures until the stage is within 1 deg
        of the target.  *rot* selects the travel direction ("pos"/"neg")."""
        self.tPMAC.Move('Sample_Rot',target_pos, WaitForMove=False)
        i=-1
        self.hamamatsu.startLive()
        time.sleep(0.1)
        # One dummy pulse to arm the camera before the real sequence.
        self.TakeOneDummyImage()
        if rot == "pos": # Move from negative to positive rotation angle
            while self.tPMAC.ReadMotorPos('Sample_Rot') <= target_pos-1:
                i = i + 1
                self.TakeFastImage()
                print(i)
        elif rot == "neg": # Move from positive to negative rotation angle
            while self.tPMAC.ReadMotorPos('Sample_Rot') >= target_pos+1:
                i = i + 1
                self.TakeFastImage()
                print(i)
        # Busy-wait until the camera's trigger-ready line goes high again.
        out = self.tTriggerOut.read_attribute('Value')
        while out.value != 1:
            out = self.tTriggerOut.read_attribute('Value')
        time.sleep(0.1)
        self.hamamatsu.finishScan()
        return None
    def TakeTomo(self, target_pos):
        """On-the-fly tomography for the Lambda detector: start a
        non-blocking rotation to *target_pos* and fire fast exposures
        (spaced by the exposure time) until the stage is within 1 deg."""
        # Changed for Lambda
        self.tPMAC.Move('Sample_Rot', target_pos, WaitForMove=False)
        i = -1
        self.hamamatsu.startLive()
        time.sleep(0.3) # was 0.1
        # nanoScript.TakeOneDummyImage()
        while self.tPMAC.ReadMotorPos('Sample_Rot') <= target_pos - 1:
            i = i + 1
            self.TakeFastImage()
            # Pace the pulses so the detector can finish each exposure.
            time.sleep(self.exptime + 0.01)
            print(i)
        time.sleep(0.1)
        self.hamamatsu.finishScan()
        return None
def LogWriteCurrentData(self, __identifier, __infostr, logMotors = False):
"""Write standard data in the scan logfile."""
tmp = self.GetCurrentDataString(__identifier, __infostr)
self.fLog.write(tmp)
if logMotors:
tmp = self.GetCurrentMotorPosString(__identifier, __infostr)
self.fMotorLog.write(tmp)
return None
#end WriteToScanLog
def GetCurrentDataString(self, __identifier, __infostr):
if self.iNumber == None:
iNumber = -1
else:
iNumber = self.iNumber
if self.iNumber2 == None:
iNumber2 = -1
else:
iNumber2 = self.iNumber2
if self.iCurr== None:
iCurr = -1
else:
iCurr = self.iCurr
if self.imgNumber == None:
imgNumber = self.hamamatsu.getImgNumber()
else:
imgNumber = self.imgNumber
tmp = '%s\t%s\t%04i\t%04i\t%04i\t%04i\t' %(self.__FormattedString(__identifier, 18), \
self.__FormattedString(__infostr, 5), iNumber, iNumber2, iCurr,imgNumber)
tmp += self.GetPETRAbeaminfo()
# tmp += self.GetPETRAcell4() # Orbit position beam, not needed ?!
# tmp += '%e\t' %self.tDCMenergy.read_attribute('Position').value # write dcm pos at begnning of scan
if self.disableSideBunchReacquisition == False:
tmp += '%04i\t%04i\t' %(self.tPETRAnbCleaning.read_attribute('SweepCounter').value, self.tPETRAnbCleaning.read_attribute('SweepThreshold').value)
if self.useEnviroLog:
for i1 in range(6):
tmp+= '%e\t' %(self.Environ[i1].read_attribute('Value').value*5*(numpy.mod(i1,2) + 1))
return tmp + '\n'
#end
def GetCurrentMotorPosString(self, __identifier, __infostr):
if self.iNumber == None:
iNumber = -1
else:
iNumber = self.iNumber
if self.iNumber2 == None:
iNumber2 = -1
else:
iNumber2 = self.iNumber2
if self.iCurr== None:
iCurr = -1
else:
iCurr = self.iCurr
tmp = '%s\t%s\t%04i\t%04i\t%04i\t' %(self.__FormattedString(__identifier, 18), \
self.__FormattedString(__infostr, 5), iNumber, iNumber2, iCurr)
tmp += self.tPMAC.ReadAllMotorPos()
try:
tmp += '%e\t%e\t%e\t' %(self.tScintiY.read_attribute('Position').value, self.tLensY.read_attribute('Position').value, self.tCamRot.read_attribute('Position').value)
except:
tmp += '%e\t%e\t%e\t' %(-1, -1, -1)
try:
tmp += '%e\t%e\t' %(self.tUndulator.read_attribute('Gap').value, self.tPitch.read_attribute('Position').value)
except:
tmp += '%e\t%e\t' %(-1, -1)
if self.useSmarAct:
try:
tmp += '%e\t%e\t%e\t%e\t' %(self.SM[0].read_attribute('Position').value, self.SM[1].read_attribute('Position').value, \
self.SM[2].read_attribute('Position').value, self.SM[3].read_attribute('Position').value)
except:
tmp += '%e\t%e\t%e\t%e\t' %(-1, -1, -1, -1)
return tmp + '\n'
#end GetCurrentMotorPosString
def BeamLogWriteData(self, beamdata):
if self.iNumber == None:
iNumber = -1
else:
iNumber = self.iNumber
if self.iNumber2 == None:
iNumber2 = -1
else:
iNumber2 = self.iNumber2
if self.iCurr== None:
iCurr = -1
else:
iCurr = self.iCurr
tmp = '%s\t%s\t%04i\t%04i\t%04i\n' %(self.__FormattedString(self.sIdentifier, 18), \
self.__FormattedString('PETRA', 5), iNumber, iNumber2, iCurr)
self.fBeamLog.write(tmp+beamdata)
return None
#end BeamLogWriteData
def WaitForBeam(self, PETRAcurrent = 95, valreturn = True):
"""Method to check for beam loss and wait until the beam is back to more
than 90% of the target value.
<PETRAcurrent>: current of the ring to be compared to."""
__retval = False
try:
while self.tPETRAinfo.read_attribute('BeamCurrent').value < 0.9*PETRAcurrent:
time.sleep(60)
print(misc.GetTimeString()+': Beam lost ... waiting for beam')
__retval = True
except:
print(misc.GetTimeString()+': TINE connection error')
if __retval:
# TODO Do you ever pass tPitch as 'qbpm2' or 'QBPM2'?
tPitch = PyTango.DeviceProxy(
proxies.motor_mono_01_tPitch) # tPitch = PyTango.DeviceProxy(proxies.motor_multi_25_tPitch) for DMM
OptimizePitch(tPitch, Detune=self.DCMdetune)
time.sleep(300)
if valreturn:
return __retval
else:
return None
#end WaitForBeam
def GetPETRAbeaminfo(self):
"""Layout: timestamp // Petra Beam Current (// Orbit RMSx // Orbit RMSy // QBPM current // QBPM pos x //QBPM pos y)
if not readable, return value is -1"""
__infostr = '%e\t' %(time.time()-self.starttime)
_attributevals = ['BeamCurrent'] # , 'OrbitRMSX', 'OrbitRMSY']
for _att in _attributevals:
try:
tmp = self.tPETRAinfo.read_attribute(_att).value
__infostr += '%010.6f\t' %tmp
except:
__infostr+= '-01.000000\t'
# try: # this block was for qbpm
# __infostr += '-01.000000\t-01.000000\t-01.000000\t'
# #tmp = self.tQBPM.read_attribute('PosAndAvgCurr').value
# #__infostr += '%e\t%e\t%e\t' %(tmp[2], tmp[0], tmp[1])
# except:
# __infostr += '-01.000000\t-01.000000\t-01.000000\t'
return __infostr
#end GetPETRAbeaminfo
def GetPETRAcell4(self):
"""Layout: BeamXAngleDeltaCell4 (in umrad) / BeamXPosDeltaCell4 (in um) / BeamYAngleDeltaCell4 (in umrad) / BeamYPosDeltaCell4 (in um)
if not readable, return value is -1"""
__infostr = ''
_attributevals = ['Xposition', 'XpositionSoll','Xangle', 'XangleSoll', 'Yangle', 'YangleSoll', 'Yposition', 'YpositionSoll']
for _att in _attributevals:
try:
# tmp = self.tPETRAcell4.read_attribute(_att).value
__infostr += '%010.6f\t' %(-1) #tmp
except:
__infostr+= '-01.000000\t'
return __infostr
#end GetPETRAcell4
def FinishScan(self):
"""Cleanup routines and closing of logfile"""
tmp = self.GetCurrentMotorPosString("End Scan", "Stop")
self.fMotorLog.write(tmp)
self.fLog.close()
self.fMotorLog.close()
self.fBeamLog.close()
# if self.camera not in ['KITnikon', 'PCOcamware', None, 'Hamamatsu','Zyla']:
# self.camera.finishScan()
if self.camera == 'Hamamatsu':
self.hamamatsu.finishScan()
if self.closeShutter:
self.BeamshutterClose()
print(misc.GetTimeString()+': Finished scan %s' %self.sPrefix)
return None
#end FinishScan
| [
"time.sleep",
"p05.tools.misc.GetTimeString",
"p05.scripts.OptimizePitch.OptimizePitch",
"sys.exit",
"copy.copy",
"numpy.mod",
"os.path.exists",
"p05.nanoCameras.KIT_nanoCam",
"shutil.copy2",
"os.path.split",
"p05.nanoCameras.Hamamatsu_nanoCam",
"os.mkdir",
"p05.nanoCameras.PCO_nanoCam",
"... | [((3370, 3446), 'shutil.copy2', 'shutil.copy2', (['currScript', "(self.sPath + '%s__LogScript.py.log' % self.sPrefix)"], {}), "(currScript, self.sPath + '%s__LogScript.py.log' % self.sPrefix)\n", (3382, 3446), False, 'import shutil\n'), ((3475, 3486), 'time.time', 'time.time', ([], {}), '()\n', (3484, 3486), False, 'import time\n'), ((7589, 7634), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.tQBPM_i404_exp02'], {}), '(proxies.tQBPM_i404_exp02)\n', (7608, 7634), False, 'import PyTango\n'), ((7661, 7704), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.tPETRA_globals'], {}), '(proxies.tPETRA_globals)\n', (7680, 7704), False, 'import PyTango\n'), ((7732, 7781), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.tPETRA_undbpos_cell4'], {}), '(proxies.tPETRA_undbpos_cell4)\n', (7751, 7781), False, 'import PyTango\n'), ((7814, 7875), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.tPETRAnbCleaning_umschaltmanager'], {}), '(proxies.tPETRAnbCleaning_umschaltmanager)\n', (7833, 7875), False, 'import PyTango\n'), ((7904, 7946), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.tBeam_shutter'], {}), '(proxies.tBeam_shutter)\n', (7923, 7946), False, 'import PyTango\n'), ((7969, 8018), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.motor_mono_01_tPitch'], {}), '(proxies.motor_mono_01_tPitch)\n', (7988, 8018), False, 'import PyTango\n'), ((8040, 8088), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.motor_mono_02_tRoll'], {}), '(proxies.motor_mono_02_tRoll)\n', (8059, 8088), False, 'import PyTango\n'), ((8115, 8156), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.tUndulator_1'], {}), '(proxies.tUndulator_1)\n', (8134, 8156), False, 'import PyTango\n'), ((8181, 8231), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.motor_eh1_05_tScintiY'], {}), '(proxies.motor_eh1_05_tScintiY)\n', (8200, 8231), False, 'import PyTango\n'), ((8254, 8302), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', 
(['proxies.motor_eh1_06_tLensY'], {}), '(proxies.motor_eh1_06_tLensY)\n', (8273, 8302), False, 'import PyTango\n'), ((8326, 8375), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.motor_eh1_07_tCamRot'], {}), '(proxies.motor_eh1_07_tCamRot)\n', (8345, 8375), False, 'import PyTango\n'), ((8402, 8456), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.dcmener_s01_01_tDCMenergy'], {}), '(proxies.dcmener_s01_01_tDCMenergy)\n', (8421, 8456), False, 'import PyTango\n'), ((10413, 10442), 'os.path.exists', 'os.path.exists', (['self.sLogfile'], {}), '(self.sLogfile)\n', (10427, 10442), False, 'import os\n'), ((13651, 13666), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (13661, 13666), False, 'import time\n'), ((15214, 15228), 'time.sleep', 'time.sleep', (['(40)'], {}), '(40)\n', (15224, 15228), False, 'import time\n'), ((15528, 15542), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (15538, 15542), False, 'import time\n'), ((22119, 22135), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (22129, 22135), False, 'import time\n'), ((26754, 26769), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (26764, 26769), False, 'import time\n'), ((27054, 27079), 'copy.copy', 'copy.copy', (['self.currimage'], {}), '(self.currimage)\n', (27063, 27079), False, 'import copy\n'), ((27179, 27194), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (27189, 27194), False, 'import time\n'), ((27479, 27504), 'copy.copy', 'copy.copy', (['self.currimage'], {}), '(self.currimage)\n', (27488, 27504), False, 'import copy\n'), ((28056, 28071), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (28066, 28071), False, 'import time\n'), ((28128, 28143), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (28138, 28143), False, 'import time\n'), ((28740, 28755), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (28750, 28755), False, 'import time\n'), ((29414, 29429), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (29424, 29429), 
False, 'import time\n'), ((29679, 29694), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (29689, 29694), False, 'import time\n'), ((29946, 29961), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (29956, 29961), False, 'import time\n'), ((4471, 4528), 'p05.nanoCameras.FLIeh2_nanoCam', 'FLIeh2_nanoCam', ([], {'imageDir': 'self.sPath', 'exptime': 'self.exptime'}), '(imageDir=self.sPath, exptime=self.exptime)\n', (4485, 4528), False, 'from p05.nanoCameras import FLIeh2_nanoCam, Hamamatsu_nanoCam, KIT_nanoCam, Lambda_nanoCam, PCO_nanoCam, PixelLink_nanoCam, Zyla_nanoCam\n'), ((4769, 4829), 'p05.nanoCameras.Hamamatsu_nanoCam', 'Hamamatsu_nanoCam', ([], {'imageDir': 'self.sPath', 'exptime': 'self.exptime'}), '(imageDir=self.sPath, exptime=self.exptime)\n', (4786, 4829), False, 'from p05.nanoCameras import FLIeh2_nanoCam, Hamamatsu_nanoCam, KIT_nanoCam, Lambda_nanoCam, PCO_nanoCam, PixelLink_nanoCam, Zyla_nanoCam\n'), ((4862, 4909), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.register_eh2_out03'], {}), '(proxies.register_eh2_out03)\n', (4881, 4909), False, 'import PyTango\n'), ((4963, 5009), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.register_eh2_in08'], {}), '(proxies.register_eh2_in08)\n', (4982, 5009), False, 'import PyTango\n'), ((5574, 5621), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.register_eh2_out03'], {}), '(proxies.register_eh2_out03)\n', (5593, 5621), False, 'import PyTango\n'), ((5677, 5723), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.register_eh2_in08'], {}), '(proxies.register_eh2_in08)\n', (5696, 5723), False, 'import PyTango\n'), ((5822, 5873), 'p05.nanoCameras.PixelLink_nanoCam', 'PixelLink_nanoCam', ([], {'imageDir': 'self.sPath', 'exptime': '(1.0)'}), '(imageDir=self.sPath, exptime=1.0)\n', (5839, 5873), False, 'from p05.nanoCameras import FLIeh2_nanoCam, Hamamatsu_nanoCam, KIT_nanoCam, Lambda_nanoCam, PCO_nanoCam, PixelLink_nanoCam, Zyla_nanoCam\n'), ((5906, 5945), 
'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.dac_eh1_01'], {}), '(proxies.dac_eh1_01)\n', (5925, 5945), False, 'import PyTango\n'), ((6091, 6146), 'p05.nanoCameras.Zyla_nanoCam', 'Zyla_nanoCam', ([], {'imageDir': 'self.sPath', 'exptime': 'self.exptime'}), '(imageDir=self.sPath, exptime=self.exptime)\n', (6103, 6146), False, 'from p05.nanoCameras import FLIeh2_nanoCam, Hamamatsu_nanoCam, KIT_nanoCam, Lambda_nanoCam, PCO_nanoCam, PixelLink_nanoCam, Zyla_nanoCam\n'), ((6179, 6226), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.register_eh2_out03'], {}), '(proxies.register_eh2_out03)\n', (6198, 6226), False, 'import PyTango\n'), ((6446, 6504), 'p05.nanoCameras.KIT_nanoCam', 'KIT_nanoCam', ([], {'imageDir': 'self.sPath_Cam', 'exptime': 'self.exptime'}), '(imageDir=self.sPath_Cam, exptime=self.exptime)\n', (6457, 6504), False, 'from p05.nanoCameras import FLIeh2_nanoCam, Hamamatsu_nanoCam, KIT_nanoCam, Lambda_nanoCam, PCO_nanoCam, PixelLink_nanoCam, Zyla_nanoCam\n'), ((6537, 6584), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.register_eh1_out01'], {}), '(proxies.register_eh1_out01)\n', (6556, 6584), False, 'import PyTango\n'), ((6711, 6772), 'p05.nanoCameras.Lambda_nanoCam', 'Lambda_nanoCam', ([], {'imageDir': 'self.sPath_Cam', 'exptime': 'self.exptime'}), '(imageDir=self.sPath_Cam, exptime=self.exptime)\n', (6725, 6772), False, 'from p05.nanoCameras import FLIeh2_nanoCam, Hamamatsu_nanoCam, KIT_nanoCam, Lambda_nanoCam, PCO_nanoCam, PixelLink_nanoCam, Zyla_nanoCam\n'), ((6801, 6848), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.register_eh2_out03'], {}), '(proxies.register_eh2_out03)\n', (6820, 6848), False, 'import PyTango\n'), ((6969, 7027), 'p05.nanoCameras.PCO_nanoCam', 'PCO_nanoCam', ([], {'imageDir': 'self.sPath_Cam', 'exptime': 'self.exptime'}), '(imageDir=self.sPath_Cam, exptime=self.exptime)\n', (6980, 7027), False, 'from p05.nanoCameras import FLIeh2_nanoCam, Hamamatsu_nanoCam, KIT_nanoCam, Lambda_nanoCam, 
PCO_nanoCam, PixelLink_nanoCam, Zyla_nanoCam\n'), ((7060, 7099), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.dac_eh1_01'], {}), '(proxies.dac_eh1_01)\n', (7079, 7099), False, 'import PyTango\n'), ((7338, 7382), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.labmotion_exp01'], {}), '(proxies.labmotion_exp01)\n', (7357, 7382), False, 'import PyTango\n'), ((8523, 8571), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.tStatusServer_p05ct'], {}), '(proxies.tStatusServer_p05ct)\n', (8542, 8571), False, 'import PyTango\n'), ((9257, 9285), 'numpy.zeros', 'numpy.zeros', (['(4)'], {'dtype': 'object'}), '(4, dtype=object)\n', (9268, 9285), False, 'import numpy\n'), ((9313, 9358), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.smaract_eh1_cha0'], {}), '(proxies.smaract_eh1_cha0)\n', (9332, 9358), False, 'import PyTango\n'), ((9384, 9429), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.smaract_eh1_cha1'], {}), '(proxies.smaract_eh1_cha1)\n', (9403, 9429), False, 'import PyTango\n'), ((9455, 9500), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.smaract_eh1_cha3'], {}), '(proxies.smaract_eh1_cha3)\n', (9474, 9500), False, 'import PyTango\n'), ((9526, 9571), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.smaract_eh1_cha4'], {}), '(proxies.smaract_eh1_cha4)\n', (9545, 9571), False, 'import PyTango\n'), ((9698, 9744), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.tDiode_adc_eh1_01'], {}), '(proxies.tDiode_adc_eh1_01)\n', (9717, 9744), False, 'import PyTango\n'), ((9802, 9830), 'numpy.zeros', 'numpy.zeros', (['(6)'], {'dtype': 'object'}), '(6, dtype=object)\n', (9813, 9830), False, 'import numpy\n'), ((9861, 9900), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.adc_eh1_01'], {}), '(proxies.adc_eh1_01)\n', (9880, 9900), False, 'import PyTango\n'), ((9931, 9970), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.adc_eh1_02'], {}), '(proxies.adc_eh1_02)\n', (9950, 9970), False, 'import 
PyTango\n'), ((10001, 10040), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.adc_eh1_03'], {}), '(proxies.adc_eh1_03)\n', (10020, 10040), False, 'import PyTango\n'), ((10071, 10110), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.adc_eh1_04'], {}), '(proxies.adc_eh1_04)\n', (10090, 10110), False, 'import PyTango\n'), ((10141, 10180), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.adc_eh1_05'], {}), '(proxies.adc_eh1_05)\n', (10160, 10180), False, 'import PyTango\n'), ((10211, 10250), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.adc_eh1_06'], {}), '(proxies.adc_eh1_06)\n', (10230, 10250), False, 'import PyTango\n'), ((22967, 22998), 'time.sleep', 'time.sleep', (['(self.exptime + 0.01)'], {}), '(self.exptime + 0.01)\n', (22977, 22998), False, 'import time\n'), ((23253, 23278), 'numpy.float32', 'numpy.float32', (['self.image'], {}), '(self.image)\n', (23266, 23278), False, 'import numpy\n'), ((26616, 26659), 'os.path.exists', 'os.path.exists', (["(self.sPath + os.sep + 'abs')"], {}), "(self.sPath + os.sep + 'abs')\n", (26630, 26659), False, 'import os\n'), ((26661, 26698), 'os.mkdir', 'os.mkdir', (["(self.sPath + os.sep + 'abs')"], {}), "(self.sPath + os.sep + 'abs')\n", (26669, 26698), False, 'import os\n'), ((28245, 28261), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (28255, 28261), False, 'import time\n'), ((29885, 29916), 'time.sleep', 'time.sleep', (['(self.exptime + 0.01)'], {}), '(self.exptime + 0.01)\n', (29895, 29916), False, 'import time\n'), ((34680, 34729), 'PyTango.DeviceProxy', 'PyTango.DeviceProxy', (['proxies.motor_mono_01_tPitch'], {}), '(proxies.motor_mono_01_tPitch)\n', (34699, 34729), False, 'import PyTango\n'), ((34830, 34874), 'p05.scripts.OptimizePitch.OptimizePitch', 'OptimizePitch', (['tPitch'], {'Detune': 'self.DCMdetune'}), '(tPitch, Detune=self.DCMdetune)\n', (34843, 34874), False, 'from p05.scripts.OptimizePitch import OptimizePitch\n'), ((34887, 34902), 'time.sleep', 'time.sleep', 
(['(300)'], {}), '(300)\n', (34897, 34902), False, 'import time\n'), ((10683, 10693), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10691, 10693), False, 'import sys\n'), ((24006, 24023), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (24016, 24023), False, 'import time\n'), ((34364, 34378), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (34374, 34378), False, 'import time\n'), ((35256, 35267), 'time.time', 'time.time', ([], {}), '()\n', (35265, 35267), False, 'import time\n'), ((37185, 37205), 'p05.tools.misc.GetTimeString', 'misc.GetTimeString', ([], {}), '()\n', (37203, 37205), True, 'import p05.tools.misc as misc\n'), ((3271, 3299), 'os.path.split', 'os.path.split', (['self.sLogfile'], {}), '(self.sLogfile)\n', (3284, 3299), False, 'import os\n'), ((3329, 3357), 'os.path.split', 'os.path.split', (['self.sLogfile'], {}), '(self.sLogfile)\n', (3342, 3357), False, 'import os\n'), ((17932, 17946), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (17942, 17946), False, 'import time\n'), ((24461, 24478), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (24471, 24478), False, 'import time\n'), ((14685, 14705), 'p05.tools.misc.GetTimeString', 'misc.GetTimeString', ([], {}), '()\n', (14703, 14705), True, 'import p05.tools.misc as misc\n'), ((14996, 15016), 'p05.tools.misc.GetTimeString', 'misc.GetTimeString', ([], {}), '()\n', (15014, 15016), True, 'import p05.tools.misc as misc\n'), ((19181, 19201), 'p05.tools.misc.GetTimeString', 'misc.GetTimeString', ([], {}), '()\n', (19199, 19201), True, 'import p05.tools.misc as misc\n'), ((26564, 26584), 'p05.tools.misc.GetTimeString', 'misc.GetTimeString', ([], {}), '()\n', (26582, 26584), True, 'import p05.tools.misc as misc\n'), ((34401, 34421), 'p05.tools.misc.GetTimeString', 'misc.GetTimeString', ([], {}), '()\n', (34419, 34421), True, 'import p05.tools.misc as misc\n'), ((34524, 34544), 'p05.tools.misc.GetTimeString', 'misc.GetTimeString', ([], {}), '()\n', (34542, 34544), True, 'import 
p05.tools.misc as misc\n'), ((17817, 17837), 'p05.tools.misc.GetTimeString', 'misc.GetTimeString', ([], {}), '()\n', (17835, 17837), True, 'import p05.tools.misc as misc\n'), ((31738, 31754), 'numpy.mod', 'numpy.mod', (['i1', '(2)'], {}), '(i1, 2)\n', (31747, 31754), False, 'import numpy\n')] |
"""Test drawing module
"""
import pytest
import numpy as np
from shellplot.axis import Axis
from shellplot.drawing import (
LegendItem,
_draw_canvas,
_draw_legend,
_draw_x_axis,
_draw_y_axis,
_pad_lines,
)
def test_draw_legend():
    """Each legend item renders as one '<marker> <name>' line."""
    items = [LegendItem(1, "one"), LegendItem(2, "two")]
    expected = ["+ one", "* two"]
    assert _draw_legend(items) == expected
@pytest.mark.parametrize(
    "lines,ref_lines,expected_padded_lines",
    [
        (["a", "b"], ["a", "b", "c"], ["", "a", "b"]),
        (None, ["a", "b", "c"], ["", "", ""]),
    ],
)
def test_pad_lines(lines, ref_lines, expected_padded_lines):
    """_pad_lines should front-pad ``lines`` with empty strings up to the
    length of ``ref_lines``; a ``None`` input yields all-empty lines.

    Fixes the ``expecte_padded_lines`` -> ``expected_padded_lines`` typo in
    the parametrize argnames and the matching parameter name.
    """
    padded_lines = _pad_lines(lines, ref_lines)
    assert padded_lines == expected_padded_lines
@pytest.mark.parametrize(
    "axis,expected_axis_lines",
    [
        (
            Axis(display_length=51, label="my_fun_label", limits=(0, 1)),
            [
                "└┬---------┬---------┬---------┬---------┬---------┬\n",
                " 0.0       0.2       0.4       0.6       0.8       1.0\n",
                "                     my_fun_label",
            ],
        ),
        (
            Axis(display_length=51, label="my_fun_label", limits=(0, 0.01)),
            [
                "└┬---------┬---------┬---------┬---------┬---------┬\n",
                " 0.0       0.002     0.004     0.006     0.008     0.01\n",
                "                     my_fun_label",
            ],
        ),
    ],
)
def test_draw_x_axis(axis, expected_axis_lines):
    """An x-axis renders as a tick line, a tick-label line and the axis label."""
    rendered = _draw_x_axis(x_axis=axis, left_pad=0)
    assert rendered == expected_axis_lines
@pytest.mark.parametrize(
    "axis,label,limits, expected_axis_lines",
    [
        (
            Axis(display_length=16),
            "my_fun_label",
            (0, 1),
            [
                "    my_fun_label",
                "  0.99┤",
                "      |",
                "      |",
                "      |",
                "      |",
                "  0.66┤",
                "      |",
                "      |",
                "      |",
                "      |",
                "  0.33┤",
                "      |",
                "      |",
                "      |",
                "      |",
                "   0.0┤",
            ],
        ),
    ],
)
def test_draw_y_axis(axis, label, limits, expected_axis_lines):
    """A y-axis renders the label first, then ticks top-down with '|' fillers."""
    axis.label = label
    axis.limits = limits
    rendered = _draw_y_axis(y_axis=axis, left_pad=10)
    assert rendered == expected_axis_lines
@pytest.mark.parametrize(
    "canvas,expected_canvas_lines",
    [
        (
            np.array(
                [
                    [0, 0, 0, 0, 5],
                    [0, 0, 0, 4, 0],
                    [0, 0, 3, 0, 0],
                    [0, 2, 0, 0, 0],
                    [1, 0, 0, 0, 0],
                ]
            ),
            ["@    ", " x   ", "  o  ", "   * ", "    +"],
        ),
    ],
)
def test_draw_canvas(canvas, expected_canvas_lines):
    """Non-zero canvas cells draw a marker character; zero cells draw blanks."""
    rendered = _draw_canvas(canvas)
    assert rendered == expected_canvas_lines
| [
"shellplot.drawing._draw_canvas",
"shellplot.axis.Axis",
"shellplot.drawing._draw_y_axis",
"shellplot.drawing._draw_x_axis",
"pytest.mark.parametrize",
"numpy.array",
"shellplot.drawing._pad_lines",
"shellplot.drawing._draw_legend",
"shellplot.drawing.LegendItem"
] | [((405, 566), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lines,ref_lines,expecte_padded_lines"""', "[(['a', 'b'], ['a', 'b', 'c'], ['', 'a', 'b']), (None, ['a', 'b', 'c'], ['',\n '', ''])]"], {}), "('lines,ref_lines,expecte_padded_lines', [(['a', 'b'\n ], ['a', 'b', 'c'], ['', 'a', 'b']), (None, ['a', 'b', 'c'], ['', '', ''])]\n )\n", (428, 566), False, 'import pytest\n'), ((670, 698), 'shellplot.drawing._pad_lines', '_pad_lines', (['lines', 'ref_lines'], {}), '(lines, ref_lines)\n', (680, 698), False, 'from shellplot.drawing import LegendItem, _draw_canvas, _draw_legend, _draw_x_axis, _draw_y_axis, _pad_lines\n'), ((1541, 1578), 'shellplot.drawing._draw_x_axis', '_draw_x_axis', ([], {'x_axis': 'axis', 'left_pad': '(0)'}), '(x_axis=axis, left_pad=0)\n', (1553, 1578), False, 'from shellplot.drawing import LegendItem, _draw_canvas, _draw_legend, _draw_x_axis, _draw_y_axis, _pad_lines\n'), ((2504, 2542), 'shellplot.drawing._draw_y_axis', '_draw_y_axis', ([], {'y_axis': 'axis', 'left_pad': '(10)'}), '(y_axis=axis, left_pad=10)\n', (2516, 2542), False, 'from shellplot.drawing import LegendItem, _draw_canvas, _draw_legend, _draw_x_axis, _draw_y_axis, _pad_lines\n'), ((3074, 3094), 'shellplot.drawing._draw_canvas', '_draw_canvas', (['canvas'], {}), '(canvas)\n', (3086, 3094), False, 'from shellplot.drawing import LegendItem, _draw_canvas, _draw_legend, _draw_x_axis, _draw_y_axis, _pad_lines\n'), ((272, 292), 'shellplot.drawing.LegendItem', 'LegendItem', (['(1)', '"""one"""'], {}), "(1, 'one')\n", (282, 292), False, 'from shellplot.drawing import LegendItem, _draw_canvas, _draw_legend, _draw_x_axis, _draw_y_axis, _pad_lines\n'), ((294, 314), 'shellplot.drawing.LegendItem', 'LegendItem', (['(2)', '"""two"""'], {}), "(2, 'two')\n", (304, 314), False, 'from shellplot.drawing import LegendItem, _draw_canvas, _draw_legend, _draw_x_axis, _draw_y_axis, _pad_lines\n'), ((381, 401), 'shellplot.drawing._draw_legend', '_draw_legend', (['legend'], {}), 
'(legend)\n', (393, 401), False, 'from shellplot.drawing import LegendItem, _draw_canvas, _draw_legend, _draw_x_axis, _draw_y_axis, _pad_lines\n'), ((835, 895), 'shellplot.axis.Axis', 'Axis', ([], {'display_length': '(51)', 'label': '"""my_fun_label"""', 'limits': '(0, 1)'}), "(display_length=51, label='my_fun_label', limits=(0, 1))\n", (839, 895), False, 'from shellplot.axis import Axis\n'), ((1161, 1224), 'shellplot.axis.Axis', 'Axis', ([], {'display_length': '(51)', 'label': '"""my_fun_label"""', 'limits': '(0, 0.01)'}), "(display_length=51, label='my_fun_label', limits=(0, 0.01))\n", (1165, 1224), False, 'from shellplot.axis import Axis\n'), ((1723, 1746), 'shellplot.axis.Axis', 'Axis', ([], {'display_length': '(16)'}), '(display_length=16)\n', (1727, 1746), False, 'from shellplot.axis import Axis\n'), ((2677, 2777), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 5], [0, 0, 0, 4, 0], [0, 0, 3, 0, 0], [0, 2, 0, 0, 0], [1, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 5], [0, 0, 0, 4, 0], [0, 0, 3, 0, 0], [0, 2, 0, 0, 0\n ], [1, 0, 0, 0, 0]])\n', (2685, 2777), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.