arxiv_id stringlengths 0 16 | text stringlengths 10 1.65M |
|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 8 14:44:34 2018
@author: jack.lingheng.meng
"""
import tensorflow as tf
import numpy as np
import time
import gym
from Environment.LASEnv import LASEnv
from LASAgent.RandomLASAgent import RandomLASAgent
from LASAgent.LASAgent_Actor_Critic import LASAgent_Actor_Critic
from Environment.VisitorEnv import VisitorEnv
from VisitorAgent.RedLightExcitedVisitorAgent import RedLightExcitedVisitorAgent
# Entry point: runs a DDPG-style Actor-Critic LAS agent on the Gym
# Pendulum-v0 task for a fixed number of episodes, rendering each step.
if __name__ == '__main__':
    with tf.Session() as sess:
        # Instantiate environments.
        # NOTE(review): mountain_car_env is created but never used below —
        # confirm it can be removed.
        mountain_car_env = gym.make('MountainCarContinuous-v0')
        pendulum_env = gym.make('Pendulum-v0')
        observation_For_LAS = pendulum_env.reset()
        # Instantiate LAS-agent (actor-critic with target-network soft updates)
        LASAgent1 = LASAgent_Actor_Critic(sess, pendulum_env,
                                          actor_lr = 0.0001, actor_tau = 0.001,
                                          critic_lr = 0.0001, critic_tau = 0.001, gamma = 0.99,
                                          minibatch_size = 64,
                                          max_episodes = 50000, max_episode_len = 1000,
                                          # Exploration Strategies
                                          exploration_action_noise_type = 'ou_0.2',
                                          exploration_epsilon_greedy_type = 'none',
                                          # Save Summaries
                                          save_dir = '../ROM_Experiment_results/LASAgentActorCritic/',
                                          experiment_runs = 'run5',
                                          # Save and Restore Actor-Critic Model
                                          restore_actor_model_flag = False,
                                          restore_critic_model_flag = False)
        # Global step counter (across all episodes)
        i = 1
        epo_num = 100  # number of episodes to run
        for epo_i in range(epo_num):
            done = False
            reward_for_LAS = 0
            observation_For_LAS = pendulum_env.reset()
            while not done:
                pendulum_env.render()
                # LAS interacts with environment: the agent sees the reward of
                # the PREVIOUS transition when choosing this step's action.
                actionLAS = LASAgent1.perceive_and_act(observation_For_LAS, reward_for_LAS, done)
                # delay the observing of consequence of LASAgent's action
                observation_For_LAS, reward_for_LAS, done, info = pendulum_env.step(actionLAS)
                print("LAS Step: {}, reward: {}".format(i, reward_for_LAS))
                i += 1
import numpy as np
from scipy.spatial.distance import cdist
# Squared-exponential (RBF) kernel matrix between two point sets.
K = lambda x, y, bw: np.exp(-0.5*cdist(x, y, 'sqeuclidean') / bw**2)


def mmd(x: np.ndarray, y: np.ndarray, bw: float) -> float:
    """Computes the maximum mean discrepancy between two samples. This is a measure
    of the similarity of two distributions that generate the input samples.

    Args:
        x: First set of samples, shape (m, d).
        y: Second set of samples, shape (n, d).
        bw: Bandwidth parameter to use in computing the squared exponential
            kernel.

    Returns:
        u: An unbiased estimator of the maximum mean discrepancy.
    """
    m = len(x)
    n = len(y)
    # Compute each kernel matrix with a single cdist call instead of the
    # original one-call-per-row Python loop (same values, O(m+n) fewer calls).
    Kxx = K(x, x, bw)
    Kyy = K(y, y, bw)
    Kxy = K(x, y, bw)
    # Unbiased within-sample terms exclude the diagonal (self-kernel) entries,
    # exactly as the original subtracted Ka[0, i] / Kb[0, i] per row.
    a = (Kxx.sum() - np.trace(Kxx)) / (m * (m - 1))
    b = (Kyy.sum() - np.trace(Kyy)) / (n * (n - 1))
    # Cross term carries the -2/(mn) factor of the U-statistic.
    c = -2.0 * Kxy.sum() / (m * n)
    return a + b + c
from os.path import dirname, abspath
import numpy as np
import cv2
import matplotlib.pyplot as plt
# Global constants and parameters
OUTPUT_DIR = dirname(dirname(abspath(__file__))) + "/output_images/"  # sibling output_images/ dir
LANE_REGION_HEIGHT = 0.63 # top boundary of perspective-transform trapezoid (% of ysize)
LANE_REGION_UPPER_WIDTH = 0.05 # upper half-width (% of xsize, X2 for actual width)
LANE_REGION_LOWER_WIDTH = 0.45 # lower half-width (% of xsize, X2 for actual width)
LANE_WIDTH_POSTWARP = 0.25 # half-width of region to use after perspective txfm (% of xsize)
# Test values, used for attempting to clear the "Harder Challenge"
# This doesn't work when the curve of the lane is too sharp, so don't use this
# LANE_REGION_HEIGHT = 0.68 # top boundary (% of ysize)
# LANE_REGION_UPPER_WIDTH = 0.12 # upper width (% of xsize, X2 for actual width)
# LANE_REGION_LOWER_WIDTH = 0.45 # lower width (% of xsize, X2 for actual width)
# LANE_WIDTH_POSTWARP = 0.20 # width of region to use after perspective txfm
L_ADJ_FACTOR = 42 # lightness threshold laxness (higher = more lax)
S_ADJ_FACTOR = 42 # saturation threshold laxness (higher = more lax)
SOBEL_L_THRESHOLD = 20 # gradient (lightness channel, x direction) threshold
SOBEL_S_THRESHOLD = 20 # gradient (saturation channel, x direction) threshold
SOBEL_KERNEL = 15 # kernel size for sobel gradient
LANE_EXTENTS = (600, 679) # x-values to slice the center of the lane to use in averaging
NWINDOWS = 10 # For sliding window pixel search, number of slices per image
MARGIN = 40 # (Margin * 2) is the width of each window slice
MINPIX = 20 # Number of found pixels needed to force a shift of the next window
MY = 30 / 720 # meters per pixel in y dimension (in warped, top-down view)
MX = 3.7 / 700 # meters per pixel in x dimension (in warped, top-down view)
PLOT = True # toggle whether to generate images for intermediate steps
# Finds lane lines on an image, marks them, and displays the radius of curvature
# and the delta between the center of the image and the center of the lane.
# Inputs:
#   image         image to process
#   mtx           camera matrix coefficients
#   dist          distortion coefficients
#   lheight       top boundary of region for perspective transform (% of img height)
#   lupwidth      upper width of region for perspective transform (% of img width, X2)
#   llowwidth     lower width of region for perspective transform (% of img width, X2)
#   lwidthwarped  width of search region post-perspective transform
#   left_fit      prior fit coefficients for left lane line, speeds up pixel search.
#                 if left as default "None", will proceed to pixel search independently.
#   right_fit     prior fit coefficients for right lane line, speeds up pixel search.
#                 if left as default "None", will proceed to pixel search independently.
#   my            conversion factor, meters per pixel in y dimension
#   mx            conversion factor, meters per pixel in x dimension
# Outputs:
#   output_img    processed image with lane lines marked and measurements displayed
#   left_fit      fit coefficients for left lane line, to be used next frame
#   right_fit     fit coefficients for right lane line, to be used next frame
def process_image(image, mtx, dist,
                  lheight=LANE_REGION_HEIGHT,
                  lupwidth=LANE_REGION_UPPER_WIDTH,
                  llowwidth=LANE_REGION_LOWER_WIDTH,
                  lwidthwarped=LANE_WIDTH_POSTWARP,
                  left_fit=None, right_fit=None, my=MY, mx=MX):
    # First, undistort the input image according to the calibration coefficients
    undist = cv2.undistort(image, mtx, dist, None, mtx)
    # Calculate corner x-y values of the trapezoid used for the perspective transform
    xsize = image.shape[1]
    ysize = image.shape[0]
    x_ul, x_ur = int(xsize * (0.5 - lupwidth)), int(xsize * (0.5 + lupwidth))    # upper corners
    x_ll, x_lr = int(xsize * (0.5 - llowwidth)), int(xsize * (0.5 + llowwidth))  # lower corners
    y_top = int(ysize * lheight)
    # Post-warp rectangle boundaries
    x_wl, x_wr = int(xsize * (0.5 - lwidthwarped)), int(xsize * (0.5 + lwidthwarped))
    # Apply perspective transform to get a top-down view of the road ahead
    src_points = np.float32([[x_ul, y_top], [x_ur, y_top], [x_ll, ysize], [x_lr, ysize]])
    dst_points = np.float32([[x_wl, 0], [x_wr, 0], [x_wl, ysize], [x_wr, ysize]])
    warped_image = warp(undist, src_points, dst_points)
    # Apply color (R-channel and S-channel) and gradient thresholding
    thresd_warped = thresholding(warped_image)
    # With prior fits available, search only around the previous polynomials
    # (cheaper); otherwise do the full sliding-window pixel search.
    if left_fit is None or right_fit is None:
        warped_lane, left_fit, right_fit = find_polynomial(thresd_warped)
    else:
        warped_lane, left_fit, right_fit = search_around_poly(thresd_warped,
                                                              left_fit, right_fit)
    lane_overlay = warp(warped_lane, dst_points, src_points)  # switching src/dst unwarps
    overlaid_img = weighted_img(lane_overlay, undist)
    # Write curvature / off-center measurements onto the frame
    output_img = measure_stats(overlaid_img, left_fit, right_fit, my, mx)

    ### Visualization ###
    if PLOT:
        # Check undistorted image (perspective trapezoid drawn in red)
        save_undist = np.copy(undist)
        cv2.line(save_undist, (x_ul, y_top), (x_ur, y_top), color=(255, 0, 0), thickness=3)
        cv2.line(save_undist, (x_ur, y_top), (x_lr, ysize), color=(255, 0, 0), thickness=3)
        cv2.line(save_undist, (x_lr, ysize), (x_ll, ysize), color=(255, 0, 0), thickness=3)
        cv2.line(save_undist, (x_ll, ysize), (x_ul, y_top), color=(255, 0, 0), thickness=3)
        plt.imsave(OUTPUT_DIR + "01_undistort.jpg", save_undist)
        plt.imshow(save_undist)
        plt.show()
        # Check warped image (post-warp search rectangle drawn in red)
        save_warped_img = np.copy(warped_image)
        cv2.line(save_warped_img, (x_wl, 0), (x_wr, 0), color=(255, 0, 0), thickness=3)
        cv2.line(save_warped_img, (x_wr, 0), (x_wr, ysize), color=(255, 0, 0), thickness=3)
        cv2.line(save_warped_img, (x_wr, ysize), (x_wl, ysize), color=(255, 0, 0), thickness=3)
        cv2.line(save_warped_img, (x_wl, ysize), (x_wl, 0), color=(255, 0, 0), thickness=3)
        plt.imsave(OUTPUT_DIR + "02_warped.jpg", save_warped_img)
        plt.imshow(save_warped_img)
        plt.show()
        # Check thresholded image (scaled to 0/255 and converted to RGB for display)
        save_thresd_warped = np.copy(thresd_warped)
        save_thresd_warped = cv2.cvtColor(255 * save_thresd_warped, cv2.COLOR_GRAY2RGB)
        cv2.line(save_thresd_warped, (x_wl, 0), (x_wr, 0), color=(255, 0, 0), thickness=3)
        cv2.line(save_thresd_warped, (x_wr, 0), (x_wr, ysize), color=(255, 0, 0), thickness=3)
        cv2.line(save_thresd_warped, (x_wr, ysize), (x_wl, ysize), color=(255, 0, 0), thickness=3)
        cv2.line(save_thresd_warped, (x_wl, ysize), (x_wl, 0), color=(255, 0, 0), thickness=3)
        plt.imsave(OUTPUT_DIR + "03_threshold.jpg", save_thresd_warped)
        plt.imshow(save_thresd_warped)
        plt.show()
        plt.imshow(warped_lane)
        plt.imsave(OUTPUT_DIR + "05_warpedlane.jpg", warped_lane)
        plt.show()
        plt.imshow(lane_overlay)
        plt.imsave(OUTPUT_DIR + "06_unwarpedlane.jpg", lane_overlay)
        plt.show()
        plt.imshow(overlaid_img)
        plt.imsave(OUTPUT_DIR + "07_overlaylane.jpg", overlaid_img)
        plt.show()
    return output_img, left_fit, right_fit
# Applies a perspective transform to an image.
# Inputs:
#   img   image to transform
#   src   source point coordinates (pre-transform)
#   dst   destination point coordinates (post-transform)
# Outputs:
#   img   perspective-transformed image
def warp(img, src=0, dst=0):
    # The scalar defaults signal "no vertices supplied": both vertex sets
    # must be at least 2-D arrays of points to proceed.
    if np.ndim(src) >= 2 and np.ndim(dst) >= 2:
        height, width = img.shape[0], img.shape[1]
        transform = cv2.getPerspectiveTransform(src, dst)
        return cv2.warpPerspective(img, transform, (width, height), flags=cv2.INTER_LINEAR)
    raise ValueError('source and/or destination vertices not defined')
# Applies color and gradient thresholding to isolate lane line candidates.
# Inputs:
#   img            image to apply thresholding to (RGB)
#   l_thresh_comp  lightness threshold laxness (higher = more lax)
#   s_thresh_comp  saturation threshold laxness (higher = more lax)
#   sxl_thresh     gradient (lightness channel, x direction) threshold
#   sxs_thresh     gradient (saturation channel, x direction) threshold
#   sobelk         kernel size for sobel gradient
#   lane_extents   x-values to slice the center of the lane to use in averaging
# Outputs:
#   combined       binary image with culled pixels == 0, passed pixels == 1
def thresholding(img, l_thresh_comp=L_ADJ_FACTOR, s_thresh_comp=S_ADJ_FACTOR,
                 sxl_thresh=SOBEL_L_THRESHOLD, sxs_thresh=SOBEL_S_THRESHOLD,
                 sobelk=SOBEL_KERNEL,
                 lane_extents=LANE_EXTENTS):
    image = np.copy(img)
    # Convert to HLS color space; keep the lightness and saturation channels
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    # Grab information from the center of the lane to compute what the "road"
    # looks like
    avg_l = np.mean(l_channel[:, lane_extents[0]:lane_extents[1]]) # road lightness
    avg_s = np.mean(s_channel[:, lane_extents[0]:lane_extents[1]]) # road saturation
    # Set search thresholds halfway between the road average and 255, relaxed
    # by the laxness factor.
    # The lighter the road, the closer our threshold needs to be to 255
    new_l_thresh = (255 + avg_l) / 2 - l_thresh_comp
    new_s_thresh = (255 + avg_s) / 2 - s_thresh_comp
    # Sobel X on lightness channel
    sobel_xl = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0, ksize=sobelk) # Take the derivative in x
    abs_sobel_xl = np.absolute(sobel_xl) # Nevermind direction, just want sharp edges
    scaled_sobel_xl = np.uint8(255 * abs_sobel_xl / np.max(abs_sobel_xl))
    # Threshold Sobel X on lightness channel
    sxl_binary = np.zeros_like(scaled_sobel_xl)
    sxl_binary[scaled_sobel_xl >= sxl_thresh] = 1
    # Sobel X on saturation channel
    sobel_xs = cv2.Sobel(s_channel, cv2.CV_64F, 1, 0, ksize=sobelk) # Take the derivative in x
    abs_sobel_xs = np.absolute(sobel_xs) # Nevermind direction, just want sharp edges
    scaled_sobel_xs = np.uint8(255 * abs_sobel_xs / np.max(abs_sobel_xs))
    # Threshold Sobel X on saturation channel
    sxs_binary = np.zeros_like(scaled_sobel_xs)
    sxs_binary[scaled_sobel_xs >= sxs_thresh] = 1
    # Threshold red channel
    # NOTE(review): the red channel is compared against the lightness-derived
    # threshold (new_l_thresh) — confirm this cross-channel use is intentional.
    r_channel = image[:, :, 0]
    r_binary = np.zeros_like(r_channel)
    r_binary[r_channel > new_l_thresh] = 1
    # Threshold saturation channel
    s_binary = np.zeros_like(s_channel)
    s_binary[s_channel > new_s_thresh] = 1
    # Use red + saturation channel info for color-only selection
    # Use lightness sobel-x + red for gradient-based selection
    # If neither finds much, use gradients + saturation as a last effort to find pixels
    combined = np.zeros_like(r_binary)
    combined[((r_binary == 1) & (s_binary == 1)) |
             ((sxl_binary == 1) & (r_binary == 1)) |
             ((sxl_binary == 1) & (sxs_binary == 1) & (s_binary == 1))] = 1
    return combined
# Given an appropriate binary image, finds two new lane lines in the current frame.
# Inputs:
#   binary_warped   image frame (binary), perspective transformed already
# Outputs:
#   out_img         image with lane pixels marked, lane filled in green
#   left_fit        fit coefficients for current frame's left lane line
#   right_fit       fit coefficients for current frame's right lane line
def find_polynomial(binary_warped):
    # Locate candidate lane pixels, then fit one quadratic per side.
    out_img, leftx, lefty, rightx, righty = find_lane_pixels(binary_warped)
    left_fit, right_fit, left_fitx, right_fitx, ploty = fit_poly(
        binary_warped.shape, leftx, lefty, rightx, righty)

    ## Visualization ##
    # Paint the lane interior green, then mark raw pixels: left red, right blue.
    out_img = fill_lane(out_img, left_fitx, right_fitx, ploty)
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    return out_img, left_fit, right_fit
# Given an appropriate binary image, finds pixels most likely to belong to
# the left and right lane lines respectively.
# Inputs:
#   binary_warped   image frame (binary), perspective transformed already
#   nwindows        number of horizontal slices to use to try and isolate lane points
#   margin          double margin = width of each horizontal slice
#   minpix          no. of found pixels needed to force a shift for the next window
# Outputs:
#   out_img         image with lane pixels marked (RGB stack of the input)
#   leftx           x-values for the left lane line point cloud
#   lefty           y-values for the left lane line point cloud
#   rightx          x-values for the right lane line point cloud
#   righty          y-values for the right lane line point cloud
def find_lane_pixels(binary_warped, nwindows=NWINDOWS, margin=MARGIN, minpix=MINPIX):
    # Histogram of the bottom half of the image: its two peaks give the
    # starting x-position for each lane line.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # np.int was deprecated in NumPy 1.20 and removed in 1.24 — use plain
    # Python int semantics (// already yields an int here).
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Set height of windows - based on nwindows above and image shape
    window_height = binary_warped.shape[0] // nwindows
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated per window when enough pixels are found
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Accumulated indices (into nonzerox/nonzeroy) per lane line
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom of the image upwards
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = int(leftx_current - margin)
        win_xleft_high = int(leftx_current + margin)
        win_xright_low = int(rightx_current - margin)
        win_xright_high = int(rightx_current + margin)
        # Vectorized window-membership test; the original per-pixel Python
        # list comprehension was O(nwindows * npixels) in interpreted code.
        # np.where preserves ascending index order, matching the original.
        in_y = (win_y_low <= nonzeroy) & (nonzeroy < win_y_high)
        good_left_inds = np.where(in_y & (win_xleft_low <= nonzerox)
                                  & (nonzerox < win_xleft_high))[0]
        good_right_inds = np.where(in_y & (win_xright_low <= nonzerox)
                                   & (nonzerox < win_xright_high))[0]
        # Append these indices to the lists
        left_lane_inds.extend(good_left_inds)
        right_lane_inds.extend(good_right_inds)
        # If we found > minpix pixels, recenter the next window on their mean
        # x-position
        if len(good_left_inds) > minpix:
            leftx_current = np.mean(nonzerox[good_left_inds])
        if len(good_right_inds) > minpix:
            rightx_current = np.mean(nonzerox[good_right_inds])
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return out_img, leftx, lefty, rightx, righty
# Given two previous 2nd-order polynomials (describing previously found lane lines),
# finds two new lane lines in the current frame by searching near the old ones.
# Inputs:
#   binary_warped   image frame (binary), perspective transformed already
#   left_fit        fit coefficients for the previous left lane line
#   right_fit       fit coefficients for the previous right lane line
#   margin          margin (left and right) within which to search for lane pixels
# Outputs:
#   out_img         image with lane pixels marked, lane filled in green
#   new_left_fit    fit coefficients for current frame's left lane line
#   new_right_fit   fit coefficients for current frame's right lane line
def search_around_poly(binary_warped, left_fit, right_fit, margin=MARGIN):
    # Coordinates of every activated (nonzero) pixel
    nonzero = binary_warped.nonzero()
    pixel_rows = np.array(nonzero[0])
    pixel_cols = np.array(nonzero[1])
    # Evaluate the previous-frame polynomials at every pixel's row to get the
    # expected lane-line x-position there -- values must be supplied externally.
    expected_left = left_fit[0] * pixel_rows ** 2 + left_fit[1] * pixel_rows + left_fit[2]
    expected_right = right_fit[0] * pixel_rows ** 2 + right_fit[1] * pixel_rows + right_fit[2]
    # Keep only pixels within +/- margin of the previous fits
    near_left = ((pixel_cols >= expected_left - margin) &
                 (pixel_cols < expected_left + margin))
    near_right = ((pixel_cols >= expected_right - margin) &
                  (pixel_cols < expected_right + margin))
    # Fit new polynomials to the selected pixel clouds
    new_left_fit, new_right_fit, left_fitx, right_fitx, ploty = fit_poly(
        binary_warped.shape,
        pixel_cols[near_left], pixel_rows[near_left],
        pixel_cols[near_right], pixel_rows[near_right])

    ## Visualization ##
    # RGB canvas, green lane fill, then raw pixels: left red, right blue
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    out_img = fill_lane(out_img, left_fitx, right_fitx, ploty)
    out_img[pixel_rows[near_left], pixel_cols[near_left]] = [255, 0, 0]
    out_img[pixel_rows[near_right], pixel_cols[near_right]] = [0, 0, 255]
    return out_img, new_left_fit, new_right_fit
# Takes two point clouds and fits a 2nd-order polynomial to each.
# Inputs:
#   img_shape   array containing the pixel dimensions of the image
#   leftx       x-values of left-lane-line points
#   lefty       y-values of left-lane-line points
#   rightx      x-values of right-lane-line points
#   righty      y-values of right-lane-line points
# Outputs:
#   left_fit    fit coefficients for the left lane line
#   right_fit   fit coefficients for the right lane line
#   left_fitx   x-values for a set of points describing the left line
#   right_fitx  x-values for a set of points describing the right line
#   ploty       y-values for both sets of x-values
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    # x is modeled as a quadratic in y (lane lines are near-vertical)
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # One sample point per image row
    ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])

    def _quadratic(coeffs):
        # Evaluate a quadratic fit at every plot row
        return coeffs[0] * ploty ** 2 + coeffs[1] * ploty + coeffs[2]

    try:
        left_fitx = _quadratic(left_fit)
        right_fitx = _quadratic(right_fit)
    except TypeError:
        # Fall back to a dummy curve if the fits are unusable
        print('The function failed to fit a line!')
        left_fitx = 1 * ploty ** 2 + 1 * ploty
        right_fitx = 1 * ploty ** 2 + 1 * ploty
    return left_fit, right_fit, left_fitx, right_fitx, ploty
# Takes two sets of lane curve points and colors in the space between them.
# Inputs:
#   img         image to draw on
#   left_fitx   x-values of left-lane-line points
#   right_fitx  x-values of right-lane-line points
#   ploty       y-values common to both sets of lane-line points
# Outputs:
#   img         image with lane filled in green
def fill_lane(img, left_fitx, right_fitx, ploty):
    # Build a closed polygon: down the left edge, then back up the right edge
    # (the right edge is reversed so the vertex ring does not self-intersect).
    left_edge = np.stack((left_fitx, ploty), axis=-1)
    right_edge = np.stack((right_fitx, ploty), axis=-1)[::-1]
    polygon = np.concatenate((left_edge, right_edge)).astype(int)
    cv2.fillPoly(img, [polygon], color=[0, 128, 0])
    return img
# Takes two images and overlays the first onto the second using OpenCV's
# weighted sum: result = initial_img*α + overlay_img*β + γ.
# Inputs:
#   overlay_img   image to overlay
#   initial_img   image to be overlaid upon
#   α             weight applied to initial_img
#   β             weight applied to overlay_img
#   γ             scalar added to the weighted sum
# Outputs:
#   img           weighted, overlaid image
def weighted_img(overlay_img, initial_img, α=0.8, β=1., γ=0.):
    blended = cv2.addWeighted(initial_img, α, overlay_img, β, γ)
    return blended
# Calculates the curvature of the road, and the distance the camera is off from the lane
# center, in meters, and writes these values onto the image frame.
# Inputs:
#   img        image frame to write information onto
#   left_fit   fit coefficients for the left side curve (pixel units)
#   right_fit  fit coefficients for the right side curve (pixel units)
#   my         pixel-to-meters conversion for y-dimension (after perspective txfm)
#   mx         pixel-to-meters conversion for x-dimension (after perspective txfm)
# Outputs:
#   img        image frame with curvature and off-center distance written
def measure_stats(img, left_fit, right_fit, my, mx):
    # Measure at the bottom of the image, i.e. closest to the vehicle
    y_eval = img.shape[0] - 1
    # Radius of curvature R = (1 + (2Ay + B)^2)^(3/2) / |2A|, with the
    # pixel-space coefficients rescaled to meters via mx/my factors.
    # Check that the left/right lines curve the same way - if they don't, the lane is
    # quite straight and any "radius" measurement will be incorrect
    if np.sign(left_fit[0]) == np.sign(right_fit[0]):
        left_curverad = (1 + (2 * (mx/my/my)*left_fit[0] * (y_eval*my) + (mx/my)*left_fit[1]) ** 2) ** (3/2) \
            / abs(2 * (mx/my/my)*left_fit[0])
        right_curverad = (1 + (2 * (mx/my/my)*right_fit[0] * (y_eval*my) + (mx/my)*right_fit[1]) ** 2) ** (3/2) \
            / abs(2 * (mx/my/my)*right_fit[0])
        # NOTE(review): `//` floors the averaged radius to a whole number of
        # meters — confirm a plain `/ 2` average wasn't intended.
        mean_curverad = (left_curverad + right_curverad) // 2
    else:
        mean_curverad = float("inf")
    # Distance off-center: lane midpoint at the bottom row vs. image midpoint
    left_edge = left_fit[0] * y_eval ** 2 + left_fit[1] * y_eval + left_fit[2]
    right_edge = right_fit[0] * y_eval ** 2 + right_fit[1] * y_eval + right_fit[2]
    off_center = mx * ((left_edge + right_edge) / 2 - img.shape[1] / 2)
    # Write text to image frame
    if mean_curverad == float("inf"):
        rad_message = "Radius of curvature: NA (straight)"
    else:
        rad_message = "Radius of curvature: %d m" % mean_curverad
    if off_center > 0:
        off_message = "Vehicle is %f m left of center" % off_center
    else:
        off_message = "Vehicle is %f m right of center" % -off_center
    # NOTE(review): lineType=2 is not one of cv2's LINE_* constants
    # (LINE_4=4, LINE_8=8, LINE_AA=16) — confirm the intended line type.
    cv2.putText(img, rad_message, (10, 40), cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1.5, color=(255, 255, 255), thickness=2, lineType=2)
    cv2.putText(img, off_message, (10, 80), cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1.5, color=(255, 255, 255), thickness=2, lineType=2)
    return img
import json
import argparse
import tensorflow.keras as keras
import numpy as np
import tensorflow as tf
from image_quality_assessment.utils import utils
import grpc
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc
# Address of the local TensorFlow Serving instance
TFS_HOST = 'localhost'
TFS_PORT = 8500


def normalize_labels(labels):
    """Normalize a score histogram so its entries sum to 1."""
    scores = np.array(labels)
    return scores / scores.sum()


def calc_mean_score(score_dist):
    """Return the mean score: expected value of the normalized
    distribution over the score buckets 1..10."""
    dist = normalize_labels(score_dist)
    return (dist * np.arange(1, 11)).sum()
def get_image_quality_predictions(image_path, model_name):
    """Score one image via a TensorFlow Serving model and print the result.

    Args:
        image_path: Path to the image file to score.
        model_name: Name of the served model (e.g. mobilenet_aesthetic).
    """
    # Load and preprocess image to MobileNet's expected 224x224 input
    image = utils.load_image(image_path, target_size=(224, 224))
    image = keras.applications.mobilenet.preprocess_input(image)
    # Build a gRPC Predict request against the local TF Serving instance
    target = f'{TFS_HOST}:{TFS_PORT}'
    channel = grpc.insecure_channel(target)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'image_quality'
    # NOTE(review): tf.contrib was removed in TensorFlow 2.x — this only runs
    # on TF 1.x; tf.make_tensor_proto is the modern equivalent.
    request.inputs['input_image'].CopyFrom(
        tf.contrib.util.make_tensor_proto(np.expand_dims(image, 0))
    )
    # 10-second RPC deadline
    response = stub.Predict(request, 10.0)
    # Collapse the returned score distribution into its mean, rounded to 2 dp
    result = round(calc_mean_score(response.outputs['quality_prediction'].float_val), 2)
    print(json.dumps({'mean_score_prediction': np.round(result, 3)}, indent=2))
if __name__ == '__main__':
    # CLI entry point: score a single image file with a served model.
    parser = argparse.ArgumentParser()
    parser.add_argument('-ip', '--image-path', help='Path to image file.', required=True)
    parser.add_argument(
        '-mn', '--model-name', help='mobilenet_aesthetic or mobilenet_technical', required=True
    )
    # argparse converts --image-path/--model-name to image_path/model_name,
    # matching the function's keyword parameters.
    get_image_quality_predictions(**vars(parser.parse_args()))
import math
from PIL import Image
import numpy as np
import filterdata as fd
import config
import scipy.misc
# Configuration pulled from the project-level config module
imagesbase = config.imagesbase
fullpath = config.fullpath
outputdir = config.outputdir
outputdir1 = config.outputdir if fullpath else ''  # path prefix written into the list files
idx = 0  # running index used to name every saved crop
cnttxt = 0;  # NOTE(review): never incremented — text count is derived as idx - cntnon instead
cntnon = 0;  # number of non-text crops saved
phasenames = ['train', 'val']
for phase in [0, 1]: # 0=train , 1=val
    if phase == 0:
        print('start creating training set....')
    else:
        print('start creating validation set....')
    if 'ct' not in locals(): # to prevent the API from re-loading
        from COCOAPI import coco_text
        ct = coco_text.COCO_Text('COCOAPI/COCO_Text.json')
    # Image ids with legible text annotations for the current split
    if (phase == 0):
        allimgIds = ct.getImgIds(imgIds=ct.train, catIds=[('legibility', 'legible')])
    else:
        allimgIds = ct.getImgIds(imgIds=ct.val, catIds=[('legibility', 'legible')])
    imgs = ct.loadImgs(allimgIds)
    # List file: one "<path> <label>" entry per crop; label 0 = non-text, 1 = text.
    # NOTE(review): print(..., end="") writes entries with no separator at all —
    # confirm filterdata expects that; a newline seems more likely intended.
    f = open('%s_unbalance.txt' % (phasenames[phase]), 'w')
    for x in imgs:
        annids = ct.getAnnIds(imgIds=x['id'], catIds=[('legibility', 'legible')])
        anns = ct.loadAnns(annids)
        image = Image.open('%s%s' % (imagesbase, x['file_name']))
        print('processing image %d' % (x['id']))
        w = x['width']
        h = x['height']
        # non text areas: tile 32x32 windows in the region above/left of every
        # annotation bounding box.
        # NOTE(review): scipy.misc.imsave was removed in modern SciPy (>=1.2);
        # this script needs an old SciPy or a port to imageio.imwrite.
        xmin = int(np.floor(np.amin([z['bbox'][0] for z in anns])))
        ymin = int(np.floor(np.amin([z['bbox'][1] for z in anns])))
        if ((xmin > 32) & (ymin > 32)):
            for i in range(0, xmin - 32, 32):
                for j in range(0, ymin - 32, 32):
                    box = [i, j, i + 32, j + 32]
                    window = image.crop(box)
                    scipy.misc.imsave('%stxt_%d.jpg' % (outputdir, idx), window)
                    print('%stxt_%d.jpg %d' % (outputdir1, idx, 0), end="", file=f)
                    idx = idx + 1
                    cntnon = cntnon + 1
        # ...and in the region below/right of every annotation box.
        # NOTE(review): h (height) is paired with xmax and w (width) with ymax
        # here, the opposite pairing from the xmin/ymin block above — the axes
        # look swapped relative to PIL's (x, y) crop convention; confirm.
        xmax = int(np.floor(np.amax([z['bbox'][0] for z in anns])))
        ymax = int(np.floor(np.amax([z['bbox'][1] for z in anns])))
        if (((h - xmax) > 32) & ((w - ymax) > 32)):
            for i in range(xmax, h - xmax - 32, 32):
                for j in range(ymax, w - ymax - 32, 32):
                    box = [i, j, i + 32, j + 32]
                    window = image.crop(box)
                    scipy.misc.imsave('%stxt_%d.jpg' % (outputdir, idx), window)
                    print('%stxt_%d.jpg %d' % (outputdir1, idx, 0), end="", file=f)
                    idx = idx + 1
                    cntnon = cntnon + 1
        # text areas: crop every annotation box, padded up to at least 32x32
        for y in anns:
            bbox = y['bbox'];
            if bbox[3] < 32:
                bbox[3] = 32
            if bbox[2] < 32:
                bbox[2] = 32
            # convert [x, y, w, h] to corner form [x0, y0, x1, y1]
            bbox[2] = bbox[2] + bbox[0];
            bbox[3] = bbox[3] + bbox[1];
            bbox = [int(math.floor(xx)) for xx in bbox];
            crop = image.crop(bbox)
            if crop.size[0] < 32 or crop.size[1] < 32:
                # too small to tile: save the whole crop as one text sample
                scipy.misc.imsave('%stxt_%d.jpg' % (outputdir, idx), crop)
                print('%stxt_%d.jpg %d' % (outputdir1, idx, 1), end="", file=f)
                idx = idx + 1
            else:
                # tile the text crop into 32x32 windows
                for i in range(0, crop.size[0] - 32, 32):
                    for j in range(0, crop.size[1] - 32, 32):
                        box = [i, j, i + 32, j + 32]
                        window = crop.crop(box)
                        scipy.misc.imsave('%stxt_%d.jpg' % (outputdir, idx), window)
                        print('%stxt_%d.jpg %d' % (outputdir1, idx, 1), end="", file=f)
                        idx = idx + 1
    if phase == 0:
        print('done training set....')
    else:
        print('done validation set....')
    f.close()
    # Counters are cumulative across phases, not per-phase
    print('total=', idx, ' non-text=', cntnon, ' text=', idx - cntnon)
########################
#### start filtering data
fd.filter()
print('Data set created in')
print(outputdir)
print('unbalanced dataset images are listed in train_unbalanced.txt and val_unbalance.txt')
print('final balanced dataset images are listed in train.txt and val.txt')
from datetime import datetime, timedelta
from services.log_service import LogService
import torch
import numpy as np
from entities.metric import Metric
from entities.data_output_log import DataOutputLog
class LogServiceFake(LogService):
    """No-op stand-in for LogService: swallows metric/progress/summary calls
    and routes plain messages straight to stdout (useful in tests)."""

    def __init__(self):
        # Deliberately skips LogService.__init__ — the fake needs no setup.
        pass

    def log_progress(
            self,
            current_step: int,
            all_steps: int,
            epoch_num: int = None,
            evaluation: bool = False):
        """No-op: progress is not reported."""
        pass

    def initialize_evaluation(self):
        """No-op."""
        pass

    def log_evaluation(
            self,
            train_metric: Metric,
            validation_metric: Metric,
            epoch: int,
            iteration: int,
            iterations: int,
            new_best: bool,
            metric_log_key: str = None):
        """
        logs progress to user through tensorboard and terminal
        (no-op in this fake)
        """
        pass

    def log_info(self, message: str):
        """Print the message to stdout instead of logging it."""
        print(message)

    def log_debug(self, message: str):
        """Print the message to stdout instead of logging it."""
        print(message)

    def log_error(self, message: str):
        """Print the message to stdout instead of logging it."""
        print(message)

    def log_exception(self, message: str, exception: Exception):
        """Print the message together with the original exception."""
        log_message = f'Exception occurred. Message: {message}\nOriginal exception: {exception}'
        print(log_message)

    def log_warning(self, message: str):
        """Print the message to stdout instead of logging it."""
        print(message)

    def log_summary(self, key: str, value: object):
        """No-op."""
        pass

    def log_batch_results(self, data_output_log: DataOutputLog):
        """No-op."""
        pass

    def log_incremental_metric(self, metric_key: str, metric_value: object):
        """No-op."""
        pass

    def log_arguments(self):
        """No-op."""
        pass

    def log_heatmap(
            self,
            heatmap_title: str,
            matrix_values: np.ndarray,
            x_labels: list,
            y_labels: list,
            show_text_inside: bool = False):
        """No-op."""
        pass

    def start_logging_model(self, model: torch.nn.Module, criterion: torch.nn.Module = None):
        """No-op."""
        pass

    def get_time_passed(self) -> timedelta:
        """Always report a fixed elapsed time of one hour."""
        result = timedelta(minutes=60)
        return result

    def _get_current_step(self) -> int:
        """Fake step counter: always 0."""
        return 0
## same as the analytic case but with the fft
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import cond
import cmath;
from scipy import linalg as LA
from numpy.linalg import solve as bslash
import time
from convolution_matrices.convmat1D import *
from RCWA_1D_functions.grating_fft.grating_conv import *
def nonHermitianEigenSorter(eigenvalues):
    """Select the 'physical' eigenvalues of the non-Hermitian problem.

    Keeps eigenvalues strictly in the right half of the complex plane, plus
    those on the positive imaginary axis (Re > 0, or Re == 0 with Im > 0).

    Returns
    -------
    (kept_eigenvalues, kept_indices) : two lists, in the original order.
    """
    kept_eigs = []
    kept_indices = []
    for idx, eig in enumerate(eigenvalues):
        re, im = np.real(eig), np.imag(eig)
        # Single predicate equivalent to the three original cases:
        #   Re>0 & Im==0,  Re==0 & Im>0,  Re>0 & |Im|>0
        if re > 0 or (re == 0 and im > 0):
            kept_eigs.append(eig)
            kept_indices.append(idx)
    return kept_eigs, kept_indices
# Moharam et. al Formulation for stable and efficient implementation for RCWA
plt.close("all")
'''
1D TM implementation of PLANAR DIFFRACTiON
STILL NOT WORKING YET
only: sign convention is exp(-ikr) (is the positive propagating wave), so loss is + not -
source for fourier decomps is from the paper: Formulation for stable and efficient implementation of
the rigorous coupled-wave analysis of binary gratings by Moharam et. al
'''
# plt.plot(x, np.real(fourier_reconstruction(x, period, 1000, 1,np.sqrt(12), fill_factor = 0.1)));
# plt.title('check that the analytic fourier series works')
# #'note that the lattice constant tells you the length of the ridge'
# plt.show()
# --- physical constants (SI) ---
L0 = 1e-6;
e0 = 8.854e-12;
mu0 = 4*np.pi*1e-8;
# NOTE(review): vacuum permeability is 4*pi*1e-7 H/m, not *1e-8 -- confirm;
# mu0 appears unused below, so this may be harmless.
# --- grating geometry and truncation order ---
fill_factor = 0.3; # fraction of the unit cell occupied by the ridge material (30%)
num_ord = 3; #INCREASING NUMBER OF ORDERS SEEMS TO CAUSE THIS THING TO FAIL, to many orders induce evanescence...particularly
# when there is a small fill factor
PQ = 2*num_ord+1;
indices = np.arange(-num_ord, num_ord+1)
n_ridge = 3.48; #3.48; # ridge
n_groove = 1; # groove (unit-less)
lattice_constant = 0.7; # SI units
# we need to be careful about what lattice constant means
# in the gaylord paper, lattice constant exactly means (0, L) is one unit cell
d = 0.46; # thickness, SI units
# --- sampled permittivity profile over one unit cell (ridge then groove) ---
Nx = 2*256;
eps_r = n_groove**2*np.ones((2*Nx, 1)); #put in a lot of points in eps_r
border = int(2*Nx*fill_factor);
eps_r[0:border] = n_ridge**2;
# FFT of the permittivity profile; supplies the Fourier harmonics used to
# build the convolution (Toeplitz) matrices below.
fft_fourier_array = grating_fft(eps_r);
x = np.linspace(-lattice_constant,lattice_constant,1000);
period = lattice_constant;
## simulation parameters
theta = (0)*np.pi/180;
spectra = list();
spectra_T = list();
wavelength_scan = np.linspace(0.5, 2, 100)
## construct permittivity harmonic components E
#fill factor = 0 is complete dielectric, 1 is air
##construct convolution matrix
Ezz = np.zeros((2 * num_ord + 1, 2 * num_ord + 1)); Ezz = Ezz.astype('complex')
p0 = Nx; #int(Nx/2);
p_index = np.arange(-num_ord, num_ord + 1);
q_index = np.arange(-num_ord, num_ord + 1);
fourier_array = fft_fourier_array;#fourier_array_analytic;
detected_pffts = np.zeros_like(Ezz);
# Toeplitz fill: entry (p, q) holds the (p-q)-th Fourier harmonic of eps.
for prow in range(2 * num_ord + 1):
    # first term locates z plane, 2nd locates y coumn, prow locates x
    row_index = p_index[prow];
    for pcol in range(2 * num_ord + 1):
        pfft = p_index[prow] - p_index[pcol];
        detected_pffts[prow, pcol] = pfft;
        Ezz[prow, pcol] = fourier_array[p0 + pfft]; # fill conv matrix from top left to top right
# Off-diagonal permittivity blocks; here set to a constant test coupling
# (0.2 on the diagonal) rather than derived from the grating profile.
Exz = np.zeros_like(Ezz);
Ezx = -np.zeros_like(Ezz);
Exz = 0.2*np.eye(PQ)
Ezx = Exz;
print((Exz.shape, Ezx.shape, Ezz.shape))
## FFT of 1/e;
inv_fft_fourier_array = grating_fft(1/eps_r);
##construct convolution matrix
E_conv_inv = np.zeros((2 * num_ord + 1, 2 * num_ord + 1));
E_conv_inv = E_conv_inv.astype('complex')
p0 = Nx;
p_index = np.arange(-num_ord, num_ord + 1);
# Same Toeplitz fill, but for the harmonics of 1/eps (Li's inverse rule).
for prow in range(2 * num_ord + 1):
    # first term locates z plane, 2nd locates y coumn, prow locates x
    for pcol in range(2 * num_ord + 1):
        pfft = p_index[prow] - p_index[pcol];
        E_conv_inv[prow, pcol] = inv_fft_fourier_array[p0 + pfft]; # fill conv matrix from top left to top right
## IMPORTANT TO NOTE: the indices for everything beyond this points are indexed from -num_ord to num_ord+1
## alternate construction of 1D convolution matrix
PQ =2*num_ord+1;
I = np.eye(PQ)
zeros = np.zeros((PQ, PQ))
# E is now the convolution of fourier amplitudes
# Main scan: for each wavelength solve the generalized eigenproblem of the
# layer, then match boundary conditions to get reflection/transmission.
for wvlen in wavelength_scan:
    j = cmath.sqrt(-1);
    lam0 = wvlen; k0 = 2 * np.pi / lam0; #free space wavelength in SI units
    print('wavelength: ' + str(wvlen));
    ## =====================STRUCTURE======================##
    ## Region I: reflected region (half space)
    n1 = 1;#cmath.sqrt(-1)*1e-12; #apparently small complex perturbations are bad in Region 1, these shouldn't be necessary
    ## Region 2; transmitted region
    n2 = 1;
    #from the kx_components given the indices and wvln
    kx_array = k0*(n1*np.sin(theta) + indices*(lam0 / lattice_constant)); #0 is one of them, k0*lam0 = 2*pi
    k_xi = kx_array;
    ## IMPLEMENT SCALING: these are the fourier orders of the x-direction decomposition.
    KX = np.diag((k_xi/k0)); #singular since we have a n=0, m= 0 order and incidence is normal
    # PQ_block = np.block([[zeros, np.linalg.inv(E_conv_inv)],[KX@bslash(E, KX) - I, zeros]])
    # # plt.imshow(np.abs(PQ_block));
    # # plt.show();
    # print('condition of PQ block: '+str(np.linalg.cond(PQ_block)))
    # big_eigenvals, bigW = LA.eig(PQ_block);
    # print((bigW.shape, big_eigenvals.shape))
    # Wp = bigW[0:PQ, PQ:]
    # plt.imshow(abs(bigW))
    # plt.show();
    ## construct matrix of Gamma^2 ('constant' term in ODE):
    ## one thing that isn't obvious is that are we doing element by element division or is it matricial
    # Coefficient matrices of the second-order ODE in the field harmonics;
    # bslash(A, B) solves A x = B, i.e. the matrix "A \ B".
    B = (KX@bslash(Ezz, KX) - I);
    bE = np.linalg.inv(E_conv_inv) + bslash(Ezz,(Exz@Ezx)); #/Ezz;
    G = j*bslash(Ezz,Ezx) @ KX;
    H = j*KX @bslash(Ezz, Exz);
    #print((G,H))
    print((bE.shape,G.shape, H.shape))
    print((np.linalg.cond(B), np.linalg.cond(bE)))
    # Linearize the quadratic eigenproblem into the 2x-size pencil (OB, OA).
    M = np.linalg.inv(bE);
    K = -(B + H@np.linalg.inv(bE)@G);
    C = -np.linalg.inv(bE)@G - H@np.linalg.inv(bE);
    Z = np.zeros_like(M);
    I = np.eye(M.shape[0], M.shape[1]);
    OA = np.block([[M, Z],[Z, I]])
    OB = np.block(np.block([[C, K],[-I, Z]]))
    ## these matrices aren't poorly conditioned
    print((np.linalg.cond(OA), np.linalg.cond(OB)))
    ## solve eiegenvalues;
    beigenvals, bigW = LA.eig(OB, OA); #W contains eigenmodes of the form (lambda x, x)
    ## AT THIS POINT, we have still extracted TWO times the number of eigenvalues...
    #try rounding...
    # NOTE(review): builtin round() does not accept complex values on modern
    # Python/numpy -- confirm which interpreter/numpy version this ran under.
    rounded_beigenvals = np.array([round(i,8) for i in beigenvals])
    print(rounded_beigenvals)
    #quadrant_sort = [1 if abs(np.real(i))>=0 and np.imag(i)>=0 else 0 for i in rounded_beigenvals];
    sorted_eigs, sorted_indices = nonHermitianEigenSorter(rounded_beigenvals)
    # NOTE(review): sorted_indices already IS a list of indices;
    # np.nonzero(sorted_indices) returns the POSITIONS of its nonzero
    # entries, which silently drops index 0 and remaps the rest.
    # Probably should be np.array(sorted_indices) -- TODO confirm.
    sorted_indices = np.nonzero(sorted_indices)[0];
    #print(quadrant_sort)
    # sorted_indices = np.nonzero(quadrant_sort)[0]
    print(len(sorted_indices))
    #sorted_indices = np.argsort(np.real(rounded_beigenvals))
    sorted_eigenmodes = bigW[:, sorted_indices];
    #print(sorted_eigenmodes)
    #adding real and imaginary parts seems to work...
    sorted_eigenvals = beigenvals[sorted_indices]
    print(sorted_eigenvals)
    # Lower half of each eigenvector is the field part (pencil is (lam*x, x)).
    W = sorted_eigenmodes[PQ:,:]
    eigenvals_wp = (sorted_eigenvals[0:PQ]);
    # plt.subplot(121)
    # plt.plot(np.real(beigenvals), np.imag(beigenvals), '.', markersize = 20); plt.title('1st');
    # plt.subplot(122)
    # plt.plot(np.real(beigenvals), np.imag(beigenvals), '.', markersize = 20);
    # plt.plot(np.real(eigenvals_wp), (np.imag(eigenvals_wp)), '.r', markersize = 10)
    # plt.show();
    # ##
    Q = np.diag(eigenvals_wp); #eigenvalue problem is for kz, not kz^2
    V = np.linalg.inv(bE)@(W @ Q + H @ W);
    X = np.diag(np.exp(-k0*np.diag(Q)*d)); #this is poorly conditioned because exponentiation
    ## pointwise exponentiation vs exponentiating a matrix
    ## observation: almost everything beyond this point is worse conditioned
    # Longitudinal wavevectors in the two half spaces.
    k_I = k0**2*(n1**2 - (k_xi/k0)**2); #k_z in reflected region k_I,zi
    k_II = k0**2*(n2**2 - (k_xi/k0)**2); #k_z in transmitted region
    k_I = k_I.astype('complex'); k_I = np.sqrt(k_I);
    k_II = k_II.astype('complex'); k_II = np.sqrt(k_II);
    Z_I = np.diag(k_I / (n1**2 * k0 ));
    Z_II = np.diag(k_II /(n2**2 * k0));
    # Incident field: only the zeroth diffraction order is excited.
    delta_i0 = np.zeros((len(kx_array),1));
    delta_i0[num_ord] = 1;
    n_delta_i0 = delta_i0*j*np.cos(theta)/n1;
    ## design auxiliary variables: SEE derivation in notebooks: RCWA_note.ipynb
    # we want to design the computation to avoid operating with X, particularly with inverses
    # since X is the worst conditioned thing
    print((W.shape, V.shape))
    #this appears to be worse and worse conditioned at higher orders...
    O = np.block([
        [W, W],
        [V,-V]
    ]); #this is much better conditioned than S..
    print('condition of O: '+str(np.linalg.cond(O)))
    print((np.linalg.cond(W), np.linalg.cond(V)))
    # plt.imshow(abs(O))
    # plt.show();
    # Boundary matching: solve for mode amplitudes, then reflection R and
    # transmission T of each diffraction order.
    f = I;
    g = j * Z_II; #all matrices
    fg = np.concatenate((f,g),axis = 0)
    ab = np.matmul(np.linalg.inv(O),fg);
    a = ab[0:PQ,:];
    b = ab[PQ:,:];
    term = X @ a @ np.linalg.inv(b) @ X;
    f = W @ (I + term);
    g = V@(-I+term);
    T = np.linalg.inv(np.matmul(j*Z_I, f) + g);
    T = np.dot(T, (np.dot(j*Z_I, delta_i0) + n_delta_i0));
    R = np.dot(f,T)-delta_i0; #shouldn't change
    T = np.dot(np.matmul(np.linalg.inv(b),X),T)
    ## calculate diffraction efficiencies
    #I would expect this number to be real...
    DE_ri = R*np.conj(R)*np.real(np.expand_dims(k_I,1))/(k0*n1*np.cos(theta));
    DE_ti = T*np.conj(T)*np.real(np.expand_dims(k_II,1)/n2**2)/(k0*np.cos(theta)/n1);
    print('R(lam)='+str(np.sum(DE_ri))+' T(lam) = '+str(np.sum(DE_ti)))
    spectra.append(np.sum(DE_ri)); #spectra_T.append(T);
    spectra_T.append(np.sum(DE_ti))
# Plot the accumulated reflection and transmission spectra over the scan.
spectra = np.array(spectra);
spectra_T = np.array(spectra_T)
plt.figure();
plt.plot(wavelength_scan, spectra);
plt.plot(wavelength_scan, spectra_T)
# R + T should equal 1 for a lossless structure; plotted as a sanity check.
plt.plot(wavelength_scan, spectra+spectra_T)
# plt.legend(['reflection', 'transmission'])
# plt.axhline(((3.48-1)/(3.48+1))**2,xmin=0, xmax = max(wavelength_scan))
# plt.axhline(((3.48-1)/(3.48+1)),xmin=0, xmax = max(wavelength_scan), color='r')
#
plt.show()
#import libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import seaborn
from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, LabelBinarizer
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
class DataPreProcessor :
    """
    A class used to preprocess train and test data.

    Numerical attributes are standardized with a StandardScaler fitted on
    the train split; categorical attributes are integer-coded and then
    one-hot encoded with a fixed category list so train/test agree.

    Attributes
    ----------
    numerical_columns : list
        a list with the numerical columns names of the dataframe
    categorical_columns : list
        a list with the categorical columns names of the dataframe (or None)
    label_encoders : dict
        placeholder for per-column LabelEncoders (currently unused)
    num_scaler : sklearn.preprocessing.StandardScaler
        a StandardScaler to scale numerical attributes
    onehot_encoder : sklearn.preprocessing.OneHotEncoder
        an Encoder to One-Hot encode categorical attributes

    Methods
    ----------
    preprocess_train_data(train_data)
        Fit the scaler/encoder on the train data and transform it
    preprocess_test_data(test_data)
        Transform the test data with the already-fitted scaler/encoder
    """
    def __init__(self, categorical_columns, numerical_columns, categories) :
        """
        Parameters
        ----------
        categorical_columns : list
            a list with the categorical columns names of the dataframe (or None)
        numerical_columns : list
            a list with the numerical columns names of the dataframe
        categories : list
            a list of arrays specifying the categories of each categorical
            attribute, used to one-hot encode categorical attributes
        """
        self.numerical_columns = numerical_columns
        self.categorical_columns = categorical_columns
        self.label_encoders = dict()
        self.num_scaler = StandardScaler()
        self.onehot_encoder = OneHotEncoder(categories=categories)
    def preprocess_train_data(self, train_data) :
        """Fit on and preprocess the train data

        Parameters
        ----------
        train_data : pandas.core.frame.DataFrame
            the train dataframe to be preprocessed

        Returns
        -------
        numpy.ndarray
            a matrix with the preprocessed data (scaled numericals stacked
            with one-hot encoded categoricals)
        """
        if self.categorical_columns is not None :
            # BUG FIX: work on a copy -- the original assigned columns on a
            # slice of train_data, mutating the caller's dataframe and
            # triggering pandas' SettingWithCopyWarning.
            categorical_vars = train_data[self.categorical_columns].copy()
            # integer-code each categorical column before one-hot encoding
            for col in categorical_vars.columns :
                categorical_vars[col] = categorical_vars[col].astype('category').cat.codes
            scaled_cat = self.onehot_encoder.fit_transform(categorical_vars).toarray()
            # standardize numerical attributes
            scaled_num = self.num_scaler.fit_transform(train_data[self.numerical_columns])
            return np.column_stack((scaled_num, scaled_cat))
        else :
            # no categoricals: only standardize numerical attributes
            return self.num_scaler.fit_transform(train_data[self.numerical_columns])
    def preprocess_test_data(self, test_data) :
        """Preprocess the test data with the encoders fitted on train data

        Parameters
        ----------
        test_data : pandas.core.frame.DataFrame
            the test dataframe to be preprocessed

        Returns
        -------
        numpy.ndarray
            a matrix with the preprocessed data
        """
        if self.categorical_columns is not None :
            # copy for the same reason as in preprocess_train_data
            categorical_vars = test_data[self.categorical_columns].copy()
            for col in categorical_vars.columns :
                categorical_vars[col] = categorical_vars[col].astype('category').cat.codes
            scaled_cat = self.onehot_encoder.transform(categorical_vars).toarray()
            # standardize numerical attributes (transform only -- no refit)
            scaled_num = self.num_scaler.transform(test_data[self.numerical_columns])
            return np.column_stack((scaled_num, scaled_cat))
        else :
            return self.num_scaler.transform(test_data[self.numerical_columns])
def train_evaluate_model(model_name, x_train, y_train, x_test, y_test) :
    """Train the named classifier and evaluate it on the test split.

    Parameters
    ----------
    model_name : string
        the name of the model (passed to initialize_model)
    x_train : numpy.ndarray
        training data
    y_train : numpy.ndarray
        training labels
    x_test : numpy.ndarray
        testing data
    y_test : numpy.ndarray
        testing labels

    Returns
    -------
    pandas.core.frame.DataFrame
        a one-row dataframe with columns
        ['model', 'accuracy', 'precision', 'recall', 'f1']
    """
    # build, fit, and run the classifier
    model = initialize_model(model_name)
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)
    # weighted averaging so multi-class metrics account for class support
    scores = [
        accuracy_score(y_test, predictions),
        precision_score(y_test, predictions, average='weighted'),
        recall_score(y_test, predictions, average='weighted'),
        f1_score(y_test, predictions, average='weighted'),
    ]
    row = [model_name] + [np.round(score, 4) for score in scores]
    return pd.DataFrame([row], columns=['model', 'accuracy', 'precision', 'recall', 'f1'])
def initialize_model(model_name) :
    """Get the desired classifier model initialized.

    Parameters
    ----------
    model_name : string
        the name of the model; one of 'RF', 'KNN', 'DT', 'SVM', 'MLP'

    Returns
    -------
    classifier_model
        the specified classifier model initialized

    Raises
    ------
    ValueError
        if model_name is not a supported name. (The original returned the
        string 'Invalid model name', which only surfaced later as a
        confusing AttributeError when .fit was called on it.)
    """
    # Factories instead of instances: the original dict eagerly constructed
    # every classifier on each call; a lambda builds only the one requested.
    factories = {'RF' : lambda: RandomForestClassifier(n_estimators=100, random_state=9, verbose=True, n_jobs=3),
                 'KNN' : lambda: KNeighborsClassifier(n_neighbors=10, n_jobs=3),
                 'DT' : lambda: DecisionTreeClassifier(random_state=9),
                 'SVM' : lambda: SVC(C=100, max_iter=300, kernel='linear', probability=True, random_state=9, verbose=1),
                 'MLP' : lambda: MLPClassifier(hidden_layer_sizes=(128,64,32), max_iter=300, random_state=9, verbose=1)}
    try:
        return factories[model_name]()
    except KeyError:
        raise ValueError(f"Invalid model name: {model_name!r}") from None
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import pathlib
import os
import zipfile
from multiprocessing import Pool
import datetime
from dateutil.relativedelta import relativedelta
def df_from_csv_with_geo(file_path, nrows=None):
    """Extract useful columns from a birth record csv that carries geo data.

    Takes a csv path whose file name embeds the year at characters 4-8
    (e.g. ``natl1999.csv``). Only 1982-2004 files contain geo data.

    Parameters
    ----------
    file_path : pathlib.Path
        path to the yearly csv; its grandparent must contain an
        ``external`` folder with the FIPS and abbreviation lookup files
    nrows : int, optional
        row limit forwarded to pandas.read_csv

    Returns
    -------
    pandas.DataFrame or None
        columns dob_yy, dob_mm, mrcntyfips, mrcityfips, state_name_mr,
        mrstatefips, apgar5 -- or None for years with no geo data
    """
    # get year of CSV from the file name
    year = int(str(file_path).split("/")[-1][4:8])
    # no geo data before 1982 or after 2004.
    # BUG FIX: the original `pass`ed here and then fell through to the final
    # `return df[...]` with `df` never defined (NameError); return early.
    if year > 2004 or year < 1982:
        return None
    # load FIPS code data
    df_fips = pd.read_csv(
        file_path.parent.parent / "external" / "all-geocodes-v2017.csv", dtype=str
    )
    # get the fips codes for the states only
    # ('Consolidtated' reproduces the typo in the source csv's column name)
    df_fips = df_fips[
        (df_fips["County Code (FIPS)"] == "000")
        & (df_fips["County Subdivision Code (FIPS)"] == "00000")
        & (df_fips["Place Code (FIPS)"] == "00000")
        & (df_fips["Consolidtated City Code (FIPS)"] == "00000")
    ][
        [
            "State Code (FIPS)",
            "Area Name (including legal/statistical area description)",
        ]
    ]
    # rename columns in df
    df_fips.columns = ["state_fips", "state_name_mr"]
    # different column names are required depending on year
    # columns for 2003+
    col_load_1 = [
        "dob_yy",
        "dob_mm",
        "dob_wk",
        "mrstate",
        "mrcntyfips",
        "mrcityfips",
        "apgar5",
    ]
    # columns for 1989-2002
    col_load_2 = [
        "biryr",
        "birmon",
        "weekday",
        "stresfip",
        "cntyrfip",
        "cityres",
        "fmaps",
    ]
    rename_col2 = [
        "dob_yy",
        "dob_mm",
        "dob_wk",
        "mrstate",
        "mrcntyfips",
        "mrcityfips",
        "apgar5",
    ]
    # columns for 1982 through 1988
    col_load_3 = [
        "datayear",
        "birmon",
        "birday",
        "stresfip",
        "cntyrfip",
        "cityres",
        "fmaps",
    ]
    rename_col3 = [
        "dob_yy",
        "dob_mm",
        "dob_day",
        "mrstate",
        "mrcntyfips",
        "mrcityfips",
        "apgar5",
    ]
    # create dictionaries to rename older csvs to the 2003+ column names
    col_rename_dict2 = dict(zip(col_load_2, rename_col2))
    col_rename_dict3 = dict(zip(col_load_3, rename_col3))
    # if the CSVs are of newer format
    if year >= 2003:
        # load abbreviation csv so we can rename AK to Alaska, etc.
        df_abbr = pd.read_csv(
            file_path.parent.parent / "external" / "state_abbreviations.csv",
            header=None,
            names=["state", "abbr"],
        )
        # load only select columns, and set dtype for columns
        df = pd.read_csv(file_path, nrows=nrows, usecols=col_load_1, dtype=str)
        # get the full state name and append them onto the df
        df = (
            pd.merge(
                df,
                df_abbr,
                left_on="mrstate",
                right_on="abbr",
                how="inner",
                copy=False,
            )
            .drop(["abbr"], axis=1)
            .drop(["mrstate"], axis=1)
        )
        df = df.rename(columns={"state": "state_name_mr"})
        # get state FIPS code and append
        df = pd.merge(
            df,
            df_fips,
            left_on="state_name_mr",
            right_on="state_name_mr",
            how="inner",
            copy=False,
        )
        df = df.rename(columns={"state_fips": "mrstatefips"})
        # drop any rows with NaN's
        df = df.dropna()
    # if the CSVs are of older format
    else:
        if year >= 1989:
            col_load, col_rename_dict = col_load_2, col_rename_dict2
        else:
            col_load, col_rename_dict = col_load_3, col_rename_dict3
        # load only select columns from the birth CSV
        df = pd.read_csv(
            file_path, nrows=nrows, usecols=col_load, dtype=str
        ).rename(columns=col_rename_dict)
        # rename 'mrstate' column
        df = df.rename(columns={"mrstate": "mrstatefips"})
        # merge df_fips to get the full state name
        df = pd.merge(
            df,
            df_fips,
            left_on="mrstatefips",
            right_on="state_fips",
            how="inner",
            copy=False,
        ).drop(["state_fips"], axis=1)
        # years before 1989 only show a single digit (i.e. 2 for 1982)
        if year < 1989:
            df = df.drop(columns=["dob_yy"])
            df["dob_yy"] = np.array([year] * df.shape[0])
        # drop any rows with NaN's
        df = df.dropna()
    # return the dataframe, and order the columns in a fixed manner
    print(f'{year} processing complete')
    return df[
        [
            "dob_yy",
            "dob_mm",
            "mrcntyfips",
            "mrcityfips",
            "state_name_mr",
            "mrstatefips",
            "apgar5",
        ]
    ].astype({"dob_mm": int, "dob_yy": int, "apgar5": int})
def df_from_csv_no_geo(file_path, nrows=None):
    """Count births per (year, month) from a birth record csv.

    Takes a csv path whose file name embeds the year at characters 4-8.
    Produces a dataframe without geo data; good for all years of data
    collection.

    Parameters
    ----------
    file_path : str or pathlib.Path
        path to the yearly birth-record csv
    nrows : int, optional
        row limit forwarded to pandas.read_csv

    Returns
    -------
    pandas.DataFrame
        columns dob_yy, dob_mm, births (all int), one row per month
    """
    # get year of CSV from the file name
    year = int(str(file_path).split("/")[-1][4:8])
    # different column names are required depending on year
    # columns for 2003+
    col_load_1 = ["dob_yy", "dob_mm"]
    # columns for 1989-2002
    col_load_2 = ["biryr", "birmon"]
    rename_col2 = ["dob_yy", "dob_mm"]
    # columns for 1982 through 1988
    col_load_3 = ["datayear", "birmon"]
    rename_col3 = ["dob_yy", "dob_mm"]
    # create dictionaries to rename older csvs
    # (an unused duplicate `col_rename_dict` from the original was removed)
    col_rename_dict2 = dict(zip(col_load_2, rename_col2))
    col_rename_dict3 = dict(zip(col_load_3, rename_col3))
    # if the CSVs are of newer format
    if year >= 2003:
        # 2019 and 2020 columns are capitalized
        if year >= 2019:
            col_load_1 = [col_name.upper() for col_name in col_load_1]
        # load only select columns, and set dtype for columns
        df = pd.read_csv(file_path, nrows=nrows, usecols=col_load_1, dtype=str)
        df.columns = df.columns.str.lower()
        # drop any rows with NaN's
        df = df.dropna()
        # one 'birth' per record; groupby-count turns it into a monthly tally
        df["births"] = np.ones(df.shape[0])
        df = (
            df.groupby(["dob_yy", "dob_mm"], as_index=False)
            .count()
            .sort_values(by=["dob_yy", "dob_mm"])
        )
    elif year > 1988 and year < 2004:
        # load only select columns from the birth CSV
        df = pd.read_csv(file_path, nrows=nrows, usecols=col_load_2, dtype=str).rename(
            columns=col_rename_dict2
        )
        # drop any rows with NaN's
        df = df.dropna()
        df["births"] = np.ones(df.shape[0])
        df = (
            df.groupby(["dob_yy", "dob_mm"], as_index=False)
            .count()
            .sort_values(by=["dob_yy", "dob_mm"])
        )
    # if the CSVs are of older format
    else:
        # load only select columns from the birth CSV
        df = pd.read_csv(file_path, nrows=nrows, usecols=col_load_3, dtype=str).rename(
            columns=col_rename_dict3
        )
        # years before 1989 only show a single digit (i.e. 2 for 1982)
        df = df.drop(columns=["dob_yy"])
        df["dob_yy"] = np.array([year] * df.shape[0])
        # drop any rows with NaN's
        df = df.dropna()
        df["births"] = np.ones(df.shape[0])
        df = (
            df.groupby(["dob_yy", "dob_mm"], as_index=False)
            .count()
            .sort_values(by=["dob_yy", "dob_mm"])
        )
    # return the dataframe, and order the columns in a fixed manner
    print(f'{year} processing complete')
    return df[["dob_yy", "dob_mm", "births"]].astype(
        {"dob_mm": int, "dob_yy": int, "births": int}
    )
def df_from_csv_no_geo_extra(file_path, nrows=None):
    """Count births per (year, month, apgar5) from a birth record csv.

    Takes a csv path whose file name embeds the year at characters 4-8.
    Produces a dataframe without geo data but with the 5-minute Apgar
    score, which exists from 1978 onwards; earlier years yield an empty
    dataframe.

    Parameters
    ----------
    file_path : str or pathlib.Path
        path to the yearly birth-record csv
    nrows : int, optional
        row limit forwarded to pandas.read_csv

    Returns
    -------
    pandas.DataFrame
        columns dob_yy, dob_mm, apgar5, births (all int)
    """
    # get year of CSV from the file name
    year = int(str(file_path).split("/")[-1][4:8])
    # different column names are required depending on year
    # columns for 2003+
    col_load_1 = ["dob_yy", "dob_mm", "apgar5"]
    # columns for 1989-2002
    col_load_2 = ["biryr", "birmon", "fmaps"]
    rename_col2 = ["dob_yy", "dob_mm", "apgar5"]
    # columns for 1982 through 1988
    col_load_3 = ["datayear", "birmon", "fmaps"]
    rename_col3 = ["dob_yy", "dob_mm", "apgar5"]
    # create dictionaries to rename older csvs
    # (an unused duplicate `col_rename_dict` from the original was removed)
    col_rename_dict2 = dict(zip(col_load_2, rename_col2))
    col_rename_dict3 = dict(zip(col_load_3, rename_col3))
    # if the CSVs are of newer format
    if year >= 2003:
        # 2019 and 2020 columns are capitalized
        if year >= 2019:
            col_load_1 = [col_name.upper() for col_name in col_load_1]
        # load only select columns, and set dtype for columns
        df = pd.read_csv(file_path, nrows=nrows, usecols=col_load_1, dtype=str)
        df.columns = df.columns.str.lower()
        # drop any rows with NaN's
        df = df.dropna()
        # one 'birth' per record; groupby-count turns it into a tally
        df["births"] = np.ones(df.shape[0])
        df = (
            df.groupby(["dob_yy", "dob_mm", "apgar5"], as_index=False)
            .count()
            .sort_values(by=["dob_yy", "dob_mm", "apgar5"])
        )
    elif year > 1988 and year < 2004:
        # load only select columns from the birth CSV
        df = pd.read_csv(file_path, nrows=nrows, usecols=col_load_2, dtype=str).rename(
            columns=col_rename_dict2
        )
        # drop any rows with NaN's
        df = df.dropna()
        df["births"] = np.ones(df.shape[0])
        df = (
            df.groupby(["dob_yy", "dob_mm", "apgar5"], as_index=False)
            .count()
            .sort_values(by=["dob_yy", "dob_mm", "apgar5"])
        )
    # if the CSVs are of older format
    elif year > 1977 and year <= 1988:
        # load only select columns from the birth CSV
        df = pd.read_csv(file_path, nrows=nrows, usecols=col_load_3, dtype=str).rename(
            columns=col_rename_dict3
        )
        # years before 1989 only show a single digit (i.e. 2 for 1982)
        df = df.drop(columns=["dob_yy"])
        df["dob_yy"] = np.array([year] * df.shape[0])
        # drop any rows with NaN's
        df = df.dropna()
        df["births"] = np.ones(df.shape[0])
        df = (
            df.groupby(["dob_yy", "dob_mm", "apgar5"], as_index=False)
            .count()
            .sort_values(by=["dob_yy", "dob_mm", "apgar5"])
        )
    # if the csvs are older than 1978 they do not have relevant cols
    # like apgar, and thus we skip them
    else:
        df = pd.DataFrame(columns=["dob_yy", "dob_mm", "apgar5", "births"])
    # return the dataframe, and order the columns in a fixed manner
    print(f'{year} processing complete')
    return df[["dob_yy", "dob_mm", "apgar5", "births"]].astype(
        {"dob_mm": int, "dob_yy": int, "apgar5": int, "births": int}
    )
###############################################################################
# Functions for processing the collated data files and returning a dataframe
###############################################################################
def get_month_string(cols):
    """Return the full English month name for a 1-based month index.

    *cols* is a one-element sequence whose first entry is the month number
    (as produced by df[["dob_mm"]].apply(..., axis=1)).
    """
    # Build a dummy date in that month and let strftime name it
    # (idiom from https://stackoverflow.com/a/28446432/9214620).
    return datetime.date(1900, int(cols[0]), 1).strftime("%B")
def get_conception_month(cols):
    """Return the month name nine months before the given birth month.

    *cols* is a one-element sequence holding the birth month number; the
    year is a dummy (1900), only the month name matters.
    """
    birth = datetime.date(1900, int(cols[0]), 1)
    return (birth + relativedelta(months=-9)).strftime("%B")
def get_conception_year(cols):
    """Return the calendar year nine months before the birth date.

    *cols* is (dob_mm, dob_yy) as produced by
    df[["dob_mm", "dob_yy"]].apply(..., axis=1).
    """
    birth = datetime.date(int(cols[1]), int(cols[0]), 1)
    return int((birth + relativedelta(months=-9)).strftime("%Y"))
def get_conception_month_index(cols):
    """Return the 1-12 month number nine months before the birth month.

    *cols* is a one-element sequence holding the birth month number; the
    year is a dummy (1900).
    """
    birth = datetime.date(1900, int(cols[0]), 1)
    return int((birth + relativedelta(months=-9)).strftime("%m"))
def df_birth_no_geo_prep(df):
    """Prepare the birth data for analysis.

    Adds conception month/year columns (nine months before birth) next to
    the birth-month columns, then sorts by conception date.
    NOTE: the new columns are added to *df* in place.

    Args:
        df (pd.DataFrame): The raw birth data. Requires columns: dob_yy,
            dob_mm, births.

    Returns:
        pd.DataFrame with columns conc_yy, conc_month, dob_yy, birth_month,
        conc_mm, dob_mm, births.
    """
    # load csv is like this...
    # df = pd.read_csv(data_file, dtype=int).sort_values(by=['dob_yy', 'dob_mm', 'dob_wk'])
    # derive conception/birth month descriptors from dob_mm (and dob_yy)
    df["conc_month"] = df[["dob_mm"]].apply(get_conception_month, axis=1)
    df["birth_month"] = df[["dob_mm"]].apply(get_month_string, axis=1)
    df["conc_mm"] = df[["dob_mm"]].apply(get_conception_month_index, axis=1)
    df["conc_yy"] = df[["dob_mm", "dob_yy"]].apply(get_conception_year, axis=1)
    ordered_cols = [
        "conc_yy",
        "conc_month",
        "dob_yy",
        "birth_month",
        "conc_mm",
        "dob_mm",
        "births",
    ]
    return df.sort_values(by=["conc_yy", "conc_mm"])[ordered_cols]
def df_birth_with_geo_prep(df, df_abbr):
    """Aggregate geo-coded birth data per state/month and add conception columns.

    Args:
        df (pd.DataFrame): birth data with columns dob_yy, dob_mm,
            state_name_mr, mrstatefips, mrcntyfips, births.
        df_abbr (pd.DataFrame): state lookup with columns 'state' and 'abbr'.

    Returns:
        pd.DataFrame keyed by (dob_yy, dob_mm, state) with summed births and
        conception month/year columns.
    """
    # attach the two-letter state abbreviation, dropping the join key
    merged = pd.merge(df, df_abbr,
                      left_on='state_name_mr',
                      right_on='state',
                      how='inner', copy=False).drop(['state'], axis=1)
    # sum births per (year, month, state); county FIPS is meaningless after
    # this aggregation so it is dropped
    grouped = merged.groupby(
        ['dob_yy', 'dob_mm', 'state_name_mr', 'mrstatefips', 'abbr'],
        as_index=False).sum()
    grouped = grouped.sort_values(
        by=['dob_yy', 'dob_mm', 'state_name_mr']).drop(columns=['mrcntyfips'])
    # add conception month columns and birth month
    grouped['conc_month'] = grouped[['dob_mm']].apply(get_conception_month, axis=1)
    grouped['birth_month'] = grouped[['dob_mm']].apply(get_month_string, axis=1)
    grouped['conc_mm'] = grouped[['dob_mm']].apply(get_conception_month_index, axis=1)
    grouped['conc_yy'] = grouped[['dob_mm', 'dob_yy']].apply(get_conception_year, axis=1)
    return grouped[['dob_yy', 'dob_mm', 'state_name_mr',
                    'mrstatefips', 'abbr', 'conc_month',
                    'birth_month', 'conc_mm', 'conc_yy', 'births']]
def filter_by_year(df, filter_cat="conc_yy", year=1990):
    """Filter df to one year and aggregate births per month.

    filter_cat chooses the filter column: conception year ('conc_yy') or
    birth year ('dob_yy'); the result is sorted by the matching month
    column (conc_mm or dob_mm).
    """
    group_cols = [
        "conc_yy",
        "conc_month",
        "dob_yy",
        "birth_month",
        "conc_mm",
        "dob_mm",
    ]
    # keep the requested year, then sum births within each month
    selected = df[df[filter_cat] == year]
    aggregated = selected.groupby(group_cols, as_index=False).sum()
    # sort on the month axis that matches the filter column
    sort_col = "conc_mm" if filter_cat == "conc_yy" else "dob_mm"
    return aggregated.sort_values(by=[sort_col]).reset_index(drop=True)
def percentage_birts_by_month(df, years_greater_than=1980):
    """Take the prepared df and calculate the average births (avg_births)
    and percent above average (percent_above_avg) for each month.

    Args:
        df (pd.DataFrame): The prepared birth data (created using
            df_birth_no_geo_prep). Requires columns: "conc_yy",
            "conc_month", "dob_yy", "birth_month", "conc_mm", "dob_mm",
            "births".
        years_greater_than (int): Only include data from above this year.
    """
    key_cols = [
        "conc_yy",
        "conc_month",
        "dob_yy",
        "birth_month",
        "conc_mm",
        "dob_mm",
    ]
    # collapse to one row per month and put them in chronological order
    monthly = df.groupby(key_cols, as_index=False).sum()
    monthly = monthly.sort_values(by=["dob_yy", "dob_mm"]).reset_index(drop=True)
    # 12-month rolling average and relative deviation from it, in percent
    monthly["avg_births"] = monthly["births"].rolling(window=12).mean()
    monthly["percent_above_avg"] = (
        (monthly["births"] - monthly["avg_births"]) / monthly["avg_births"] * 100
    )
    # only select dates > years_greater_than
    return monthly[monthly["dob_yy"] > years_greater_than]
from dataclasses import dataclass
import gc
from torch import optim
import numpy as np
import random
import os
from src.models import *
# To eliminate randomness
def seed_everything(seed: int = 77):
    """Make runs reproducible.

    Seeds every RNG in use (python `random`, hash seed, numpy, torch CPU
    and all CUDA devices) and pins cuDNN/cuBLAS to deterministic settings.
    """
    # seed every random number generator with the same value
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # environment knobs: hash randomization and deterministic cuBLAS
    os.environ['PYTHONHASHSEED'] = str(seed)
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:2"
    # force deterministic (non-autotuned) cuDNN behaviour
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True
@dataclass
class GeneticAlgorithmConfig:
    """Hyper-parameters of the genetic search itself.

    ell: population size.
    k: number of top individuals kept for breeding each generation.
    mutation_rate: per-gene probability of a mutation being applied.
    num_epochs: number of generations to run.
    """
    ell: int = 20
    k: int = 5
    mutation_rate: float = 0.1
    num_epochs: int = 10
class Individual:
    """One chromosome: a randomly drawn RNN hyper-parameter configuration."""

    def __init__(self):
        # Random 7-digit display tag, e.g. '#0421853'.
        # (RNG calls are kept in the original order for reproducibility.)
        digits = np.random.randint(0, 9, size=7).tolist()
        self.name = '#' + ''.join(str(d) for d in digits)
        # Genes, drawn uniformly from their search ranges.
        self.num_epochs_base = np.random.choice(np.arange(60, 300))
        self.hidden_size = np.random.choice([2 ** power for power in range(2, 10)])
        self.num_layers = np.random.choice(np.arange(2, 15))
        self.learning_rate = round(np.random.random(), 2)
        # Filled in later by GeneticAlgorithm.evaluate().
        self.loss = np.inf
        self.fitness = None

    def __repr__(self):
        """
        For convenience only.
        """
        pieces = [
            'Chromosome ' + self.name,
            f' with the loss of {self.loss:.4}',
            f' and {self.num_epochs_base} epochs:\n',
            f'learning_rate = {self.learning_rate:.4}, ',
            f'num_layers = {self.num_layers}, ',
            f'hidden_size = {self.hidden_size}',
        ]
        return ''.join(pieces)
@dataclass
class Population:
    """Container for the current pool of Individuals.

    Note: defines its own __init__ and declares no class-level fields, so
    the @dataclass decorator contributes nothing beyond boilerplate here.
    """
    def __init__(self, config: GeneticAlgorithmConfig):
        # Fresh random individuals; GeneticAlgorithm.select() fills the
        # two caches below each generation.
        self.individuals = [Individual() for _ in range(config.ell)]
        # Top-k individuals (by fitness) from the last select(); None until then.
        self.top_k_individuals = None
        # NOTE: keeps the original 'indivdual' spelling because
        # GeneticAlgorithm references this attribute name.
        self.best_indivdual = None
class GeneticAlgorithm:
def __init__(self, optimized_block, criterion,
population: Population, config: GeneticAlgorithmConfig,
device, verbose=True, seed: int = 77):
self.optimized_block = optimized_block
self.criterion = criterion
self.population = population
self.config = config
self.device = device
self.verbose = verbose
self.seed = seed
self.val_loss_history = []
def fit(self, X_val, y_val):
for epoch in range(self.config.num_epochs):
self.evaluate(X_val, y_val, self.population)
self.select(self.population)
self.val_loss_history.append(self.population.best_indivdual.loss)
offsprings = []
for weak_individual in self.population.individuals[self.config.k:]:
strong_individual = np.random.choice(self.population.top_k_individuals)
offsprings.append(self.crossover(strong_individual, weak_individual))
new_population = self.population.top_k_individuals + offsprings
mutated_population = []
for individual in new_population[1:]:
mutated_population.append(self.mutate(individual))
self.population.individuals = [self.population.best_indivdual] + mutated_population
if self.verbose:
clear_output(wait=True)
print(f"Epoch: {epoch + 1}")
plot_metric(self.criterion.__class__.__name__,
val_metric=self.val_loss_history)
print(f'{self.population.best_indivdual}')
def evaluate(self, X_val, y_val, population: Population):
losses = []
for individual in population.individuals:
gc.collect()
torch.cuda.empty_cache()
if self.optimized_block == 'LSTM':
seed_everything(self.seed)
model = LSTM(input_size=X_val.shape[2],
hidden_size=int(individual.hidden_size),
num_layers=individual.num_layers).to(self.device)
elif self.optimized_block == 'GRU':
seed_everything(self.seed)
model = GRU(input_size=X_val.shape[2],
hidden_size=int(individual.hidden_size),
num_layers=individual.num_layers).to(self.device)
else:
raise ValueError('Only LSTM and GRU blocks are available for optimization.')
optimizer = optim.Adam(model.parameters(), lr=individual.learning_rate)
seed_everything(self.seed)
train(model, self.criterion, optimizer, device, X_val, y_val, individual.num_epochs_base,
verbose=False, return_loss_history=False, compute_test_loss=False)
individual.loss = predict(model, X_val, y_val, self.criterion, device)
losses.append(individual.loss)
del model
for individual in population.individuals:
individual.fitness = self.normalize(individual.loss, min(losses), max(losses))
def normalize(self, z, loss_best, loss_worst) -> float:
    """Min-max normalize a loss value: best loss -> 1.0, worst -> 0.0.

    Args:
        z: the loss value to normalize.
        loss_best: smallest loss in the population.
        loss_worst: largest loss in the population.

    Returns:
        Normalized fitness in [0, 1].
    """
    if loss_best == loss_worst:
        # All individuals scored identically; avoid division by zero and
        # treat every individual as maximally fit.
        return 1.0
    return (z - loss_worst) / (loss_best - loss_worst)
def select(self, population: Population):
    """Rank individuals by fitness (descending) and record the winner
    and the top-k elite on the population object."""
    by_fitness = sorted(population.individuals,
                        key=lambda ind: ind.fitness,
                        reverse=True)
    # NOTE: attribute name "best_indivdual" (sic) is read elsewhere; keep it.
    population.best_indivdual = by_fitness[0]
    population.top_k_individuals = by_fitness[:self.config.k]
def crossover(self, strong_parent: Individual, weak_parent: Individual) -> Individual:
    """Breed an offspring, inheriting each gene from one parent.

    The stronger parent's gene is chosen with probability proportional to
    its share of the combined fitness.

    Args:
        strong_parent: elite individual (higher expected fitness).
        weak_parent: individual being replaced.

    Returns:
        A new Individual with each gene copied from one of the parents.
    """
    offspring = Individual()
    prob = strong_parent.fitness / (strong_parent.fitness + weak_parent.fitness)
    # One RNG draw per gene, in a fixed order, so the random stream is
    # identical to the previous unrolled implementation.
    for gene in ('hidden_size', 'num_layers', 'learning_rate', 'num_epochs_base'):
        if np.random.random() > prob:
            setattr(offspring, gene, getattr(weak_parent, gene))
        else:
            setattr(offspring, gene, getattr(strong_parent, gene))
    return offspring
def mutate(self, individual: Individual) -> Individual:
    """Randomly perturb each gene with probability config.mutation_rate.

    hidden_size moves one power-of-two step in [8, 512]; num_layers by
    +-2 in [2, 14]; learning_rate by +-0.1 in [0.001, 1]; num_epochs_base
    by roughly +-30 in [10, 300]. Mutates `individual` in place.

    Returns:
        The same Individual, possibly mutated.
    """
    if np.random.random() < self.config.mutation_rate:
        individual.hidden_size = 2 ** (np.log2(individual.hidden_size) + np.random.randint(-1, 2))
        individual.hidden_size = int(np.clip(individual.hidden_size, 2 ** 3, 2 ** 9))
    if np.random.random() < self.config.mutation_rate:
        individual.num_layers += np.random.randint(-2, 3)
        # BUGFIX: np.array(...).clip(...) left a 0-d ndarray in the gene;
        # cast back to a plain int (it is passed to nn.LSTM/GRU num_layers).
        individual.num_layers = int(np.clip(individual.num_layers, 2, 14))
    if np.random.random() < self.config.mutation_rate:
        individual.learning_rate += round(np.random.uniform(-0.1, 0.1), 2)
        individual.learning_rate = float(np.clip(individual.learning_rate, 0.001, 1))
    if np.random.random() < self.config.mutation_rate:
        # NOTE(review): randint(-30, 30) excludes +30 while the other genes'
        # ranges are symmetric — possibly intended randint(-30, 31); left
        # unchanged pending confirmation.
        individual.num_epochs_base += np.random.randint(-30, 30)
        individual.num_epochs_base = int(np.clip(individual.num_epochs_base, 10, 300))
    return individual
from __future__ import print_function
import os
import glob # may cause segmentation fault in (C+Python) environment
import numpy as np
import cv2
import csv
import faiss
import pandas as pd
import utm
### For dataset
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import get_streetview
from vps import vps
import unit_test_vps_config as config
import sys;sys.path.insert(0,'/home/ccsmm/workdir/ccsmmutils');import torch_img_utils as tim
from ipdb import set_trace as bp
# Maps a record kind to the filename suffix produced by the data export;
# full filenames are "<YYMMDD_HHMMSS_>" + suffix (see get_input_file_list).
postfix_dict = {
    "ascen_fix":"ascen_fix.csv",
    "images":"images.avi",
    "imu_data":"imu_data.csv",
    "intersect":"intersect.csv",
    "novatel_fix":"novatel_fix.csv"}
def get_input_file_list(testset_dir, ext="*.avi", out_postfix='vps.csv'):
    """Collect matched (video, ascen GPS csv, output csv) path triples.

    Args:
        testset_dir: directory containing the exported dataset files.
        ext: glob pattern selecting the input videos.
        out_postfix: suffix for the per-video output csv.

    Returns:
        Three parallel lists: video paths, ascen_fix csv paths, output paths.
    """
    flist = glob.glob(os.path.join(testset_dir, ext))
    flist.sort()
    images = []
    ascen_fix = []
    outputs = []
    # (enumerate index removed — it was unused)
    for fn in flist:
        # First 14 chars, e.g. "191115_151140_" from 191115_151140_images.avi
        prefix = os.path.basename(fn)[:14]
        images.append(fn)
        ascen_fix.append(os.path.join(testset_dir, prefix+postfix_dict["ascen_fix"]))
        outputs.append(os.path.join(testset_dir, prefix+out_postfix))
    assert len(images)==len(ascen_fix), "Number of files are mis-matched."
    return images, ascen_fix, outputs
class dgDataset(Dataset):
    """Pairs frames of a dash-cam .avi with GPS fixes from an ascen csv.

    Frame timestamps are reconstructed from the GPS log's time span and
    the video's frame rate, then each frame is matched to the nearest
    GPS fix in time.
    """
    def __init__(self, avi, ascen):
        self.cap = cv2.VideoCapture(avi)
        self.video_fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.video_frame_length = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self.timestamp, self.lat, self.lon, self.alt = self.parse_ascen(ascen)
        self.starttime = self.timestamp[0]
        self.endtime = self.timestamp[-1]
        self.time_length = self.endtime - self.starttime # ascen's total seconds
        # Correction factor for video duration != GPS log duration.
        self.video_scale = self.video_fps * self.time_length / self.video_frame_length
        print("=====> Start reading {} and {}.".format(avi, ascen))
    def parse_ascen(self, ascen):
        '''Parse the ascen GPS csv into (timestamp, lat, lon, alt) float arrays.

        Sample of the expected csv layout:
        ipdb> data.head()
        time .header.seq .header.stamp.secs .header.stamp.nsecs .header.frame_id ... .latitude .longitude .altitude .position_covariance .position_covariance_type
        0 2019/11/15/11:40:06.994837 24 1573785606 994514942 gps ... 36.382057 127.367646 90.2 (18.3184, 0.0, 0.0, 0.0, 18.3184, 0.0, 0.0, 0.... 1
        1 2019/11/15/11:40:07.993330 25 1573785607 993129014 gps ... 36.382056 127.367646 90.5 (18.3184, 0.0, 0.0, 0.0, 18.3184, 0.0, 0.0, 0.... 1
        2 2019/11/15/11:40:08.991022 26 1573785608 990794897 gps ... 36.382057 127.367646 90.4 (24.2064, 0.0, 0.0, 0.0, 24.2064, 0.0, 0.0, 0.... 1
        '''
        data = pd.read_csv(ascen, sep=",")
        lat = np.array([ float(i) for i in data[".latitude"]])
        lon = np.array([ float(i) for i in data[".longitude"]])
        alt = np.array([ float(i) for i in data[".altitude"]])
        # Note: only whole seconds are used; nanoseconds are ignored.
        timestamp = np.array([float(i) for i in data[".header.stamp.secs"]])
        return timestamp, lat, lon, alt
    def get_image_timestamp(self, fnumber):
        # Map a frame number onto the GPS clock.
        timestamp = self.starttime + fnumber * self.video_scale / self.video_fps
        return timestamp
    def get_latlon_from_timestamp(self, q_timestamp):
        # Nearest-neighbour (in time) lookup into the GPS track.
        best_similar_idx = np.argmin(np.abs(self.timestamp - q_timestamp))
        return self.lat[best_similar_idx], self.lon[best_similar_idx], best_similar_idx
    def __len__(self):
        return int(self.video_frame_length )
    def release(self):
        # Release the underlying OpenCV capture handle.
        self.cap.release()
    def __getitem__(self, idx):
        # NOTE(review): cap.read() returns frames *sequentially* and ignores
        # `idx`, so this dataset is only correct with a non-shuffled,
        # single-worker DataLoader — confirm before reusing elsewhere.
        fnumber = idx
        ret, qimg = self.cap.read()
        image_timestamp = self.get_image_timestamp(fnumber)
        lat, lon, tidx = self.get_latlon_from_timestamp(image_timestamp)
        return [qimg, fnumber, image_timestamp, lat, lon]
def get_utm_err(lat1, lon1, lat2, lon2):
    """Return the L2 distance in meters between two lat/lon points,
    computed in the UTM plane; -1 when either point is invalid.

    Args:
        lat1, lon1: first point (e.g. current GPS fix).
        lat2, lon2: second point (e.g. predicted location).

    Returns:
        Euclidean distance in meters, or -1 for NaN / out-of-range input.
    """
    # BUGFIX: the original checked np.isnan(lat1) twice and never lat2.
    if np.isnan(lat1) or np.isnan(lon1) or np.isnan(lat2) or np.isnan(lon2):
        return -1
    # Sanity window around the test site (~36-38N, 127-128E).
    if lat1 < 36 or lon1 < 127 or lat2 < 36 or lon2 < 127:
        return -1
    if lat1 > 38 or lon1 > 128 or lat2 > 38 or lon2 > 128:
        return -1
    p1 = np.array(utm.from_latlon(lat1, lon1)[0:2])
    p2 = np.array(utm.from_latlon(lat2, lon2)[0:2])
    err_l2norm = np.linalg.norm(p1-p2) # l2norm = np.sqrt(np.sum((p1-p2)**2))
    return err_l2norm
def do_vps(avi, ascen, output_filename, begin_frame=1000, server_ip="129.254.81.204"): # file names
    """Run visual place recognition over one dash-cam video.

    For each frame past `begin_frame`, queries the module-level `mod_vps`
    instance against the street-view server and appends one csv row of
    prediction + GPS ground truth to `output_filename`.

    Args:
        avi: input video filename.
        ascen: matching ascen GPS csv filename.
        output_filename: csv file to (over)write, one row per frame.
        begin_frame: number of initial frames to skip (broken video start).
        server_ip: street-view image server address.
    """
    print("=====> Start reading {}.".format(avi))
    dataset = dgDataset(avi, ascen)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
    # Line-buffered so partial results survive a crash.
    fout = open(output_filename, 'w', buffering=1)
    string='fnumber,timestamp,svid,svidx,pred_lat,pred_lon,distance,angle,confidence,curr_lat,curr_lon,utm_err'
    fout.write(string+'\n')
    print(string)
    for idx, [qimg, fnumber, timestamp, lat, lon] in enumerate(dataloader):
        # batch_size == 1: strip the batch dimension from every field.
        qimg = qimg.numpy()[0]
        fnumber = fnumber.numpy()[0]
        timestamp = timestamp.numpy()[0]
        curr_lat = lat.numpy()[0]
        curr_lon = lon.numpy()[0]
        try:
            [h, w, c] = qimg.shape
            if (h < 480) or (w < 640) or c != 3:
                print("Invalid shape of query image :", h,w,c)
                continue
        except Exception:
            # BUGFIX: the handler previously printed the undefined name
            # `fname`, raising a NameError; report the frame number instead.
            print("Broken query image :", fnumber)
            continue
        qimg = cv2.resize(qimg,(640,480))
        if idx < begin_frame: # Skip beginning videos
            print("Skip {}\r".format(idx), end='')
            continue
        #cv2.imshow('QImg', qimg)
        #cv2.waitKey(1)
        vps_IDandConf = mod_vps.apply(image=qimg, K=1, gps_lat=lat, gps_lon=lon, gps_accuracy=0.8, timestamp=timestamp, ipaddr=server_ip) # k=5 for knn
        svid = vps_IDandConf[0][0] # street view id from map server
        svidx = "f" # cubic == "f" || cubic == "b" || cubic == "l" || cubic == "r" || cubic == "u" || cubic == "d")
        confidence = vps_IDandConf[1][0] # 0 ~ 1, default 1
        distance = -1 # distance in the ground from camera to predicted point. default -1, meter
        angle = np.pi # Relative angle from camera to predicted point(CCW : +). default is pi, radian
        # Resolve the predicted street-view id to its lat/lon.
        _, pred_lat, pred_lon = get_streetview.GetStreetView_fromID(svid, roi_radius=1, ipaddr=server_ip)
        utm_err = get_utm_err(curr_lat, curr_lon, pred_lat, pred_lon)
        string = '{0:04d},{1:10.3f},{2:11d},{3:},{4:2.8f},{5:3.7f},{6:3d},{7:1.3f},{8:1.3f},{9:2.8f},{10:3.7f},{11:3.1f}'.format(
            fnumber,timestamp,svid,svidx,pred_lat,pred_lon,distance,angle,confidence,curr_lat,curr_lon,utm_err)
        fout.write(string+'\n')
        print(string)
        if False:
            # Debug toggle: side-by-side query/database image display.
            qImgs = mod_vps.get_qImgs() # [10,3,480,640]
            dbImgs = mod_vps.get_dbImgs() # [10,3,480,640]
            qdbImgs = torch.cat((qImgs,dbImgs),-1) # [10,3,480,1280]
    fout.close()
    dataset.release()
    #cv2.destroyAllWindows()
if __name__ == "__main__":
    ## Image server address
    server_ip = config.ip
    region = config.dataset_region
    ## Set the GPU number (which gpu will you use)
    gpu_num = config.which_gpu
    ## In/out directory and file information
    testset_dir = config.indir
    in_ext = config.input_reference_ext # "*.avi"
    out_postfix = config.out_postfix # "vps_lr.csv"
    date_idx = config.date_idx
    ## Skip frames for invalid video at the very beginning.
    begin_skip_frame = config.begin_skip_frame
    images, ascen_fix, outputs = get_input_file_list(testset_dir, ext=in_ext, out_postfix=out_postfix)
    # mod_vps is a module-level global — do_vps() reads it directly.
    mod_vps = vps(gpu_num, region)
    mod_vps.initialize()
    # Process only the single dataset selected by date_idx.
    avi_filename = images[date_idx]
    ascen_filename = ascen_fix[date_idx]
    output_filename = outputs[date_idx]
    do_vps(avi_filename, ascen_filename, output_filename, begin_skip_frame, server_ip) # avi and novatel are filenames
    #for i, [avi, ascen] in enumerate(zip(images, ascen_fix)):
    #do_vps(avi_filename, ascen_filename, output_filename, 2500, server_ip) # avi and novatel are filenames
from numpy.testing import assert_almost_equal, assert_raises
import numpy as np
from id3.data import load_data
import uuid
# Expected contents of test.csv: ten rows of 2 features plus ten targets.
X = np.arange(20).reshape(10, 2)
y = np.arange(10).reshape(10, )
def test_load_data():
    """load_data raises IOError on a missing file and parses test.csv."""
    # A random UUID is effectively guaranteed not to exist as a path.
    assert_raises(IOError, load_data.load_data, str(uuid.uuid4()))
    features, targets, _ = load_data.load_data("test.csv", nominal=False)
    assert_almost_equal(X, features)
    assert_almost_equal(y, targets)
import numpy as np
import time
from datetime import timedelta
def sum_1to1000(n=1000):
    """Sum the integers 1..n with a plain Python loop.

    Kept as an explicit loop on purpose: this is the pure-Python baseline
    for the NumPy timing comparison below. Generalized with a
    backward-compatible `n` (default 1000).
    """
    ret = 0
    for i in range(1, n + 1):
        ret += i
    return ret
def sum_1to1000_nparray(n=1000):
    """Sum the integers 1..n via a NumPy dot product (timing comparison
    counterpart of sum_1to1000). Generalized with a backward-compatible
    `n` (default 1000); the dead trailing `pass` was removed.
    """
    a = np.ones(n)
    b = np.arange(1, n + 1)
    return int(a.dot(b))
if __name__ == "__main__":
    # Time the pure-Python loop version.
    start = time.time()
    print(sum_1to1000())
    elapsed = time.time() - start
    print ("sum_1to1000(): {} ".format(str(timedelta(seconds=elapsed))))
    # Time the NumPy dot-product version for comparison.
    start = time.time()
    print(sum_1to1000_nparray())
    elapsed = time.time() - start
    print ("sum_1to1000_nparray(): {} ".format(str(timedelta(seconds=elapsed))))
    pass
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import numpy as np
from svm_model import SVM_Model
from svm_model import SVM_Model
from sklearn.datasets import load_iris
def svm_test():
    """Train an RBF-kernel SVM on two iris features and plot the boundary.

    Uses the first 100 iris samples (classes 0 and 1) and only the first
    two features so the decision surface can be drawn in 2-D.
    """
    iris_dataset = load_iris()
    samples = np.array(iris_dataset['data'][:, :2][:100])
    labels = np.array(iris_dataset['target'][:100])
    # Relabel to {+1, -1} for the binary SVM.
    labels[labels != 1] = -1
    print(np.shape(samples), np.shape(labels))
    # Standardize each feature to zero mean and unit variance.
    feature_mean = np.mean(samples, axis=0)
    feature_std = np.std(samples, axis=0)
    samples = (samples - feature_mean) / feature_std
    model = SVM_Model(samples, labels, 1e-1, 0.1, 1e-5, 'rbf', 100)
    model.training()
    model.plot_classification()
def print_hi(name):
    """Print a greeting for *name* (PyCharm sample entry point)."""
    greeting = f'Hi, {name}'
    print(greeting)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Sample greeting, then the actual SVM demo.
    print_hi('PyCharm')
    svm_test()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
import unittest
import shapely
import numpy as np
from mlx.od.nms import compute_nms, compute_iou
class TestNMS(unittest.TestCase):
    """Tests for IoU computation and non-maximum suppression."""
    def test_iou(self):
        # Two 4x4 boxes overlapping in a 2x2 square:
        # intersection = 4, union = 16 + 16 - 4.
        box_a = shapely.geometry.box(0, 0, 4, 4)
        box_b = shapely.geometry.box(2, 2, 6, 6)
        self.assertEqual(compute_iou(box_a, box_b), 4 / (32 - 4))
    def test_nms(self):
        boxes = np.array([
            [0, 0, 10, 10],
            [1, 1, 11, 11],
            [5, 5, 10, 10],
            [1, 1, 11, 11]
        ])
        labels = np.array([1, 1, 1, 2])
        # Box 1 outranks overlapping box 0; box 3 survives (different label).
        kept = compute_nms(boxes, labels, np.array([0.8, 0.9, 0.9, 0.9]), iou_thresh=0.5)
        self.assertListEqual(kept, [1, 2, 3])
        # Box 0 outranks overlapping box 1.
        kept = compute_nms(boxes, labels, np.array([0.9, 0.8, 0.9, 0.9]), iou_thresh=0.5)
        self.assertListEqual(kept, [0, 2, 3])
        # Very high threshold: nothing is suppressed.
        kept = compute_nms(boxes, labels, np.array([0.9, 0.8, 0.9, 0.9]), iou_thresh=0.9)
        self.assertListEqual(kept, [0, 1, 2, 3])
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
from sympy import *
from tait_bryan_R_utils import *
# Symbolic derivation of the point-to-point observation equation for a
# laser ray reflected off a rotating mirror, plus its Jacobian w.r.t. the
# optimized parameters (pose, plane, mirror calibration).
x_t, y_t, z_t = symbols('x_t y_t z_t')
px, py, pz = symbols('px py pz')
om, fi, ka = symbols('om fi ka')
pxc, pyc, pzc = symbols('pxc pyc pzc')
omc, fic, kac = symbols('omc fic kac')
om_mirror = symbols('om_mirror')
ray_dir_x, ray_dir_y, ray_dir_z, ray_length = symbols('ray_dir_x ray_dir_y ray_dir_z ray_length')
plane_a, plane_b, plane_c, plane_d = symbols('plane_a plane_b plane_c plane_d')

# Parameters the Jacobian is taken with respect to (14 in total).
position_symbols = [px, py, pz]
orientation_symbols = [om, fi, ka]
position_symbols_cal = [pyc, pzc]
orientation_symbols_cal = [fic, kac]
plane_symbols = [plane_a, plane_b, plane_c, plane_d]
all_symbols = position_symbols + orientation_symbols + plane_symbols + position_symbols_cal + orientation_symbols_cal

# Mirror pose: calibration transform composed with rotation about om_mirror.
mcal=matrix44FromTaitBryan(pxc, pyc, pzc, omc, fic, kac)
m_rot_mirror=mcal * matrix44FromTaitBryan(0, 0, 0, om_mirror, 0, 0)
plane = Matrix([[plane_a, plane_b, plane_c, plane_d]])
# Invert the rigid transform explicitly: R^T and -R^T * t.
R_cw=m_rot_mirror[:-1,:-1].transpose()
T_wc=Matrix([0, 0, 0]).vec()
T_cw=-R_cw*T_wc
RT_cw=Matrix.hstack(R_cw, T_cw)
m_rot_mirror_inv=Matrix.vstack(RT_cw, Matrix([[0,0,0,1]]))
# Transform the mirror plane into the ray frame.
plane_t = plane * m_rot_mirror_inv
plane_a = plane_t[0]
plane_b = plane_t[1]
plane_c = plane_t[2]
plane_d = plane_t[3]
# Ray / plane intersection (ray through the origin with direction ray_dir).
a = plane_a * ray_dir_x + plane_b * ray_dir_y + plane_c * ray_dir_z
intersection_x = - ray_dir_x * (plane_d/a)
intersection_y = - ray_dir_y * (plane_d/a)
intersection_z = - ray_dir_z * (plane_d/a)
# Reflect the ray direction about the plane normal.
n=Matrix([plane_a, plane_b, plane_c]).vec()
d=Matrix([ray_dir_x, ray_dir_y, ray_dir_z]).vec()
rd=2*d.dot(n)*n-d
# Remaining ray length after the mirror bounce.
ll = ray_length - sqrt(intersection_x * intersection_x + intersection_y * intersection_y + intersection_z * intersection_z)
x_s=-(intersection_x + rd[0] * ll)
y_s=-(intersection_y + rd[1] * ll)
# BUGFIX: the z component previously reused intersection_y (copy-paste
# error); it must follow the x/y pattern with intersection_z.
z_s=-(intersection_z + rd[2] * ll)
point_source = Matrix([x_s, y_s, z_s, 1]).vec()
point_target = Matrix([x_t, y_t, z_t]).vec()
transformed_point_source = (matrix44FromTaitBryan(px, py, pz, om, fi, ka) * point_source)[:-1,:]
target_value = Matrix([0,0,0]).vec()
model_function = transformed_point_source-point_target
delta = target_value - model_function
delta_jacobian=delta.jacobian(all_symbols)
print(delta)
print(delta_jacobian)
print(point_source)
# Emit C++ helpers (point transform, 3x14 Jacobian, residual) generated
# from the symbolic expressions derived above. Note: in the generated
# code `j` names the Eigen matrix parameter; the Python loop variable `j`
# below is only the column index fed into the %d placeholders.
with open("point_to_point_source_to_target_rotated_mirror_tait_bryan_wc_jacobian.h",'w') as f_cpp:
    f_cpp.write("inline void transform_point_rotated_mirror_tait_bryan_wc(double &x, double &y, double &z, double &px, double &py, double &pz, double &om, double &fi, double &ka, double &ray_dir_x, double &ray_dir_y, double &ray_dir_z, double &ray_length, double &plane_a, double &plane_b, double &plane_c, double &plane_d, double &om_mirror, double &pxc, double &pyc, double &pzc, double &omc, double &fic, double &kac)\n")
    f_cpp.write("{")
    f_cpp.write("x = %s;\n"%(ccode(transformed_point_source[0])))
    f_cpp.write("y = %s;\n"%(ccode(transformed_point_source[1])))
    f_cpp.write("z = %s;\n"%(ccode(transformed_point_source[2])))
    f_cpp.write("}")
    f_cpp.write("\n")
    f_cpp.write("inline void point_to_point_source_to_target_rotated_mirror_tait_bryan_wc_jacobian(Eigen::Matrix<double, 3, 14, Eigen::RowMajor> &j, double &px, double &py, double &pz, double &om, double &fi, double &ka, double &ray_dir_x, double &ray_dir_y, double &ray_dir_z, double &ray_length, double &plane_a, double &plane_b, double &plane_c, double &plane_d, double &x_t, double &y_t, double &z_t, double &om_mirror, double &pxc, double &pyc, double &pzc, double &omc, double &fic, double &kac)\n")
    f_cpp.write("{")
    # 3 residual rows x 14 optimized parameters.
    for i in range (3):
        for j in range (14):
            f_cpp.write("j.coeffRef(%d,%d) = %s;\n"%(i,j, ccode(delta_jacobian[i,j])))
    f_cpp.write("}")
    f_cpp.write("\n")
    f_cpp.write("inline void point_to_point_source_to_target_rotated_mirror_tait_bryan_wc(Eigen::Matrix<double, 3, 1> &delta, double &px, double &py, double &pz, double &om, double &fi, double &ka, double &ray_dir_x, double &ray_dir_y, double &ray_dir_z, double &ray_length, double &plane_a, double &plane_b, double &plane_c, double &plane_d, double &x_t, double &y_t, double &z_t, double &om_mirror, double &pxc, double &pyc, double &pzc, double &omc, double &fic, double &kac)\n")
    f_cpp.write("{")
    for i in range (3):
        for j in range (1):
            f_cpp.write("delta.coeffRef(%d,%d) = %s;\n"%(i,j, ccode(delta[i,j])))
    f_cpp.write("}")
    f_cpp.write("\n")
"""
Random choices functions
Copyright (c) 2019 Julien Kervizic
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
def random_choice_weight_dict(weight_dict: dict) -> str:
    """
    Pick one key of *weight_dict* at random, using its values as the
    selection probabilities.
    It requires the weights to be normalized (ie: summing to 1)
    Returns:
        str: key selected at random
    """
    keys = list(weight_dict.keys())
    probabilities = list(weight_dict.values())
    return np.random.choice(keys, p=probabilities)
def random_choice_non_normalized_weight_dict(weight_dict: dict) -> str:
    """
    Randomly select a key based on a weighted dict.
    It does not require the weights to be normalized (ie: summing to 1);
    they are normalized here before sampling.
    Returns:
        str: key selected at random
    """
    choices = list(weight_dict.keys())
    # Hoist the total out of the comprehension: the original recomputed
    # sum(weights.values()) once per element (O(n^2)).
    total = sum(weight_dict.values())
    proba = [w / total for w in weight_dict.values()]
    return np.random.choice(choices, p=proba)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Figure geometry (inches) for the 1x3 subplot row.
WIDTH = 12
HEIGHT = 3
plt.rcParams['font.size'] = 14
plt.rcParams['legend.fontsize'] = 14
# Fonttype 42 = TrueType, so text stays editable/embedded in PDF/PS output.
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.family'] = 'Times New Roman'
# Per-subplot settings: arms scheduled per step (M), total arms (N),
# and the episode interval between logged test points.
SCHEDULELIST = [1,1,25]
ARMSLIST = [4, 10, 100]
INTERVALLIST = [10, 10, 20]
########################## CONSTANTS ######################
LINEWIDTH = 1.75
BATCHSIZE = 5
BETA = 0.99
EPISODEEND = 2000
RUNS = 50
TIMELIMIT = 300
REINFORCELR = 0.001
def plotDeadlineIndex(ARMS, SCHEDULE):
    """Load the deadline-index baseline results and return the mean reward
    over RUNS together with its 5th and 95th percentiles."""
    fname = (f'../testResults/deadline_env/deadlineIndexResults_arms_{ARMS}_timeLimit_{TIMELIMIT}_schedule_{SCHEDULE}.csv')
    rewards = pd.read_csv(fname).iloc[:,1]
    p5, p95 = np.percentile(rewards[0:RUNS], [5, 95])
    mean_reward = sum(rewards[0:RUNS] / RUNS)
    return mean_reward, p5, p95
def plotNeurWIN(ARMS, SCHEDULE, INTERVAL):
    """Load the per-run NeurWIN learning curves and return the mean curve
    plus its 5th/95th percentile curves (one value per logged episode)."""
    per_run = []
    for run in range(RUNS):
        fname = (f'../testResults/deadline_env/nnIndexResults_arms_{ARMS}_batchSize_{BATCHSIZE}_run_{run}_timeLimit_{TIMELIMIT}_schedule_{SCHEDULE}.csv')
        per_run.append(pd.read_csv(fname).iloc[:, 1])
    mean_curve = np.sum(per_run, 0) / RUNS
    by_episode = np.transpose(per_run)
    return (mean_curve,
            np.percentile(by_episode, 5, axis=1),
            np.percentile(by_episode, 95, axis=1))
def plotWolp(ARMS, SCHEDULE, INTERVAL):
    """Load per-run WOLP-DDPG learning curves and return (mean curve,
    5th percentile curve, 95th percentile curve).

    The 100-arm results live in a different directory/file layout than
    the 4- and 10-arm ones, hence the branch on ARMS.
    """
    wolpRewards = []
    for i in range(RUNS):
        if ARMS == 100:
            wolpFileName = (f'../testResults/deadline_env/wolpResults_arms_{ARMS}_batchSize_{BATCHSIZE}_run_{i}_timeLimit_{TIMELIMIT}_schedule_{SCHEDULE}.csv')
        else:
            wolpFileName = (f'../wolpertinger_ddpg/testResults/deadline_ddpg_results_{ARMS}_arms_choose_{SCHEDULE}_run_{i}.csv')
        df = pd.read_csv(wolpFileName)
        runReward = df.iloc[:, 1]
        wolpRewards.append(runReward)
    wolpVal = np.sum(wolpRewards, 0) / RUNS
    wolpRewards = np.transpose(wolpRewards)
    percentile5wolp = np.percentile(wolpRewards, 5, axis=1)
    percentile95wolp = np.percentile(wolpRewards, 95, axis=1)
    return wolpVal, percentile5wolp, percentile95wolp
def plotReinforce(ARMS, SCHEDULE, INTERVAL):
    """Load per-run REINFORCE learning curves and return (mean curve,
    5th percentile curve, 95th percentile curve).

    NOTE(review): the backslash continuation below is *inside* the
    f-string literal, so the following line's leading whitespace (if any
    in the real file) becomes part of the path — confirm the filenames
    on disk actually match before changing this.
    """
    reinforceRewards = []
    for i in range(RUNS):
        reinforceFileName = (f'../testResults/deadline_env/reinforceResults_arms_{ARMS}_batchSize_{BATCHSIZE}\
_lr_{REINFORCELR}_run_{i}_schedule_{SCHEDULE}.csv')
        df = pd.read_csv(reinforceFileName)
        runReward = list(df.iloc[:,1])
        reinforceRewards.append(runReward)
    reinforceVal = np.sum(reinforceRewards, 0) / RUNS
    reinforcePercentileRewards = np.transpose(reinforceRewards)
    percentile5Reinforce = np.percentile(reinforcePercentileRewards, 5, axis=1)
    percentile95Reinforce = np.percentile(reinforcePercentileRewards, 95, axis=1)
    return reinforceVal, percentile5Reinforce, percentile95Reinforce
def plotAQL(ARMS, SCHEDULE, INTERVAL):
    """Load the per-run AQL learning curves and return the mean curve plus
    its 5th/95th percentile curves."""
    curves = []
    for run in range(RUNS):
        fname = (f'../testResults/deadline_env/aqlResults_arms_{ARMS}_run_{run}_schedule_{SCHEDULE}.csv')
        curves.append(pd.read_csv(fname).iloc[:, 1])
    mean_curve = np.sum(curves, 0) / RUNS
    by_episode = np.transpose(curves)
    return (mean_curve,
            np.percentile(by_episode, 5, axis=1),
            np.percentile(by_episode, 95, axis=1))
def plotQWIC(ARMS, SCHEDULE, INTERVAL):
    """Load per-run QWIC (Q-learning Whittle index) curves and return
    (mean curve, 5th percentile curve, 95th percentile curve)."""
    qLearningRewards = []
    for i in range(RUNS):
        qLearningFileName = (f'../testResults/deadline_env/qLearningResults_arms_{ARMS}_run_{i}_schedule_{SCHEDULE}.csv')
        df = pd.read_csv(qLearningFileName)
        runReward = df.iloc[:, 1]
        qLearningRewards.append(runReward)
    qLearningVal = np.sum(qLearningRewards, 0) / RUNS
    # Transpose to episodes x runs so percentiles are taken across runs.
    qLearningRewards = np.transpose(qLearningRewards)
    qLearningpercentile5 = np.percentile(qLearningRewards, 5, axis=1)
    qLearningpercentile95 = np.percentile(qLearningRewards, 95, axis=1)
    return qLearningVal, qLearningpercentile5, qLearningpercentile95
def plotWIBQL(ARMS, SCHEDULE, INTERVAL):
    """Load the per-run WIBQL learning curves and return the mean curve
    plus its 5th/95th percentile curves."""
    curves = []
    for run in range(RUNS):
        fname = (f'../testResults/deadline_env/wibqlResults_arms_{ARMS}_run_{run}_schedule_{SCHEDULE}.csv')
        curves.append(pd.read_csv(fname).iloc[:, 1])
    mean_curve = np.sum(curves, 0) / RUNS
    by_episode = np.transpose(curves)
    return (mean_curve,
            np.percentile(by_episode, 5, axis=1),
            np.percentile(by_episode, 95, axis=1))
######################################################################################################################
# Build the 1x3 comparison figure: one subplot per (N arms, M scheduled)
# setting, each overlaying every algorithm's learning curve with a
# 5th-95th percentile band.
fig, axes = plt.subplots(nrows=1,ncols=3, figsize=(WIDTH, HEIGHT), gridspec_kw={'wspace':0.13, 'hspace':0.0}, frameon=False)
for i in range(len(ARMSLIST)):
    ARMS = ARMSLIST[i]
    SCHEDULE = SCHEDULELIST[i]
    INTERVAL = INTERVALLIST[i]
    numEpisode = np.arange(0, EPISODEEND + INTERVAL, INTERVAL)
    deadlineRewards, deadlinePercentile5, deadlinePercentile95 = plotDeadlineIndex(ARMS, SCHEDULE)
    NeurWINRewards, NeurWINPercentile5, NeurWINPercentile95 = plotNeurWIN(ARMS, SCHEDULE, INTERVAL)
    wolpRewards, wolpPercentile5, wolpPercentile95 = plotWolp(ARMS, SCHEDULE, INTERVAL)
    aqlRewards, aqlPercentile5, aqlPercentile95 = plotAQL(ARMS, SCHEDULE, INTERVAL)
    qwicRewards, qwicPercentile5, qwicPercentile95 = plotQWIC(ARMS, SCHEDULE, INTERVAL)
    wibqlRewards, wibqlPercentile5, wibqlPercentile95 = plotWIBQL(ARMS, SCHEDULE, INTERVAL)
    # Deadline index is training-free, so it is a horizontal reference line.
    axes[i].hlines(xmin=0, xmax=EPISODEEND, y=deadlineRewards, label='Deadline Index', color='r', linewidth=LINEWIDTH, linestyle='dashdot', zorder=4)
    axes[i].fill_between(x=numEpisode, y1=deadlinePercentile5, y2=deadlinePercentile95, alpha=0.2, color='orange')
    axes[i].plot(numEpisode, NeurWINRewards, label='NeurWIN', linewidth=LINEWIDTH, linestyle='solid', zorder=5)
    axes[i].fill_between(x=numEpisode, y1=NeurWINPercentile5, y2=NeurWINPercentile95, alpha=0.2, color='green')
    axes[i].plot(numEpisode, aqlRewards, label=f'AQL', color='saddlebrown', linewidth=LINEWIDTH, linestyle='dotted')
    axes[i].fill_between(x=numEpisode, y1=aqlPercentile5, y2=aqlPercentile95, alpha=0.2, color='saddlebrown')
    axes[i].plot(numEpisode, qwicRewards, label=f'QWIC', color='g', linewidth=LINEWIDTH, linestyle=(0, (3,1,1,1,1,1)))
    axes[i].fill_between(x=numEpisode,y1=qwicPercentile5, y2=qwicPercentile95, alpha=0.2, color='teal')
    axes[i].plot(numEpisode, wibqlRewards, label=f'WIBQL', color='lightseagreen', linewidth=LINEWIDTH, linestyle=(0, (3,1,3,3,1,3)))
    axes[i].fill_between(x=numEpisode,y1=wibqlPercentile5, y2=wibqlPercentile95, alpha=0.2, color='lightseagreen')
    # REINFORCE results were not produced for the 100-arm setting.
    if ARMS == 100:
        pass
    else:
        reinforceRewards, reinforcePercentile5, reinforcePercentile95 = plotReinforce(ARMS, SCHEDULE, INTERVAL)
        axes[i].plot(numEpisode, reinforceRewards, label=f'REINFORCE', color='k', linewidth=LINEWIDTH, linestyle='dotted')
        axes[i].fill_between(x=numEpisode, y1=reinforcePercentile5, y2=reinforcePercentile95, alpha=0.2, color='k')
    axes[i].plot(numEpisode, wolpRewards, label='WOLP-DDPG', color='darkorchid', linewidth=LINEWIDTH, linestyle='dashed')
    axes[i].fill_between(x=numEpisode, y1=wolpPercentile5, y2=wolpPercentile95, alpha=0.2, color='darkorchid')
    axes[i].tick_params(axis='y', rotation=90)
    axes[i].set_xticks(np.arange(0,EPISODEEND+1,500))
    if ARMS == 100:
        pass
    else:
        # Legend handles come from a subplot that includes REINFORCE.
        handles, labels = axes[i].get_legend_handles_labels()
    yStart, yEnd = axes[i].get_ylim()
    yLimits = np.linspace(yStart, yEnd, 4)
    # Round y-ticks to multiples of 50 for readability.
    yTicks = [50*round(num/50) for num in yLimits]
    axes[i].set_yticks(yTicks)
plt.legend(handles, labels, frameon=False, bbox_to_anchor=(1,0.95))
axes[0].set_title(f'N = 4 M = 1', weight='bold', fontsize=14)
axes[1].set_title(f'N = 10 M = 1', weight='bold', fontsize=14)
axes[2].set_title(f'N = 100 M = 25', weight='bold', fontsize=14)
axes[0].set_ylabel('Total Discounted Rewards', weight='bold')#, fontsize=15)
axes[1].set_xlabel('Training Episodes', weight='bold')#, fontsize=15)
plt.savefig('../plotResults/deadline_results/deadline_rewards.pdf', bbox_inches='tight')
plt.show()
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A stats generator which counts the number of missing (null) values in a path.
This constituent stats generator counts the total number of rows in all batches
which are null. If a set of `required_paths` are also provided, only those rows
in which at least one of the `required paths` is present will be counted. This
is useful in the case where a set of features should be considered holistically
(like weighted features or sparse features). In this case, if the whole feature
is missing (i.e. all components of the weighted or sparse feature) then it is
not useful to report the absence of a single component.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import input_batch
from tensorflow_data_validation.statistics.generators import stats_generator
from typing import Iterable, Optional, Text, Tuple, Union
class CountMissingGenerator(stats_generator.ConstituentStatsGenerator):
  """A stats generator which counts the number of missing values in a path."""

  def __init__(self,
               path: types.FeaturePath,
               required_paths: Optional[Iterable[types.FeaturePath]] = None):
    """Initializes to count the number of null lists in a specific feature path.

    When required_paths is also passed, rows which are null for all of
    the required paths will not be counted as missing.

    Args:
      path: The path in which to count missing rows.
      required_paths: The set of paths among which at least one must be
        non-null in order for a null entry in the array for `path` to
        contribute to the missing count.
    """
    self._path = path
    # Sorted tuple gives a deterministic, hashable key component.
    self._required_paths = (
        tuple(sorted(required_paths)) if required_paths else None)

  @classmethod
  def key(
      cls,
      path: types.FeaturePath,
      required_paths: Optional[Iterable[types.FeaturePath]] = None
  ) -> Tuple[Union[Text, types.FeaturePath], ...]:
    """Generates a key for instances created with the same args passed to init.

    Args:
      path: The path in which to count missing rows.
      required_paths: The set of paths among which at least one must be
        non-null in order for a null entry in the array for `path` to
        contribute to the missing count.

    Returns:
      The unique key for this set of init args.
    """
    if not required_paths:
      return ('CountMissingGenerator', path)
    return ('CountMissingGenerator', path) + tuple(sorted(required_paths))

  def get_key(self) -> Tuple[Union[Text, types.FeaturePath], ...]:
    """Generates a unique key for this instance.

    Returns:
      The unique key for this set of init args.
    """
    return CountMissingGenerator.key(self._path, self._required_paths)

  def create_accumulator(self) -> int:
    # Accumulator is just the running count of missing rows.
    return 0

  def add_input(self, accumulator, batch: input_batch.InputBatch) -> int:
    """Accumulates the number of missing rows from new batch."""
    missing = batch.null_mask(self._path)
    if self._required_paths:
      # Don't count rows where every required path is also null.
      missing = missing & ~batch.all_null_mask(*self._required_paths)
    return accumulator + np.sum(missing)

  def merge_accumulators(self, accumulators: Iterable[int]) -> int:
    return sum(accumulators)

  def extract_output(self, accumulator: int) -> int:
    """Returns the count of missing values for this stats generator."""
    return accumulator
import numpy as np
import torch
from perfectpitch.onsetsdetector.model import OnsetsDetector
from perfectpitch.utils.transcription import pianoroll_to_transcription
class Transcriber:
    """Transcribes a spectrogram by running an onsets detector over
    overlapping chunks and stitching the piano-roll pieces back together.

    Chunks are 2000 frames with up to 1000 frames of context padding on
    each side; the padded context is cropped off again after inference.
    """

    def __init__(self, onsets_detector_path, device):
        self._device = torch.device(device)
        self._onsets_detector = OnsetsDetector()
        # Load on CPU first, then move to the target device.
        self._onsets_detector.load_state_dict(
            torch.load(onsets_detector_path, map_location=torch.device("cpu"))
        )
        self._onsets_detector.to(self._device)
        self._onsets_detector.eval()

    def _get_splits(self, length):
        """Yield (start, end, pad_before, pad_after) chunk boundaries.

        start/end include the context padding; pad_before/pad_after tell
        the caller how many padded frames to crop off each side.
        """
        step = 2000
        pad = 1000
        for start in range(0, length, step):
            end = min(start + step, length)
            pad_before = min(start, pad)
            pad_after = min(length - end, pad)
            yield (start - pad_before, end + pad_after, pad_before, pad_after)

    def _forward(self, spec):
        """Run the onsets detector on one chunk.

        Returns (actives, onsets, offsets, velocities) arrays of the same
        shape as the detector output; only onsets are predicted here,
        the other three are zero placeholders.
        """
        with torch.no_grad():
            spec = torch.from_numpy(spec).to(self._device).unsqueeze(1)
            onsets_logits = self._onsets_detector(spec).squeeze(1).cpu().numpy()
        # Threshold logits at 0 (i.e. probability 0.5) to binarize onsets.
        onsets = np.zeros_like(onsets_logits)
        onsets[onsets_logits > 0] = 1
        actives = np.zeros_like(onsets)
        offsets = np.zeros_like(onsets)
        velocities = np.zeros_like(onsets)
        return actives, onsets, offsets, velocities

    def __call__(self, spec):
        actives_parts = []
        onsets_parts = []
        offsets_parts = []
        velocities_parts = []
        for start, end, pad_before, pad_after in self._get_splits(len(spec)):
            actives_part, onsets_part, offsets_part, velocities_part = self._forward(
                spec[start:end]
            )
            # BUGFIX: the original cropped with `part[pad_before:-pad_after]`,
            # which is an EMPTY slice whenever pad_after == 0 — always true
            # for the final chunk — silently dropping its frames. Compute the
            # stop index explicitly instead.
            stop = len(onsets_part) - pad_after
            actives_parts.append(actives_part[pad_before:stop])
            onsets_parts.append(onsets_part[pad_before:stop])
            offsets_parts.append(offsets_part[pad_before:stop])
            velocities_parts.append(velocities_part[pad_before:stop])
        actives = np.concatenate(actives_parts)
        onsets = np.concatenate(onsets_parts)
        offsets = np.concatenate(offsets_parts)
        velocities = np.concatenate(velocities_parts)
        return pianoroll_to_transcription(actives, onsets, offsets, velocities)
import os
import numpy as np
import time
import sys
from PIL import Image
import cv2
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from DensenetModels import DenseNet121
from DensenetModels import DenseNet169
from DensenetModels import DenseNet201
#--------------------------------------------------------------------------------
#---- Class to generate heatmaps (CAM)
#--------------------------------------------------------------------------------
#---- Class to generate heatmaps (CAM)

class HeatmapGenerator ():
    """Generates class-activation-map (CAM) heatmaps from a trained DenseNet.

    pathModel      -- path to the trained densenet checkpoint
    nnArchitecture -- one of 'DENSE-NET-121', 'DENSE-NET-169', 'DENSE-NET-201'
    nnClassCount   -- number of output classes (14 for ChestX-ray14)
    transCrop      -- side length the input image is resized to
    """

    #---- Maps architecture flag to (wrapper constructor, attribute name of
    #---- the torchvision backbone inside the wrapper).
    _ARCHITECTURES = {
        'DENSE-NET-121': (DenseNet121, 'densenet121'),
        'DENSE-NET-169': (DenseNet169, 'densenet169'),
        'DENSE-NET-201': (DenseNet201, 'densenet201'),
    }

    def __init__ (self, pathModel, nnArchitecture, nnClassCount, transCrop):
        #---- BUG FIXES: the original left `model` undefined for unknown
        #---- architecture names (NameError later) and always accessed
        #---- `model.module.densenet121`, which cannot work for 169/201.
        try:
            ctor, backboneAttr = self._ARCHITECTURES[nnArchitecture]
        except KeyError:
            raise ValueError('Unknown architecture: %s' % nnArchitecture)
        model = ctor(nnClassCount, True).cuda()
        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        backbone = getattr(model.module, backboneAttr, None)
        if backbone is None:
            #---- NOTE(review): presumably older wrappers exposed the
            #---- backbone as `densenet121` regardless of architecture --
            #---- keep the original lookup as a fallback. TODO confirm.
            backbone = model.module.densenet121
        self.model = backbone.features
        self.model.eval()

        #---- CAM weights: second-to-last parameter tensor of the feature
        #---- extractor (the last dense layer's weight).
        self.weights = list(self.model.parameters())[-2]

        #---- Image transform: resize + tensor + ImageNet normalization
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        transformList = []
        transformList.append(transforms.Resize(transCrop))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        self.transformSequence = transforms.Compose(transformList)

    #--------------------------------------------------------------------------------

    def generate (self, pathImageFile, pathOutputFile, transCrop):
        """Compute the CAM for one image and write a blended overlay.

        pathImageFile  -- input image path
        pathOutputFile -- path for the blended heatmap image
        transCrop      -- side length used for resizing
        """
        #---- Load image, transform, add batch dimension
        imageData = Image.open(pathImageFile).convert('RGB')
        imageData = self.transformSequence(imageData)
        imageData = imageData.unsqueeze_(0)

        self.model.cuda()
        #---- No gradients needed for inference (avoids shadowing the
        #---- builtin `input` that the original Variable-based code did).
        with torch.no_grad():
            output = self.model(imageData.cuda())

        #---- Weighted sum of the final feature maps (CAM)
        heatmap = None
        for i in range(0, len(self.weights)):
            fmap = output[0, i, :, :]
            if heatmap is None:
                heatmap = self.weights[i] * fmap
            else:
                heatmap += self.weights[i] * fmap

        #---- Blend original image and heatmap
        npHeatmap = heatmap.cpu().data.numpy()
        imgOriginal = cv2.imread(pathImageFile, 1)
        imgOriginal = cv2.resize(imgOriginal, (transCrop, transCrop))
        cam = npHeatmap / np.max(npHeatmap)
        cam = cv2.resize(cam, (transCrop, transCrop))
        colored = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
        blended = colored * 0.5 + imgOriginal
        cv2.imwrite(pathOutputFile, blended)

#--------------------------------------------------------------------------------
# Script entry: generate a CAM heatmap for a single test image.
pathInputImage = 'test/00009285_000.png'
pathOutputImage = 'test/heatmap.png'
pathModel = 'm-08042018-135924.pth.tar'

# Model configuration (14 ChestX-ray14 classes, 224x224 input).
nnArchitecture = 'DENSE-NET-169'
nnClassCount = 14
transCrop = 224

h = HeatmapGenerator(pathModel, nnArchitecture, nnClassCount, transCrop)
h.generate(pathInputImage, pathOutputImage, transCrop)
# Copyright 2022 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Convert checkpoint to SavedModel, and SavedModel to TFJS / TFLite format.
Example Usage (Defaults):
ddsp_export --model_path=/path/to/model
Example Usage (TFJS model):
ddsp_export --model_path=/path/to/model --inference_model=autoencoder \
--tflite=false --tfjs
Example Usage (TFLite model):
ddsp_export --model_path=/path/to/model --inference_model=streaming_f0_pw \
--tflite --tfjs=false
Example Usage (SavedModel Only):
ddsp_export --model_path=/path/to/model --inference_model=[model_type] \
--tflite=false --tfjs=false
"""
import datetime
import json
import os
from absl import app
from absl import flags
import ddsp
from ddsp.training import data
from ddsp.training import inference
from ddsp.training import postprocessing
from ddsp.training import train_util
import gin
import librosa
import note_seq
import tensorflow as tf
from tensorflowjs.converters import converter
from tflite_support import metadata as _metadata
flags.DEFINE_string(
    'model_path', '', 'Path to checkpoint or SavedModel directory. If no '
    'SavedModel is found, will search for latest checkpoint '
    'use it to create a SavedModel. Can also provide direct '
    'path to desired checkpoint. E.g. `/path/to/ckpt-[iter]`.')
flags.DEFINE_string(
    'save_dir', '', 'Optional directory in which to save converted checkpoint.'
    'If none is provided, it will be FLAGS.model_path if it '
    'contains a SavedModel, otherwise FLAGS.model_path/export.')

# Specify model class.
flags.DEFINE_enum(
    'inference_model',
    'streaming_f0_pw',
    [
        'autoencoder',
        'streaming_f0_pw',
        'vst_extract_features',
        'vst_predict_controls',
        'vst_stateless_predict_controls',
        'vst_synthesize',
        'autoencoder_full',
    ],
    'Specify the ddsp.training.inference model to use for '
    'converting a checkpoint to a SavedModel. Names are '
    'snake_case versions of class names.')

# Optional flags.
flags.DEFINE_multi_string('gin_param', [],
                          'Gin parameters for custom inference model kwargs.')
flags.DEFINE_boolean('debug', False, 'DEBUG: Do not save the model')

# Conversion formats.
flags.DEFINE_boolean('tfjs', True,
                     'Convert SavedModel to TFJS for deploying on the web.')
flags.DEFINE_boolean('tflite', True,
                     'Convert SavedModel to TFLite for embedded C++ apps.')
flags.DEFINE_string('metadata_file', None,
                    'Optional metadata file to pack into TFLite model.')

# Metadata.
flags.DEFINE_boolean('metadata', True, 'Save metadata for model as a json.')
flags.DEFINE_string(
    'dataset_path', None,
    'Only required if FLAGS.metadata=True. Path to TF Records containing '
    'training examples. Only used if no binding to train.data_provider can '
    'be found.')

# Reverb Impulse Response.
flags.DEFINE_boolean('reverb', True,
                     'Save reverb impulse response as a wav file.')
flags.DEFINE_integer('reverb_sample_rate', 44100,
                     'If not None, also save resampled reverb ir.')

# FIX: FLAGS was previously assigned twice (once in the middle of the flag
# definitions and once here); a single assignment after all definitions
# is sufficient.
FLAGS = flags.FLAGS
def get_data_provider(dataset_path, model_path):
  """Resolve the data provider used for dataset statistics.

  Prefers an explicitly given TF-record path; otherwise falls back to the
  `train.data_provider` binding found in the model's gin config.

  Args:
    dataset_path: Path to an sstable of TF Examples, or None.
    model_path: Path to the model checkpoint dir containing the gin config.

  Returns:
    Data provider to calculate statistics over.

  Raises:
    Exception: if neither an explicit path nor a gin binding is available.
  """
  if dataset_path is None:
    # No explicit dataset -- read the provider from the model's gin config.
    inference.parse_operative_config(model_path)
    try:
      dp_binding = gin.query_parameter('train.data_provider')
      return dp_binding.scoped_configurable_fn()
    except ValueError as e:
      raise Exception(
          'Failed to parse dataset from gin. Either --dataset_path '
          'or train.data_provider gin param must be set.') from e
  expanded = train_util.expand_path(dataset_path)
  return data.TFRecordProvider(expanded)
def get_metadata_dict(data_provider, model_path):
  """Compute metadata using compute_dataset_statistics and add version/date."""
  # Parse the gin config to read decoder outputs and frame parameters.
  inference.parse_operative_config(model_path)

  # Number of decoder outputs (harmonics / noise magnitudes).
  ref = gin.query_parameter('Autoencoder.decoder')
  decoder_type = ref.config_key[-1].split('.')[-1]
  output_splits = dict(gin.query_parameter(f'{decoder_type}.output_splits'))

  # Frame/sample parameters from the gin macros.
  frame_size = gin.query_parameter('%frame_size')
  frame_rate = gin.query_parameter('%frame_rate')
  sample_rate = gin.query_parameter('%sample_rate')

  # Dataset statistics (pitch and power ranges).
  full_metadata = postprocessing.compute_dataset_statistics(
      data_provider,
      power_frame_size=frame_size,
      power_frame_rate=frame_rate)

  min_note = float(full_metadata['mean_min_pitch_note'])
  max_note = float(full_metadata['mean_max_pitch_note'])
  return {
      'mean_min_pitch_note': min_note,
      'mean_max_pitch_note': max_note,
      'mean_min_pitch_note_hz': float(ddsp.core.midi_to_hz(min_note)),
      'mean_max_pitch_note_hz': float(ddsp.core.midi_to_hz(max_note)),
      'mean_min_power_note': float(full_metadata['mean_min_power_note']),
      'mean_max_power_note': float(full_metadata['mean_max_power_note']),
      'version': ddsp.__version__,
      'export_time': datetime.datetime.now().isoformat(),
      'num_harmonics': output_splits['harmonic_distribution'],
      'num_noise_amps': output_splits['noise_magnitudes'],
      'frame_rate': frame_rate,
      'frame_size': frame_size,
      'sample_rate': sample_rate,
  }
def get_inference_model(ckpt):
  """Restore an inference model from a checkpoint using global FLAGS.

  Use --gin_param for any custom kwargs for model constructors.

  Args:
    ckpt: Path to the checkpoint.

  Returns:
    Inference model, built and restored from checkpoint.
  """
  # Apply any custom --gin_param bindings before constructing the model.
  print('Parsing --gin_param flags:', FLAGS.gin_param)
  with gin.unlock_config():
    gin.parse_config_files_and_bindings(None, FLAGS.gin_param)
  print(gin.config.config_str())

  models = {
      'autoencoder': inference.AutoencoderInference,
      'vst_extract_features': inference.VSTExtractFeatures,
      'vst_predict_controls': inference.VSTPredictControls,
      'vst_stateless_predict_controls': inference.VSTStatelessPredictControls,
      'vst_synthesize': inference.VSTSynthesize,
      'autoencoder_full': inference.AutoencoderFull,
  }
  model_class = models[FLAGS.inference_model]
  # Fixed-size export signature; presumably matched to the training config
  # (64000 samples / 201 frames) -- confirm before changing.
  return model_class(ckpt, verbose=False, n_samples=64000, n_frames=201)
def ckpt_to_saved_model(ckpt, save_dir):
  """Convert Checkpoint to SavedModel.

  Args:
    ckpt: Path to the checkpoint.
    save_dir: Directory the SavedModel is written to (skipped when
      --debug is set).
  """
  print(f'\nConverting to SavedModel:' f'\nInput: {ckpt}\nOutput: {save_dir}\n')
  model = get_inference_model(ckpt)
  # Typo fix: message previously read 'Finshed'.
  print('Finished Loading Model!')
  if not FLAGS.debug:
    model.save_model(save_dir)
    print('SavedModel Conversion Success!')
def saved_model_to_tfjs(input_dir, save_dir):
  """Convert SavedModel to TFJS model."""
  print(f'\nConverting to TFJS:\nInput:{input_dir}\nOutput:{save_dir}\n')
  # Same argument vector the tensorflowjs_converter CLI would receive.
  converter_args = [
      '--input_format=tf_saved_model',
      '--signature_name=serving_default',
      '--control_flow_v2=True',
      '--skip_op_check',
      '--quantize_float16=True',
      '--experiments=True',
      input_dir,
      save_dir,
  ]
  converter.convert(converter_args)
  print('TFJS Conversion Success!')
# Default TFRecord pattern for the quantization representative dataset.
# NOTE(review): machine-specific path inherited from the original code;
# prefer passing `representative_data_pattern` explicitly.
_DEFAULT_REPRESENTATIVE_DATA = (
    '/cluster/home/vvolhejn/datasets/violin2/violin2.tfrecord*')


def saved_model_to_tflite(input_dir, save_dir, metadata_file=None,
                          quantize=True,
                          representative_data_pattern=_DEFAULT_REPRESENTATIVE_DATA):
  """Convert SavedModel to TFLite model.

  Args:
    input_dir: Directory containing the SavedModel.
    save_dir: Directory the .tflite file is written to.
    metadata_file: Optional metadata file to pack into the TFLite model.
      Falsy values (None or '') disable metadata packing.
    quantize: Whether to apply full-integer post-training quantization.
    representative_data_pattern: TFRecord glob used for the calibration
      dataset (generalized from a previously hard-coded path).
  """
  print(f'\nConverting to TFLite:\nInput:{input_dir}\nOutput:{save_dir}\n')
  # Convert the model.
  tflite_converter = tf.lite.TFLiteConverter.from_saved_model(input_dir)
  tflite_converter.target_spec.supported_ops = [
      tf.lite.OpsSet.TFLITE_BUILTINS,  # Enable TensorFlow Lite ops.
      tf.lite.OpsSet.SELECT_TF_OPS,  # Enable extended TensorFlow ops.
  ]
  if quantize:
    representative_dataset = get_representative_dataset(
        representative_data_pattern)
    tflite_converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_converter.representative_dataset = representative_dataset
    tflite_converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    tflite_converter.inference_input_type = tf.int8  # or tf.uint8
    tflite_converter.inference_output_type = tf.int8  # or tf.uint8
  tflite_model = tflite_converter.convert()  # Byte string.

  # Save the model.
  model_name = "model_quantized.tflite" if quantize else "model_unquantized.tflite"
  save_path = os.path.join(save_dir, model_name)
  with tf.io.gfile.GFile(save_path, 'wb') as f:
    f.write(tflite_model)

  # BUG FIX: main() passes '' when --metadata is disabled; the previous
  # `is not None` check then tried to pack an empty filename. Treat any
  # falsy value as "no metadata".
  if metadata_file:
    populator = _metadata.MetadataPopulator.with_model_file(save_path)
    populator.load_associated_files([metadata_file])
    populator.populate()
  print('TFLite Conversion Success!')
  if quantize:
    test_quantization(tflite_converter, representative_dataset)
def get_representative_dataset(file_pattern, include_f0_hz=False):
  """Build a calibration-batch generator for TFLite quantization.

  Args:
    file_pattern: Glob for TFRecord files containing training examples.
    include_f0_hz: If True, also yield raw f0/audio/confidence inputs.

  Returns:
    A zero-argument callable yielding lists of input tensors.
  """
  data_provider = ddsp.training.data.TFRecordProvider(
      file_pattern=file_pattern,
      frame_rate=50,
      centered=True,
  )
  preprocessor = ddsp.training.preprocessing.OnlineF0PowerPreprocessor(
      frame_rate=50,
      padding="center",
      compute_f0=False,
  )

  def representative_dataset():
    dataset = data_provider.get_batch(batch_size=1, shuffle=True, repeats=1)
    # 40 batches are used for calibration.
    for _, batch in zip(range(40), dataset):
      features = preprocessor(batch)
      if include_f0_hz:
        yield [features["f0_scaled"], features["pw_scaled"], features["f0_hz"], batch["audio"], batch["f0_confidence"]]
      else:
        yield [features["f0_scaled"], features["pw_scaled"]]

  return representative_dataset
def test_quantization(tflite_converter, representative_dataset):
  """
  Running the model through the quantization debugger mainly ensures that the model is
  runnable - in some scenarios, models convert correctly but fail with SIGABRT when run.
  """
  debugger = tf.lite.experimental.QuantizationDebugger(
      converter=tflite_converter, debug_dataset=representative_dataset
  )
  debugger.run()
  # Layer statistics can be huge; print a truncated preview only.
  stats_preview = str(debugger.layer_statistics)[:1000]
  print(stats_preview + " [...and more layer statistics]")
def export_impulse_response(model_path, save_dir, target_sr=None):
  """Extracts and saves the reverb impulse response.

  Args:
    model_path: Model checkpoint directory (containing the gin config).
    save_dir: Directory the .wav file(s) are written to.
    target_sr: If not None, additionally save the IR resampled to this rate.
  """
  with gin.unlock_config():
    ddsp.training.inference.parse_operative_config(model_path)
  model = ddsp.training.models.Autoencoder()
  model.restore(model_path, verbose=False)

  sr = model.processor_group.harmonic.sample_rate
  reverb = model.processor_group.reverb
  reverb.build(unused_input_shape=[])
  ir = reverb.get_controls(audio=tf.zeros([1, 1]))['ir'].numpy()[0]
  print(f'Reverb Impulse Response is {ir.shape[0] / sr} seconds long')

  def save_ir(ir, sr):
    """Save the impulse response."""
    ir_path = os.path.join(save_dir, f'reverb_ir_{sr}_hz.wav')
    with tf.io.gfile.GFile(ir_path, 'wb') as f:
      wav_data = note_seq.audio_io.samples_to_wav_data(ir, sr)
      f.write(wav_data)

  # Save the original impulse response.
  save_ir(ir, sr)

  # Save the resampled impulse response.
  if target_sr is not None:
    # BUG FIX: `sr` was previously overwritten with `target_sr` *before*
    # calling librosa.resample, so the resample ran from target_sr to
    # target_sr (a no-op) and the "resampled" file was just the original
    # IR under a new name.
    ir = librosa.resample(ir, orig_sr=sr, target_sr=target_sr)
    sr = target_sr
    save_ir(ir, sr)
def ensure_exits(dir_path):
  """Make directory if none exists."""
  if tf.io.gfile.exists(dir_path):
    return
  tf.io.gfile.makedirs(dir_path)
def main(unused_argv):
  """Convert the model at FLAGS.model_path per the conversion flags."""
  model_path = train_util.expand_path(FLAGS.model_path)

  # Figure out what type the model path is.
  is_saved_model = tf.io.gfile.exists(
      os.path.join(model_path, 'saved_model.pb'))
  is_ckpt = not tf.io.gfile.isdir(model_path)

  # Infer save directory path.
  if FLAGS.save_dir:
    save_dir = FLAGS.save_dir
  else:
    if is_saved_model:
      # If model_path is a SavedModel, use that directory.
      save_dir = model_path
    elif is_ckpt:
      # If model_path is a checkpoint file, use the directory of the file.
      save_dir = os.path.join(os.path.dirname(model_path), 'export')
    else:
      # If model_path is a checkpoint directory, use child export directory.
      save_dir = os.path.join(model_path, 'export')

  # Make a new save directory.
  save_dir = train_util.expand_path(save_dir)
  ensure_exits(save_dir)

  # Save reverb impulse response.
  if FLAGS.reverb:
    export_impulse_response(model_path, save_dir, FLAGS.reverb_sample_rate)

  # Save metadata.
  metadata_path = None
  if FLAGS.metadata:
    metadata_path = os.path.join(save_dir, 'metadata.json')
    data_provider = get_data_provider(FLAGS.dataset_path, model_path)
    metadata = get_metadata_dict(data_provider, model_path)
    with tf.io.gfile.GFile(metadata_path, 'w') as f:
      f.write(json.dumps(metadata))

  # Create SavedModel if none already exists.
  if not is_saved_model:
    ckpt_to_saved_model(model_path, save_dir)

  # Convert SavedModel.
  if FLAGS.tfjs:
    tfjs_dir = os.path.join(save_dir, 'tfjs')
    ensure_exits(tfjs_dir)
    saved_model_to_tfjs(save_dir, tfjs_dir)

  if FLAGS.tflite:
    tflite_dir = os.path.join(save_dir, 'tflite')
    ensure_exits(tflite_dir)
    # BUG FIX: previously passed '' when --metadata was off, which the
    # downstream `is not None` check treated as a real metadata file.
    saved_model_to_tflite(save_dir, tflite_dir, metadata_path)
def console_entry_point():
  """Entry point for the pip-installed `ddsp_export` console script."""
  app.run(main)


if __name__ == '__main__':
  console_entry_point()
#////////////////////////////////////////////////////////////////
#//
#// Python modules
#//
#// -------------------------------------------------------------
#//
#// AUTHOR: Miguel Ramos Pernas
#// e-mail: miguel.ramos.pernas@cern.ch
#//
#// Last update: 04/10/2017
#//
#// -------------------------------------------------------------
#//
#// Description:
#//
#// This module implements different tools to generate plots
#// using Root objects.
#//
#// -------------------------------------------------------------
#////////////////////////////////////////////////////////////////
from Isis.data_management import DataMgr
from Isis.iboost.general import sendErrorMsg, sendWarningMsg
from Isis.iroot import ROOT as rt
from Isis.math_ext import nearest_square
import itertools, sys
from math import sqrt
import numpy as np
import scipy as sc
class CanvasStorer:
    '''
    Lightweight container bundling a canvas with two object lists: one
    holding the plotted data (histograms, graphs, ...) and another holding
    decoration objects added to the plot (legend, lines, ...)
    '''
    def __init__( self, canvas, data_objs = None, info_objs = None ):
        '''
        Build the storer from the canvas plus optional lists of data
        objects and information objects (fresh empty lists by default)
        '''
        self.canvas   = canvas
        self.dataObjs = data_objs if data_objs else []
        self.infoObjs = info_objs if info_objs else []

    def write( self, info = True ):
        '''
        Write the canvas and its objects in the current file; information
        objects are written only when < info > is True
        '''
        self.canvas.Write()
        for item in self.dataObjs:
            item.Write()
        if not info:
            return
        for item in self.infoObjs:
            item.Write()
class FormatListIter:
    '''
    Output class for an iteration over a < FormatList > instance. This class
    stores the information of the color, line style and fill style. It also has
    features to apply it to Root objects.
    '''
    def __init__( self, color, linest, markst, fillst ):
        '''
        The color and the line, marker and fill styles are passed to the class
        '''
        self.color  = color
        self.lineSt = linest
        self.markSt = markst
        self.fillSt = fillst

    def apply( self, obj, lw = 2 ):
        '''
        Apply the format stored to a Root object. By default the line width is also
        set to < 2 >.
        '''
        obj.SetLineWidth( lw )
        # FIX: SetLineColor was previously called twice (copy-paste
        # duplicate); one call is enough.
        obj.SetLineColor( self.color )
        obj.SetMarkerColor( self.color )
        obj.SetMarkerStyle( self.markSt )
        # FIX: identity checks (is not None) instead of "!= None"
        if self.lineSt is not None:
            obj.SetLineStyle( self.lineSt )
        if self.fillSt is not None:
            obj.SetFillColor( self.color )
            obj.SetFillStyle( self.fillSt )

    def styles( self ):
        '''
        Returns the members of this class in a tuple, skipping unset (None)
        entries
        '''
        return tuple( el for el in (self.color, self.lineSt,
                                    self.markSt, self.fillSt)
                      if el is not None )
class FormatList:
    '''
    This class allows to generate a list storing colors and line, marker and fill
    styles, so one can iterate over it. The formats are extracted using
    < __getitem__ > or on an iteration process. If the index is greater than the
    number of colors in the list, the color will correspond to the loop number,
    added to the remainder araising from the division. For the other features
    the quantity is directly extracted from the remainder.

    NOTE: this module targets python 2 (range() returns a list, < next > is
    the py2 iterator protocol method).
    '''
    def __init__( self, colors = None, linest = None, markst = None, fillst = False ):
        '''
        Any of the input arguments can be passed explicitily to the class. However,
        this class stores default values for each input parameter. By default the fill
        style is not used, since Root automatically fills the histogram if it is set.
        If a list is passed to any of the input arguments, a check will be made to see
        that all of them have the same length (beware of the length of the default
        arguments). If only one value is specified, all the objects using these formats
        will have the same value of that quantity too.
        '''
        self._iter = 0
        if colors is not None:
            self.colors = colors
        else:
            self.colors = [ rt.kBlue, rt.kRed, rt.kOrange,
                            rt.kGreen, rt.kMagenta, rt.kCyan ]
        if linest is not None:
            self.lineSt = linest
        else:
            self.lineSt = range( 1, 7 )
        if markst is not None:
            self.markSt = markst
        else:
            self.markSt = range( 20, 26 )
        # fillst defaults to False (not None), so the fallback range below
        # is only used when None is passed explicitly
        if fillst is not None:
            self.fillSt = fillst
        else:
            self.fillSt = range( 3000, 3006 )
        ''' Check that all the lists given have the same length '''
        lgths = [ len( lst ) for lst in ( self.colors, self.lineSt,
                                          self.markSt, self.fillSt )
                  if isinstance( lst, list ) ]
        if len( set( lgths ) ) != 1:
            sendErrorMsg('Lists passed to FormatList instance have different lengths')

    def __getitem__( self, idx ):
        ''' Gets the format for the given index '''
        if isinstance( self.colors, list ):
            lst = self.colors
        elif isinstance( self.lineSt, list ):
            lst = self.lineSt
        elif isinstance( self.markSt, list ):
            lst = self.markSt
        elif isinstance( self.fillSt, list ):
            lst = self.fillSt
        else:
            lst = [ 0 ]
        n = len( lst )
        # BUG FIX: floor division keeps the loop counter an integer under
        # python 3 as well ("/" would yield a float there); identical
        # behavior under python 2
        nloop = idx // n
        niter = idx % n
        col = self._getit( self.colors, niter, nloop )
        lst = self._getit( self.lineSt, niter )
        mst = self._getit( self.markSt, niter )
        fst = self._getit( self.fillSt, niter )
        return FormatListIter( col, lst, mst, fst )

    def __iter__( self ):
        ''' Definition of the iterator '''
        self._iter = 0
        return self

    def next( self ):
        '''
        Sets the new value for the iteration. In order to use this class in an
        iterative mode, another iterable object has to be the one that raises the
        exception to stop the iteration.
        '''
        frmt = self.__getitem__(self._iter)
        self._iter += 1
        return frmt

    def _getit( self, lst, idx, nloop = False ):
        '''
        Auxiliar function to get the content of the next item in a list (if any).
        For lists, < nloop > (False == 0 by default) is added to the value.
        '''
        if lst:
            if isinstance( lst, list ):
                return lst[ idx ] + nloop
            else:
                return lst
        else:
            return
def draw_hists( hlst, drawopt = '', norm = True, title = 'List of histograms' ):
    '''
    Draws the given list of histograms. If the variable < norm > is set to True,
    then the histograms will be normalized. It returns the histogram used to give
    format to the plot, and the list of input histograms or the normalized clones.
    The draw options are set using the < drawopt > keyword. The titles of the axes
    are taken from the first histogram in the list.
    '''
    if norm:
        # DrawNormalized returns a normalized clone; < norm > (True == 1)
        # also acts as the normalization scale.
        meth = lambda h, d: rt.TH1.DrawNormalized(h, d, norm)
    else:
        # Plain Draw returns None, so the input histograms are reused below.
        meth = rt.TH1.Draw
    # Dummy histogram fixing axis ranges/titles for the whole set.
    hformat = hist_format(hlst, title = hlst[0].GetTitle(), norm = norm)
    hformat.Draw()
    # Overlay the actual histograms on the format histogram.
    drawopt += 'SAME'
    outhlst = [hformat]
    for h in hlst:
        hdr = meth(h, drawopt)
        if hdr:
            # Detach the normalized clone from the current Root directory
            # so it is not deleted on file closure.
            hdr.SetDirectory(0)
            outhlst.append(hdr)
    if len(outhlst) == 1:
        # Draw() returned nothing -- return the original histograms instead.
        outhlst += hlst
    return outhlst
def format_plottable_2d( obj, name = '', title = None, xtitle = '', ytitle = '' ):
    '''
    Set name, main title and titles for each axis of a 2D object. The name
    is used as the title when no explicit title is given.
    '''
    obj.SetNameTitle(name, title if title else name)
    if xtitle:
        obj.GetXaxis().SetTitle(xtitle)
    if ytitle:
        obj.GetYaxis().SetTitle(ytitle)
def hist_bounds( arr, vmin = None, vmax = None ):
    '''
    Extract the histogram bounds given a list of values and the possible
    bounds (if any). If the array is empty, bounds at (0, 1) are returned;
    this is necessary to handle format histograms. If no maximum is
    provided, a small (1%) offset is applied so the maximum value lies
    inside the range.
    '''
    if arr.size == 0:
        return 0, 1
    # FIX: identity checks (is None) instead of "!= None", which is
    # fragile when the bounds are numpy scalars
    if vmin is None:
        vmin = arr.min()
    if vmax is None:
        vmax = 1.01*arr.max()
    return vmin, vmax
def hist_format( hlst, name = '', title = None, norm = True ):
    '''
    Return the histogram with the format (number of bins and bounds) to define the
    axis of a figure with all being plotted together
    '''
    title = title if title else name
    first = hlst[0]
    # Global x-range over all histograms
    xmin = np.fromiter((h.GetXaxis().GetXmin() for h in hlst), float).min()
    xmax = np.fromiter((h.GetXaxis().GetXmax() for h in hlst), float).max()
    # Per-histogram y extrema (possibly rescaled below)
    ymin = np.fromiter((h.GetMinimum() for h in hlst), float)
    ymax = np.fromiter((h.GetMaximum() for h in hlst), float)
    if norm:
        # Rescale extrema as if each histogram were normalized to < norm >
        wgts = np.fromiter((h.GetSumOfWeights() for h in hlst), float)
        ymin *= norm/wgts
        ymax *= norm/wgts
    ymin = ymin.min()
    ymax = ymax.max()
    # Add 10% headroom on top
    ymax += 0.1*(ymax - ymin)
    hform = first.__class__(name, title, first.GetNbinsX(), xmin, xmax)
    hform.GetXaxis().SetTitle(first.GetXaxis().GetTitle())
    hform.GetYaxis().SetTitle(first.GetYaxis().GetTitle())
    # Invisible line so only the axes show
    hform.SetLineColor(0)
    hform.SetLineStyle(0)
    hform.GetYaxis().SetRangeUser(ymin, ymax)
    return hform
def hist_points( arr, nbins, vmin = None, vmax = None ):
    '''
    This function extracts the indexes of the given array of data which are
    supposed to be used to make a histogram (a boolean mask, or an empty
    list for empty input)
    '''
    if len(arr) == 0:
        return []
    vmin = arr.min() if vmin is None else vmin
    vmax = arr.max() if vmax is None else vmax
    # Widen the upper edge by half a bin so the maximum value is included
    vmax += (vmax - vmin)/(2.*nbins)
    return (arr >= vmin) & (arr < vmax)
def hist_ctor_from_type( tp, dim = 1 ):
    '''
    Returns the histogram constructor given the type as a string; sends an
    error message and returns None for unknown types or dimensions
    '''
    if tp not in ( 'float', 'double', 'int' ):
        sendErrorMsg('Histogram type < %s > not known' %tp)
        return
    if dim not in (1, 2):
        sendErrorMsg('Histogram dimension < %i >, not allowed' %dim)
        return
    ctors = {
        1: {'float': rt.TH1F, 'double': rt.TH1D, 'int': rt.TH1I},
        2: {'float': rt.TH2F, 'double': rt.TH2D, 'int': rt.TH2I},
        }
    return ctors[dim][tp]
def adbin_hist( name, minocc, values,
                htype = 'double',
                title = None,
                weights = False,
                xtitle = '',
                ytitle = '' ):
    '''
    This function creates a 1-D adaptive binning histogram given a name, the
    minimum occupancy value and a list. Adding a list of weights is also possible.
    '''
    histcall = hist_ctor_from_type(htype, 1)
    ''' Calculates the array of weights '''
    length = len(values)
    if weights:
        sw = float(sum(weights))
        # Floor division keeps the bin count an integer (identical under
        # python 2; "/" would yield a float under python 3)
        nbins = int(sw)//minocc
    else:
        weights = length*[1.]
        sw = float(length)
        nbins = length//minocc
    ''' If the occupancy requested is too big, an error message is displayed.
    BUG FIXES: the message previously referenced the undefined name
    < iminocc >, and the check ran *after* hist_points, which divides by
    the (zero) number of bins '''
    if nbins == 0:
        sendErrorMsg('Occupancy requested is too big: %i' %minocc)
    idxs = hist_points(values, nbins)
    vmin, vmax = hist_bounds(values[idxs])
    '''
    Creates a list with the values and the weights joint and sorts it by the values
    '''
    values = zip(values, weights)
    values.sort()
    ''' Fills the bins with the data '''
    binlist = tuple([vmax, 0] for i in xrange(nbins))
    idat, swpb = 0, 0
    for idf, ib in enumerate(binlist):
        # Target sum of weights for each of the remaining bins
        swpb = sw/(nbins - idf)
        while ib[1] < swpb and idat < length:
            val, wgt = values[idat]
            if val < ib[0]:
                ib[0] = val
            ib[1] += wgt
            idat += 1
        sw -= ib[1]
    # Any leftover entries go into the last bin
    while idat < length:
        binlist[-1][0], binlist[-1][1] = values[idat]
        idat += 1
    '''
    To create the Root histogram, an array of doubles has to be created, with the
    minimum value for the bins
    '''
    bins = np.array((nbins + 1)*[0.], dtype = float)
    for i, ib in enumerate(binlist):
        bins[i] = ib[0]
    bins[-1] = vmax
    hist = histcall('', '', nbins, bins)
    format_plottable_2d(hist, name, title, xtitle, ytitle)
    return hist
def corr_hist( matrix, name = '', title = None, vartitles = None ):
    '''
    Creates a correlation histogram given a list of lists. By default it is drawn
    in color, without palette, and with the contents written inside each bin. No
    statistical box is displayed neither.
    '''
    lm = len(matrix)
    vartitles = vartitles or []
    if not vartitles:
        vartitles = ['Variable_' + str(i) for i in xrange(lm)]
    elif lm != len(vartitles):
        sendErrorMsg('Number of titles is not the same as that of the matrix')
    # Correlations are stored as integer percentages
    corr_matrix = 100*np.corrcoef(matrix)
    hist = rt.TH2D('', '', lm, 0, lm, lm, 0, lm)
    for i, row in enumerate(corr_matrix):
        for j, el in enumerate(row):
            hist.SetBinContent(i + 1, j + 1, int(el))
    for i, tit in enumerate(vartitles):
        hist.GetXaxis().SetBinLabel(i + 1, tit)
        hist.GetYaxis().SetBinLabel(i + 1, tit)
    format_plottable_2d(hist, name, title, '', '')
    # No ticks, colored boxes with the value printed, no stats box
    hist.GetXaxis().SetTickLength(0)
    hist.GetYaxis().SetTickLength(0)
    hist.SetOption('COLTEXT')
    hist.SetStats(False)
    return hist
def cumulative( hist, name = '', norm = False, title = None ):
    '''
    Returns a histogram containing the cumulative distribution of that given. If
    the option < norm > is given, the histogram will be scaled in such a way that
    the maximum value will be one.
    '''
    chist = hist.Clone()
    cumulative = chist.GetBinContent(1)
    for i in xrange(2, hist.GetNbinsX() + 1):
        cumulative += hist.GetBinContent(i)
        chist.SetBinContent(i, cumulative)
        # Poissonian error on the accumulated count
        chist.SetBinError(i, sqrt(cumulative))
    if norm:
        chist.Scale(1./chist.GetMaximum())
    # BUG FIX: the name/title were previously applied to the *input*
    # histogram instead of the returned clone
    format_plottable_2d(chist, name, title)
    return chist
def hist( var,
          name = '',
          nbins = 100,
          htype = 'double',
          title = None,
          vmin = None,
          vmax = None,
          wvar = None,
          xtitle = '',
          ytitle = '' ):
    '''
    Function to generate a Root histogram given a list. By default no y-title is
    drawn, but it can be set with the < ytitle > option. For values of type int,
    the histogram will be of type double.
    '''
    histcall = hist_ctor_from_type(htype, 1)
    vmin, vmax = hist_bounds(var, vmin = vmin, vmax = vmax)
    h = histcall('', '', nbins, vmin, vmax)
    # Fill with or without per-entry weights
    if wvar:
        for el, w in zip(var, wvar):
            h.Fill(el, w)
    else:
        for el in var:
            h.Fill(el)
    format_plottable_2d(h, name, title, xtitle, ytitle)
    return h
def hist2d( xvar, yvar,
            name = '',
            htype = 'double',
            title = None,
            wvar = None,
            xbins = 100,
            xmax = None,
            xmin = None,
            xtitle = '',
            ybins = 100,
            ymax = None,
            ymin = None,
            ytitle = '' ):
    '''
    Creates a 2-dimensional histogram given two lists
    '''
    histcall = hist_ctor_from_type(htype, 2)
    # Resolve axis bounds from the data when not given explicitly
    xmin, xmax = hist_bounds(xvar, vmin = xmin, vmax = xmax)
    ymin, ymax = hist_bounds(yvar, vmin = ymin, vmax = ymax)
    h = histcall('', '', xbins, xmin, xmax, ybins, ymin, ymax)
    # Fill with or without per-entry weights
    if wvar:
        for x, y, w in zip(xvar, yvar, wvar):
            h.Fill(x, y, w)
    else:
        for x, y in zip(xvar, yvar):
            h.Fill(x, y)
    format_plottable_2d(h, name, title, xtitle, ytitle)
    return h
def kstest( mgrA, mgrB, variables,
            vartitles = [], usecl = True, name = '', title = '' ):
    '''
    Perform Kolmogorov-Smirnov tests for the two given DataMgr objects and variables.
    If "usecl" is set to True, the confidence level of exclusion that the two
    samples follow the same distribution is given instead of the p-value. Returns
    a dictionary with the values of the test statistic and p-value for each variable,
    together with the filled histograms.
    '''
    results = {v: sc.stats.ks_2samp(mgrA[v], mgrB[v]) for v in variables}
    nvar = len(variables)
    if not vartitles:
        vartitles = variables
    elif nvar != len(vartitles):
        sendErrorMsg('Number of titles is not the same as the number of variables')
    h_tstat = rt.TH1D('', '', nvar, 0, nvar)
    h_pval = rt.TH1D('', '', nvar, 0, nvar)
    for i, (v, t) in enumerate(zip(variables, vartitles)):
        tstat, pval = results[v]
        if usecl:
            # Convert p-value into an exclusion confidence level (%)
            pval = (1. - pval)*100.
        h_tstat.SetBinContent(i + 1, tstat)
        h_pval.SetBinContent(i + 1, pval)
        for h in (h_tstat, h_pval):
            h.GetXaxis().SetBinLabel(i + 1, t)
    # Display ranges: CL in percent, otherwise raw [0, 1] values
    if usecl:
        h_pval.GetYaxis().SetRangeUser(0, 110)
    else:
        h_pval.GetYaxis().SetRangeUser(0, 1.1)
    h_tstat.GetYaxis().SetRangeUser(0, 1.1)
    return results, h_tstat, h_pval
class _GraphInConfig:
'''
Auxiliar class to build a TGraph, TGraphErrors or TGraphAsymmErrors objects
depending on the input arguments
'''
def __init__( self, values, err, errlo, errup ):
'''
Values, symmetric and asymmetric errors must be provided
'''
self.values = np.array(values, dtype = float)
self.err = any((el is not False) for el in (err, errlo, errup))
self.sym = None
self.errLo = None
self.errUp = None
self.errors = None
if self.err:
if err:
''' Symmetric errors '''
self.sym = True
if errlo or errup:
sendWarningMsg('Specified both sym. and asym. errors; only sym. '\
'will be considered')
self.errors = np.array(err, dtype = float)
self._build_asym()
else:
''' Asymmetric errors '''
self.sym = False
if errlo:
self.errLo = np.array(errlo, dtype = float)
else:
self.errLo = np.zeros(len(errup), dtype = float)
if errup:
self.errUp = np.array(errup, dtype = float)
else:
self.errUp = np.zeros(len(errlo), dtype = float)
self.errors = np.zeros(len(self.errLo), dtype = float)
else:
''' If no errors are specified, they are considered symmetric '''
self.sym = True
self.err = False
self.errors = np.zeros(len(values), dtype = float)
self._build_asym()
def _build_asym( self ):
'''
Sets the lower and upper errors to the symmetric errors
'''
self.errLo = self.errUp = self.errors
def multiplot( mgrs, variables,
               cuts = False,
               errors = False,
               flist = None,
               legend = True,
               name = 'canvas',
               nbins = 100,
               norm = True,
               ranges = None,
               title = None ):
    '''
    This function plots in the same canvas the distributions of the given
    variables from different "DataMgr" classes. Different options can also
    been provided to modify the canvas and the information displayed. If
    < ranges > is provided, it must contain the same name of the variables
    passed in < variables > (this applies also to formulas).

    mgrs      : managers providing access to the variables.
    variables : variables (or formulas) to draw.
    cuts      : optional cuts applied before drawing.
    errors    : if True, histograms are drawn with error bars.
    legend    : whether to draw a legend and an entries panel.
    ranges    : dict of per-variable (nbins, vmin, vmax) tuples.

    Returns a CanvasStorer with the canvas, the histograms and the
    legend/info objects, or None if a variable is not accessible.
    '''
    ranges = ranges or {}
    flist = flist or FormatList()
    title = title or name
    # One extra pad is reserved for the legend/info panel
    nvars = len(variables) + 1
    if all(var in mgr for mgr in mgrs for var in variables):
        ''' Generates and divides the canvas '''
        nyvars, nxvars = opt_canvas_div( nvars )
        canvas = rt.TCanvas(name, title, 300*nyvars, 300*nxvars)
        canvas.Divide(nyvars, nxvars)
        canvas_info = CanvasStorer(canvas)
        nmgrs = len(mgrs)
        ''' If cuts are specified it calculates the true managers '''
        # Fix: rebind locally instead of assigning into the input list, so
        # the caller's < mgrs > list is no longer mutated as a side effect.
        if cuts:
            mgrs = [mgr.subsample(cuts = cuts, name = mgr.name) for mgr in mgrs]
        ''' Disables the stat box of the histograms '''
        rt.gStyle.SetOptStat( 0 )
        ''' Constructs the legend and the information panel if specified '''
        if legend:
            pave_dim = (0.1, 0.8 - nmgrs*0.05, 0.9, 0.9)
            text_size = 0.075
            rlegend = rt.TLegend(*pave_dim)
            rlegend.SetHeader('#bf{-- Legend --}')
            rlegend.SetTextAlign(22)
            rlegend.SetTextSize(text_size)
            rlegend.SetFillColor(15)
            rtxtinf = rt.TPaveText(*pave_dim)
            rtxtinf.AddText( '-- Number of entries --')
            rtxtinf.SetTextSize(text_size)
            rtxtinf.SetFillColor(42)
            rtxtinf.SetShadowColor(0)
            canvas_info.infoObjs += [rlegend, rtxtinf]
        ''' Generates and draws the histograms '''
        for iv, var in enumerate(variables):
            canvas.cd(iv + 1)
            ''' This is done to reduce disk usage '''
            tot = np.array([m[var].values.T for m in mgrs])
            ''' Extract the ranges for each variable (if any) '''
            if var in ranges.keys():
                rnbins, vmin, vmax = ranges[var]
            else:
                if any(len(m) != 0 for m in tot):
                    vmin = tot.min()
                    vmax = tot.max()
                else:
                    vmin = 0
                    vmax = 1
                rnbins = nbins
            entries = []
            hists = []
            for im, (mgr, vals) in enumerate(zip(mgrs, tot)):
                hname = mgr.name + '_' + var
                h = hist( vals,
                          name = hname,
                          title = var,
                          nbins = rnbins,
                          vmin = vmin,
                          vmax = vmax )
                entries.append(h.GetEntries())
                hists.append(h)
                flist[im].apply(h)
                h.GetXaxis().SetTitle(var)
            ''' Draw histograms, with error bars if specified '''
            hists = draw_hists(hists, drawopt = errors*'E', norm = norm, title = '')
            if legend and iv == 0:
                ''' In the first iteration add the entries to the legend '''
                for mgr, h, sw in zip(mgrs, hists[1:], entries):
                    rlegend.AddEntry(h, '#bf{{{}}}'.format(mgr.name), 'L')
                    rtxtinf.AddText('{}: {}'.format(mgr.name, sw))
            canvas_info.dataObjs += hists
        if legend:
            pad = canvas.cd(nvars)
            pad.Divide(2, 1)
            pad.cd(1); rlegend.Draw()
            pad.cd(2); rtxtinf.Draw()
        canvas.Update()
        return canvas_info
    else:
        sendErrorMsg('Some managers do not have access to some of the variables')
        return
def opt_canvas_div( nvars ):
    '''
    Create the optimal canvas division for a given number of pads.
    Starts from the square root of the nearest perfect square and widens
    one dimension until all pads fit.
    '''
    nstsq = int(sqrt(nearest_square(nvars)))
    # The previous implementation branched on nstsq**2 > nvars, but both
    # branches assigned the same values and the loop below is a no-op in
    # that case anyway; the branch was redundant.
    nxvars = nyvars = nstsq
    while nxvars*nyvars < nvars:
        nyvars += 1
    return nyvars, nxvars
def scatter_plot( xvar, yvar,
                  xerr = False,
                  xerrlo = False,
                  xerrup = False,
                  yerr = False,
                  yerrlo = False,
                  yerrup = False,
                  name = '',
                  title = None,
                  xtitle = '',
                  ytitle = '' ):
    '''
    Generates a scatter plot given two lists of data. The concrete ROOT
    class (TGraph, TGraphErrors or TGraphAsymmErrors) is chosen from the
    error inputs that were provided.
    '''
    xcfg = _GraphInConfig(xvar, xerr, xerrlo, xerrup)
    ycfg = _GraphInConfig(yvar, yerr, yerrlo, yerrup)
    xvals = xcfg.values
    yvals = ycfg.values
    npoints = len(xvals)
    if not (xcfg.err or ycfg.err):
        # No errors at all: a plain graph is enough
        graph = rt.TGraph(npoints, xvals, yvals)
    elif xcfg.sym and ycfg.sym:
        # Symmetric errors on both axes
        graph = rt.TGraphErrors(npoints, xvals, yvals, xcfg.errors, ycfg.errors)
    else:
        # At least one axis carries asymmetric errors
        graph = rt.TGraphAsymmErrors(npoints, xvals, yvals,
                                     xcfg.errLo, xcfg.errUp,
                                     ycfg.errLo, ycfg.errUp)
    format_plottable_2d(graph, name, title, xtitle, ytitle)
    return graph
def superimposed_pads( canvas ):
    '''
    Add two pads to the given canvas, both being fully transparent, and
    return them
    '''
    def _make_transparent( pad ):
        # Transparent fill/frame so the pads can be stacked on each other
        pad.SetLineStyle(0)
        pad.SetFillStyle(4000)
        pad.SetFillColor(0)
        pad.SetFrameFillStyle(4000)

    pad1 = rt.TPad('pad1', '', 0, 0, 1, 1)
    pad2 = rt.TPad('pad2', '', 0, 0, 1, 1)
    for pad in (pad1, pad2):
        canvas.cd()
        _make_transparent(pad)
        pad.Draw()
    canvas.cd()
    return pad1, pad2
"""
Recipe for building a coarse-grained silica nanoparticle tethered with
polymer chains.
"""
import mbuild as mb
import numpy as np
from numpy import sqrt, pi, arctan2, arcsin
class build_silica_NP(mb.Compound):
    """
    Build a tethered_NP compound.
    Example would be a silica nanoparticle covered in alkane chains
    Parameters
    ----------
    Args:
        n_core_particles (int): Number of particles that makes up the central NP
        ball_radius (float): Radius of the nanoparticle.
        n_chains (int): Number of chains to attach to the nanoparticle.
        chain_length (int): Length of the chains being attached.
        monomer (Compound, optional): Type of chain being attached
    """
    def __init__(self, n_core_particles=129, ball_radius=10, n_chains=4,
                 chain_length=10, monomer=None):
        #n_core_particles can be related to the radius by a percent area
        super(build_silica_NP, self).__init__()
        class Bead(mb.Compound):
            """A point particle with two ports pointing in opposite directions"""
            def __init__(self,particle_kind):
                # One particle with an 'up' and a 'down' port offset by
                # +/-0.7 along y, so beads can be chained into a polymer.
                super(Bead,self).__init__()
                self.add(mb.Particle(name=particle_kind), particle_kind)
                self.add(mb.Port(anchor=self.labels[particle_kind]),'up')
                self['up'].translate(np.array([0, 0.7, 0]))
                self.add(mb.Port(anchor=self.labels[particle_kind]), 'down')
                self['down'].translate(np.array([0, -0.7, 0]))
        """Create a cg bead to use as the NP chains"""
        if not monomer:
            monomer = Bead(particle_kind='chain_monomer')
        """Create the particles that make up the core sphere"""
        class Sphere(mb.Compound):
            def __init__(self, n=65, radius=1, port_distance_from_surface=0.07):
                """Initialize a sphere object
                Args:
                    n (int): Number of points used to construct the Sphere
                    radius (float, nm): Radius of the sphere from np center to center of CG particles
                    port_distance_from_surface (float, nm): Distance of Sphere Ports
                """
                super(Sphere,self).__init__()
                particle = mb.Particle(name='np')
                particle.add(mb.Port(anchor=particle), label='out')
                #Generate points on sphere surface
                pattern=mb.SpherePattern(n)
                pattern.scale(radius)
                particles=pattern.apply(particle, orientation='normal', compound_port='out')
                self.add(particles, label='np_[$]')
                # NOTE(review): 'np' particles are added twice -- once by
                # pattern.apply above and once in the loop below at the same
                # positions. Confirm this duplication is intended.
                #Create particles and Ports at pattern positions
                for i, pos in enumerate(pattern.points):
                    particle = mb.Particle(name="np",pos=pos)
                    self.add(particle, "np_{}".format(i))
                    port=mb.Port(anchor=particle)
                    self.add(port, "port_{}".format(i))
                    #Make the top of the port point towards the positive x axis
                    port.spin(-pi/2, [0,0,1])
                    #Raise up or down the top of the port in the z direction
                    port.spin(-arcsin(pos[2]/radius), [0, 1, 0])
                    #rotate the port along the z axis
                    port.spin(arctan2(pos[1], pos[0]), [0, 0, 1])
                    #Move the Port a bit away from the surface of the Sphere
                    port.translate(pos/radius * port_distance_from_surface)
        #Add in the correct number of particles around a sphere
        # NOTE(review): port_distance_from_surface=0.7 here overrides the
        # Sphere default of 0.07 (10x larger) -- verify this is deliberate.
        self.add(Sphere(n=n_core_particles, radius=ball_radius,
                        port_distance_from_surface=0.7), label="np")
        # Generate points on the surface of a unit sphere to attach chains.
        pattern = mb.SpherePattern(n_chains)
        # Magnify it a bit.
        pattern.scale(ball_radius)
        chain_proto = mb.recipes.Polymer(monomer, n=chain_length)
        # Apply chains to pattern.
        chain_protos, empty_backfill = pattern.apply_to_compound(chain_proto,
                guest_port_name="down", host=self['np'])
        self.add(chain_protos)
        # Bond window derived from the average inter-particle spacing on the
        # sphere surface (sqrt(4*pi*r^2 / n) +/- 0.5).
        self.generate_bonds('np', 'np', sqrt(4 * ball_radius ** 2 * pi / n_core_particles) - 0.5,
                            sqrt(4 * ball_radius**2 * pi / n_core_particles) + 0.5)
        self.generate_bonds('np', 'chain_monomer', 0.1, 0.3)
        self.generate_bonds('chain_monomer', 'np', 0.1, 0.3)
import argparse
import logging
import os, sys
import csv
import numpy as np
import random
import time
from run_ple_utils import make_ple_env
def main_event_dependent():
    """Run a hand-crafted, event-dependent controller on ContFlappyBird.

    For seeds 100..119 the environment is reset and stepped for
    --total_timesteps steps; the action is chosen by a threshold rule on
    the state vector (presumably bird position vs. the midpoint of the
    next gap -- TODO confirm against the env's state layout).

    NOTE(review): --logdir and --eval_model are parsed but unused in the
    active code path; the logging/CSV code that consumed them is
    commented out below.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--test_env', help='testv environment ID', default='ContFlappyBird-v3')
    parser.add_argument('--total_timesteps', help='Total number of env steps', type=int, default=int(2e5))
    parser.add_argument('--seed', help='RNG seed', type=int, default=1)
    parser.add_argument('--logdir', default='/home/mara/Desktop/logs/ED_CONTROL',
                        help='directory where logs are stored')
    parser.add_argument('--show_interval', type=int, default=1,
                        help='Env is rendered every n-th episode. 0 = no rendering')
    parser.add_argument('--eval_model', choices=['all', 'inter', 'final'], default='inter',
                        help='Eval all stored models, only the final model or only the intermediately stored models (while testing the best algorithm configs)')
    args = parser.parse_args()
    np.random.seed(args.seed)
    random.seed(args.seed)
    # Init test_results.csv
    # rnd_output_dir = args.logdir
    #
    # logger = logging.getLogger()
    # fh = logging.FileHandler(os.path.join(rnd_output_dir, 'algo.log'))
    # fh.setLevel(logging.INFO)
    # fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s:%(name)s: %(message)s'))
    # logger.addHandler(fh)
    # logger.setLevel(logging.INFO)
    # logger.propagate = False
    #
    # result_path = os.path.join(rnd_output_dir, 'test_results.csv')
    for s in range(100, 120):
        test_env = make_ple_env(args.test_env, seed=s)
        state = test_env.reset()
        total_return = 0
        rew_traj = []
        t = 0
        while t < args.total_timesteps:
            t += 1
            # (removed dead debugger hook: "if t % 20 == 0: a = 1")
            if args.show_interval > 0:
                test_env.render()
                time.sleep(0.01)
            # Event-dependent policy: flap when state[0] is above the
            # midpoint of state[2] and state[3].
            if state[0] > 0.5*(state[2]+state[3]):
                action = 0  # FLAP
            else:
                action = 1
            state, reward, dones, _ = test_env.step(action)
            total_return += reward
            rew_traj.append(reward)
        test_env.close()
        # with open(result_path, "a") as csvfile:
        #     writer = csv.writer(csvfile)
        #     writer.writerow([s, 0, np.mean(rew_traj)])
# Script entry point: run the event-dependent controller.
if __name__ == '__main__':
    main_event_dependent()
"""Utilities to visualize (one batch of) the points created by a sampler.
"""
import numpy as np
import matplotlib.pyplot as plt
def scatter(subspace, *samplers):
    """Shows (one batch) of used points in the training. If the sampler is
    static, the shown points will be the points for the training. If not,
    the points may vary, depending on the sampler.

    Parameters
    ----------
    subspace : torchphysics.problem.Space
        The (sub-)space of which the points should be plotted.
        Only plotting for dimensions <= 3 is possible.
    *samplers : torchphysics.problem.Samplers
        The different samplers for which the points should be plotted.
        The plot for each sampler will be created in the order they were
        passed in.

    Returns
    -------
    fig : matplotlib.pyplot.figure
        The figure handle of the plot.
    """
    assert subspace.dim <= 3, "Can only scatter points in dimensions <= 3."
    fig, ax, scatter_fn = _choose_scatter_function(subspace.dim)
    ax.grid()
    # The axis labels depend only on the subspace, so compute them once
    # instead of once per sampler (was loop-invariant work).
    labels = _create_labels(subspace)
    for sampler in samplers:
        points = sampler.sample_points()[:, list(subspace.keys())]
        numpy_points = points.as_tensor.detach().cpu().numpy()
        scatter_fn(ax, numpy_points, labels)
    return fig
def _create_labels(subspace):
    """Return one axis label per scalar dimension of the given subspace.

    One-dimensional variables keep their name; multi-dimensional ones get
    a 1-based component suffix (e.g. ``u_1``, ``u_2``).
    """
    labels = []
    for var in subspace:
        dim = subspace[var]
        if dim == 1:
            labels.append(var)
        else:
            labels.extend(var + f'_{k+1}' for k in range(dim))
    return labels
def _choose_scatter_function(space_dim):
    """Create a new figure/axes pair and pick the scatter helper that
    matches the spatial dimension (3D axes only for dimension 3+)."""
    fig = plt.figure()
    if space_dim == 1:
        return fig, fig.add_subplot(), _scatter_1D
    if space_dim == 2:
        return fig, fig.add_subplot(), _scatter_2D
    return fig, fig.add_subplot(projection='3d'), _scatter_3D
def _scatter_1D(ax, points, labels):
    """Scatter 1D points along the x-axis, with y fixed at zero."""
    baseline = np.zeros_like(points)
    ax.scatter(points, baseline)
    ax.set_xlabel(labels[0])
def _scatter_2D(ax, points, labels):
    """Scatter 2D points; the two columns of ``points`` are x and y."""
    xs, ys = points[:, 0], points[:, 1]
    ax.scatter(xs, ys)
    ax.set_xlabel(labels[0])
    ax.set_ylabel(labels[1])
def _scatter_3D(ax, points, labels):
    """Scatter 3D points; the three columns of ``points`` are x, y, z."""
    coords = (points[:, 0], points[:, 1], points[:, 2])
    ax.scatter(*coords)
    ax.set_xlabel(labels[0])
    ax.set_ylabel(labels[1])
    ax.set_zlabel(labels[2])
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
import numpy as np
import pytest
import pandapower as pp
import pandapower.shortcircuit as sc
from pandapower.shortcircuit.toolbox import detect_power_station_unit, calc_sc_on_line
def iec_60909_4():
    """Build the example network used by the IEC-60909 short-circuit tests.

    Contains two external grids, two power-station units (generator plus
    unit transformer), an extra generator, four three-winding transformers,
    several 110 kV lines and a group of asynchronous motors.
    """
    net = pp.create_empty_network(sn_mva=34)
    # Buses (380 / 110 / 30 / 20 / 10 kV levels)
    b1 = pp.create_bus(net, vn_kv=380.)
    b2 = pp.create_bus(net, vn_kv=110.)
    b3 = pp.create_bus(net, vn_kv=110.)
    b4 = pp.create_bus(net, vn_kv=110.)
    b5 = pp.create_bus(net, vn_kv=110.)
    b6 = pp.create_bus(net, vn_kv=10.)
    b7 = pp.create_bus(net, vn_kv=10.)
    b8 = pp.create_bus(net, vn_kv=30.)
    HG1 = pp.create_bus(net, vn_kv=20)
    HG2 = pp.create_bus(net, vn_kv=10)  # 10.5kV?
    T_T5 = pp.create_bus(net, vn_kv=10)
    T_T6 = pp.create_bus(net, vn_kv=10)
    H = pp.create_bus(net, vn_kv=30.)
    # External grid feeders
    pp.create_ext_grid(net, b1, s_sc_max_mva=38 * 380 * np.sqrt(3), rx_max=0.1, x0x_max=3, r0x0_max=0.15)
    pp.create_ext_grid(net, b5, s_sc_max_mva=16 * 110 * np.sqrt(3), rx_max=0.1, x0x_max=3.3, r0x0_max=0.2)
    # t1 = pp.create_transformer_from_parameters(net, b4, HG1, sn_mva=150,
    #                                            pfe_kw=0, i0_percent=0,
    #                                            vn_hv_kv=115., vn_lv_kv=21, vk_percent=16, vkr_percent=0.5,
    #                                            pt_percent=12, oltc=True)
    # Power-station unit 1 (unit transformer + generator HG1)
    t1 = pp.create_transformer_from_parameters(net, b4, HG1, sn_mva=150,
                                               pfe_kw=0, i0_percent=0,
                                               vn_hv_kv=115., vn_lv_kv=21, vk_percent=16, vkr_percent=0.5,
                                               pt_percent=12, oltc=True, vk0_percent=15.2,
                                               vkr0_percent=0.5, xn_ohm=22, vector_group="YNd",
                                               mag0_percent=100, mag0_rx=0, si0_hv_partial=0.5,
                                               power_station_unit=True)
    pp.create_gen(net, HG1, p_mw=0.85 * 150, vn_kv=21,
                  xdss_pu=0.14, rdss_ohm=0.002, cos_phi=0.85, sn_mva=150, pg_percent=0,
                  power_station_trafo=t1)
    # Power-station unit 2 (unit transformer + slack generator HG2)
    t2 = pp.create_transformer_from_parameters(net, b3, HG2, sn_mva=100,
                                               pfe_kw=0, i0_percent=0, vn_hv_kv=120., vn_lv_kv=10.5, vk_percent=12,
                                               vkr_percent=0.5,
                                               oltc=False, vk0_percent=12, vkr0_percent=0.5, vector_group="Yd",
                                               mag0_percent=100, mag0_rx=0, si0_hv_partial=0.5,
                                               power_station_unit=True)
    pp.create_gen(net, HG2, p_mw=0.9 * 100, vn_kv=10.5,
                  xdss_pu=0.16, rdss_ohm=0.005, cos_phi=0.9, sn_mva=100, pg_percent=7.5,
                  slack=True, power_station_trafo=t2)
    # # Add gen 3
    # pp.create_gen(net, b6, p_mw=0.9 * 100, vn_kv=10.5,
    #               xdss_pu=0.1, rdss_ohm=0.018, cos_phi=0.8, sn_mva=10, pg_percent=5)
    # Add gen 3
    pp.create_gen(net, b6, p_mw=0, vn_kv=10.5,
                  xdss_pu=0.1, rdss_ohm=0.018, cos_phi=0.8, sn_mva=10, pg_percent=0)
    # Three-winding network transformers
    pp.create_transformer3w_from_parameters(net,
                                            hv_bus=b1, mv_bus=b2, lv_bus=H,
                                            vn_hv_kv=400, vn_mv_kv=120, vn_lv_kv=30,
                                            sn_hv_mva=350, sn_mv_mva=350, sn_lv_mva=50,
                                            pfe_kw=0, i0_percent=0,
                                            vk_hv_percent=21, vkr_hv_percent=.26,
                                            vk_mv_percent=7, vkr_mv_percent=.16,
                                            vk_lv_percent=10., vkr_lv_percent=.16,
                                            vk0_hv_percent=44.1, vkr0_hv_percent=0.26,
                                            vk0_mv_percent=6.299627, vkr0_mv_percent=0.03714286,
                                            vk0_lv_percent=6.299627, vkr0_lv_percent=0.03714286,
                                            vector_group="YNyd",
                                            tap_max=10, tap_min=-10, tap_pos=0, tap_neutral=0,
                                            tap_side="hv", tap_step_percent=0.1)  # vk0 = sqrt(vkr0^2 + vki0^2) = sqrt(vkr^2 + (2.1 * vki)^2) = sqrt(vkr^2 + (2.1)^2 * (vk^2 - vkr^2))
    pp.create_transformer3w_from_parameters(net,
                                            hv_bus=b1, mv_bus=b2, lv_bus=b8,
                                            vn_hv_kv=400, vn_mv_kv=120, vn_lv_kv=30,
                                            sn_hv_mva=350, sn_mv_mva=350, sn_lv_mva=50,
                                            pfe_kw=0, i0_percent=0,
                                            vk_hv_percent=21, vkr_hv_percent=.26,
                                            vk_mv_percent=7, vkr_mv_percent=.16,
                                            vk_lv_percent=10., vkr_lv_percent=.16,
                                            vk0_hv_percent=44.1, vkr0_hv_percent=0.26,
                                            vk0_mv_percent=6.299627, vkr0_mv_percent=0.03714286,
                                            vk0_lv_percent=6.299627, vkr0_lv_percent=0.03714286,
                                            vector_group="Yynd",
                                            tap_max=10, tap_min=-10, tap_pos=0, tap_neutral=0,
                                            tap_side="hv", tap_step_percent=0.1)
    pp.create_transformer3w_from_parameters(net,
                                            hv_bus=b5, mv_bus=b6, lv_bus=T_T5,
                                            vn_hv_kv=115., vn_mv_kv=10.5, vn_lv_kv=10.5,
                                            sn_hv_mva=31.5, sn_mv_mva=31.5, sn_lv_mva=31.5,
                                            pfe_kw=0, i0_percent=0,
                                            vk_hv_percent=12, vkr_hv_percent=.5,
                                            vk_mv_percent=12, vkr_mv_percent=.5,
                                            vk_lv_percent=12, vkr_lv_percent=.5,
                                            vk0_hv_percent=12, vkr0_hv_percent=0.5,
                                            vk0_mv_percent=12, vkr0_mv_percent=0.5,
                                            vk0_lv_percent=12, vkr0_lv_percent=0.5,
                                            vector_group="Yyd",
                                            tap_max=10, tap_min=-10, tap_pos=0, tap_neutral=0,
                                            tap_side="hv", tap_step_percent=0.1)
    pp.create_transformer3w_from_parameters(net,
                                            hv_bus=b5, mv_bus=b6, lv_bus=T_T6,
                                            vn_hv_kv=115., vn_mv_kv=10.5, vn_lv_kv=10.5,
                                            sn_hv_mva=31.5, sn_mv_mva=31.5, sn_lv_mva=31.5,
                                            pfe_kw=0, i0_percent=0,
                                            vk_hv_percent=12, vkr_hv_percent=.5,
                                            vk_mv_percent=12, vkr_mv_percent=.5,
                                            vk_lv_percent=12, vkr_lv_percent=.5,
                                            vk0_hv_percent=12, vkr0_hv_percent=0.5,
                                            vk0_mv_percent=12, vkr0_mv_percent=0.5,
                                            vk0_lv_percent=12, vkr0_lv_percent=0.5,
                                            vector_group="Yynd",
                                            tap_max=10, tap_min=-10, tap_pos=0, tap_neutral=0,
                                            tap_side="hv", tap_step_percent=0.1)  # reactor is 100 Ohm
    # Asynchronous motor group at b7: one 5 MW motor and two 2 MW motors
    pp.create_motor(net, b7, pn_mech_mw=5.0, cos_phi=0.88, cos_phi_n=0.88,
                    efficiency_n_percent=97.5,
                    vn_kv=10, rx=0.1, lrc_pu=5)
    for _ in range(2):
        pp.create_motor(net, b7, pn_mech_mw=2.0, cos_phi=0.89, cos_phi_n=0.89,
                        efficiency_n_percent=96.8,
                        vn_kv=10, rx=0.1, lrc_pu=5.2)
    # 110 kV lines (with zero-sequence parameters for unbalanced faults)
    pp.create_line_from_parameters(net, b2, b3, name="L1",
                                   c_nf_per_km=0, max_i_ka=0,  # FIXME: Optional for SC
                                   length_km=20, r_ohm_per_km=0.12, x_ohm_per_km=0.39,
                                   r0_ohm_per_km=0.32, x0_ohm_per_km=1.26, c0_nf_per_km=0, g0_us_per_km=0)
    pp.create_line_from_parameters(net, b3, b4, name="L2",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=10, r_ohm_per_km=0.12, x_ohm_per_km=0.39,
                                   r0_ohm_per_km=0.32, x0_ohm_per_km=1.26, c0_nf_per_km=0, g0_us_per_km=0)
    pp.create_line_from_parameters(net, b2, b5, name="L3a",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=5, r_ohm_per_km=0.12, x_ohm_per_km=0.39,
                                   r0_ohm_per_km=0.52, x0_ohm_per_km=1.86, c0_nf_per_km=0, g0_us_per_km=0)
    pp.create_line_from_parameters(net, b2, b5, name="L3b",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=5, r_ohm_per_km=0.12, x_ohm_per_km=0.39,
                                   r0_ohm_per_km=0.52, x0_ohm_per_km=1.86, c0_nf_per_km=0, g0_us_per_km=0)
    pp.create_line_from_parameters(net, b5, b3, name="L4",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=10, r_ohm_per_km=0.096, x_ohm_per_km=0.388,
                                   r0_ohm_per_km=0.22, x0_ohm_per_km=1.1, c0_nf_per_km=0, g0_us_per_km=0)
    pp.create_line_from_parameters(net, b5, b4, name="L5",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=15, r_ohm_per_km=0.12, x_ohm_per_km=0.386,
                                   r0_ohm_per_km=0.22, x0_ohm_per_km=1.1, c0_nf_per_km=0, g0_us_per_km=0)
    pp.create_line_from_parameters(net, b6, b7, name="L6",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=1, r_ohm_per_km=0.082, x_ohm_per_km=0.086,
                                   r0_ohm_per_km=0.082, x0_ohm_per_km=0.086, c0_nf_per_km=0, g0_us_per_km=0)
    # bus F for 1ph fault: 1, 2, 3, 4
    return net
def iec_60909_4_small(with_xward=False):
    """Build a reduced variant of the IEC-60909 example network.

    Contains two external grids, one power-station unit, two
    three-winding transformers and four 110 kV lines. With
    ``with_xward=True`` an extended ward element is added at bus b5.
    """
    net = pp.create_empty_network(sn_mva=6)
    b1 = pp.create_bus(net, vn_kv=380.)
    b2 = pp.create_bus(net, vn_kv=110.)
    b3 = pp.create_bus(net, vn_kv=110.)
    b5 = pp.create_bus(net, vn_kv=110.)
    b8 = pp.create_bus(net, vn_kv=30.)
    H = pp.create_bus(net, vn_kv=30.)
    HG2 = pp.create_bus(net, vn_kv=10)
    pp.create_ext_grid(net, b1, s_sc_max_mva=38 * 380 * np.sqrt(3), rx_max=0.1, x0x_max=3, r0x0_max=0.15)
    pp.create_ext_grid(net, b5, s_sc_max_mva=16 * 110 * np.sqrt(3), rx_max=0.1, x0x_max=3.3, r0x0_max=0.2)
    # Power-station unit: unit transformer + slack generator at HG2
    t1 = pp.create_transformer_from_parameters(net, b3, HG2, sn_mva=100,
        pfe_kw=0, i0_percent=0, vn_hv_kv=120., vn_lv_kv=10.5, vk_percent=12, vkr_percent=0.5,
        vk0_percent=12, vkr0_percent=0.5, mag0_percent=100, mag0_rx=0, si0_hv_partial=0.5,
        shift_degree=5, vector_group="Yd", power_station_unit=True)
    pp.create_gen(net, HG2, p_mw=0.9 * 100, vn_kv=10.5,
                  xdss_pu=0.16, rdss_ohm=0.005, cos_phi=0.9, sn_mva=100, pg_percent=7.5,
                  slack=True, power_station_trafo=t1)
    pp.create_transformer3w_from_parameters(net,
                                            hv_bus=b1, mv_bus=b2, lv_bus=H,
                                            vn_hv_kv=400, vn_mv_kv=120, vn_lv_kv=30,
                                            sn_hv_mva=350, sn_mv_mva=350, sn_lv_mva=50,
                                            pfe_kw=0, i0_percent=0,  # FIXME: Optional for SC
                                            vk_hv_percent=21, vkr_hv_percent=.26,
                                            vk_mv_percent=7, vkr_mv_percent=.16,
                                            vk_lv_percent=10., vkr_lv_percent=.16)
    pp.create_transformer3w_from_parameters(net,
                                            hv_bus=b1, mv_bus=b2, lv_bus=b8,
                                            vn_hv_kv=400, vn_mv_kv=120, vn_lv_kv=30,
                                            sn_hv_mva=350, sn_mv_mva=350, sn_lv_mva=50,
                                            pfe_kw=0, i0_percent=0,
                                            vk_hv_percent=21, vkr_hv_percent=.26,
                                            vk_mv_percent=7, vkr_mv_percent=.16,
                                            vk_lv_percent=10., vkr_lv_percent=.16)
    pp.create_line_from_parameters(net, b2, b3, name="L1",
                                   c_nf_per_km=0, max_i_ka=0,  # FIXME: Optional for SC
                                   length_km=20, r_ohm_per_km=0.12, x_ohm_per_km=0.39,
                                   r0_ohm_per_km=0.32, x0_ohm_per_km=1.26, c0_nf_per_km=0, g0_us_per_km=0)
    pp.create_line_from_parameters(net, b2, b5, name="L3a",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=5, r_ohm_per_km=0.12, x_ohm_per_km=0.39,
                                   r0_ohm_per_km=0.52, x0_ohm_per_km=1.86, c0_nf_per_km=0, g0_us_per_km=0)
    pp.create_line_from_parameters(net, b2, b5, name="L3b",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=5, r_ohm_per_km=0.12, x_ohm_per_km=0.39,
                                   r0_ohm_per_km=0.52, x0_ohm_per_km=1.86, c0_nf_per_km=0, g0_us_per_km=0)
    pp.create_line_from_parameters(net, b5, b3, name="L4",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=10, r_ohm_per_km=0.096, x_ohm_per_km=0.388,
                                   r0_ohm_per_km=0.22, x0_ohm_per_km=1.1, c0_nf_per_km=0, g0_us_per_km=0)
    if with_xward:
        # impedance 10 Ohm and 20 Ohm is different than the 10 Ohm and 20 Ohm
        # in PowerFactory in "Short-Circuit VDE/IEC". In order to get to the 10 Ohm and 20 Ohm,
        # one must calculate the pz_mw and qz_mva so that the resulting
        # shunt impedance ends up being 10 Ohm and 20 Ohm.
        # how to calculate r and x in Ohm:
        # z_ward_pu = 1/y_ward_pu
        # vn_net = net.bus.loc[ward_buses, "vn_kv"].values
        # z_base_ohm = (vn_net ** 2)# / base_sn_mva)
        # z_ward_ohm = z_ward_pu * z_base_ohm
        pp.create_xward(net, b5, 1, 0, 242, -484, 10, 20, 1)
    return net
def iec_60909_4_small_gen_only():
    """Minimal network: a single power-station unit (unit transformer plus
    slack generator) feeding a 110 kV bus, used for gen-close SC tests."""
    net = pp.create_empty_network(sn_mva=56)
    b3 = pp.create_bus(net, vn_kv=110.)
    HG2 = pp.create_bus(net, vn_kv=10)
    t1 = pp.create_transformer_from_parameters(net, b3, HG2, sn_mva=100,
        pfe_kw=0, i0_percent=0, vn_hv_kv=120., vn_lv_kv=10.5, vk_percent=12, vkr_percent=0.5,
        vk0_percent=12, vkr0_percent=0.5, mag0_percent=100, mag0_rx=0, si0_hv_partial=0.5, vector_group="Yd",
        power_station_unit=True)
    pp.create_gen(net, HG2, p_mw=0.9 * 100, vn_kv=10.5,
                  xdss_pu=0.16, rdss_ohm=0.005, cos_phi=0.9, sn_mva=100, pg_percent=7.5,
                  slack=True, power_station_trafo=t1)
    return net
def iec_60909_4_2gen():
    """Network with two power-station units (HG1 and HG2) connected through
    a single 110 kV line; HG2 acts as slack."""
    net = pp.create_empty_network(sn_mva=12)
    b3 = pp.create_bus(net, vn_kv=110.)
    b4 = pp.create_bus(net, vn_kv=110.)
    HG1 = pp.create_bus(net, vn_kv=20.)
    HG2 = pp.create_bus(net, vn_kv=10.)
    t1 = pp.create_transformer_from_parameters(net, b4, HG1, sn_mva=150,
                                               pfe_kw=0, i0_percent=0,
                                               vn_hv_kv=115., vn_lv_kv=21, vk_percent=16, vkr_percent=0.5,
                                               pt_percent=12, oltc=True, power_station_unit=True)
    pp.create_gen(net, HG1, p_mw=0.85 * 150, vn_kv=21,
                  xdss_pu=0.14, rdss_ohm=0.002, cos_phi=0.85, sn_mva=150, pg_percent=0,
                  power_station_trafo=t1)
    t2 = pp.create_transformer_from_parameters(net, b3, HG2, sn_mva=100,
        pfe_kw=0, i0_percent=0, vn_hv_kv=120., vn_lv_kv=10.5, vk_percent=12, vkr_percent=0.5, oltc=False, power_station_unit=True)
    pp.create_gen(net, HG2, p_mw=0.9 * 100, vn_kv=10.5,
                  xdss_pu=0.16, rdss_ohm=0.005, cos_phi=0.9, sn_mva=100, pg_percent=7.5,
                  slack=True, power_station_trafo=t2)
    pp.create_line_from_parameters(net, b3, b4, name="L2",
                                   c_nf_per_km=0, max_i_ka=0,
                                   length_km=10, r_ohm_per_km=0.12, x_ohm_per_km=0.39)
    return net
def vde_232():
    """Two-bus network for the VDE 232 case: an external 110 kV grid feeding
    a power-station unit (OLTC unit transformer plus generator)."""
    net = pp.create_empty_network(sn_mva=13)
    # hv buses
    pp.create_bus(net, 110)
    pp.create_bus(net, 21)
    pp.create_ext_grid(net, 0, s_sc_max_mva=13.61213 * 110 * np.sqrt(3), rx_max=0.20328,
                       x0x_max=3.47927, r0x0_max=3.03361)
    pp.create_transformer_from_parameters(net, 0, 1, 150, 115, 21, 0.5, 16,
                                          pfe_kw=0, i0_percent=0, tap_step_percent=1,
                                          tap_max=12, tap_min=-12, tap_neutral=0, tap_side='hv',
                                          vector_group="YNd",
                                          vk0_percent=np.sqrt(np.square(0.95*15.99219) + np.square(0.5)),
                                          vkr0_percent=0.5,
                                          mag0_percent=100, mag0_rx=0,
                                          si0_hv_partial=0.9,
                                          pt_percent=12, oltc=True)
    # todo: implement Zn (reactance grounding) -> Z_(0)S = Z_(0)THV*K_S + 3*Z_N
    pp.create_gen(net, 1, 150, 1, 150, vn_kv=21, xdss_pu=0.14, rdss_ohm=0.002, cos_phi=0.85, power_station_trafo=0)
    return net
def test_iec_60909_4_3ph_small_without_gen():
    """3ph maximum short-circuit on the small grid with all generators
    removed; results are compared against stored reference values."""
    net = iec_60909_4_small()
    # Deactivate all gens
    net.gen = net.gen.iloc[0:0, :]
    sc.calc_sc(net, fault="3ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    expected_ikss = np.array([40.3390, 28.4130, 14.2095, 28.7195, 13.4191])
    expected_ip = np.array([99.7374, 72.6580, 32.1954, 72.1443, 36.5036])
    assert np.allclose(net.res_bus_sc.ikss_ka.values[:5], expected_ikss, atol=1e-3)
    assert np.allclose(net.res_bus_sc.ip_ka.values[:5], expected_ip, atol=1e-3)
def test_iec_60909_4_3ph_small_with_gen():
    """3ph maximum short-circuit on the small grid with generators in service."""
    net = iec_60909_4_small()
    sc.calc_sc(net, fault="3ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    ikss_pf = [40.4754, 29.8334, 16.1684, 30.3573]
    ip_pf = [100.1164, 76.1134, 37.3576, 76.2689]
    # NOTE(review): ib_pf and kappa_pf are reference values that are kept
    # for documentation but not asserted in this test.
    ib_pf = [40.4754, 29.7337, 15.9593, 30.2245]
    kappa_pf = [1.7490, 1.8040, 1.6338 , 1.7765]
    assert np.allclose(net.res_bus_sc.ikss_ka.values[:4], np.array(ikss_pf), atol=1e-3)
    assert np.allclose(net.res_bus_sc.ip_ka.values[:4], np.array(ip_pf), atol=1e-3)
def test_iec_60909_4_3ph_small_with_gen_xward():
    """3ph maximum short-circuit on the small grid including the xward."""
    net = iec_60909_4_small(with_xward=True)
    sc.calc_sc(net, fault="3ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    expected_ikss = np.array([40.6422, 31.6394, 16.7409, 33.2808])
    assert np.allclose(net.res_bus_sc.ikss_ka.values[:4], expected_ikss, atol=1e-3)
def test_iec_60909_4_3ph_small_gen_only():
    """3ph maximum short-circuit on the generator-only network."""
    net = iec_60909_4_small_gen_only()
    sc.calc_sc(net, fault="3ph", case="max", ip=True, ith=True, tk_s=0.1, kappa_method="C")
    ikss_pf = [1.9755, 39.5042]
    ip_pf = [5.2316, 104.1085]
    # NOTE(review): ib_pf and kappa are reference values kept for
    # documentation; they are not asserted here.
    ib_pf = [1.6071, 27.3470]
    kappa = [1.8726, 1.8635]
    assert np.allclose(net.res_bus_sc.ikss_ka[:2].values, np.array(ikss_pf), atol=1e-3)
    assert np.allclose(net.res_bus_sc.ip_ka[:2].values, np.array(ip_pf), atol=1e-3)
def test_iec_60909_4_3ph_2gen():
    """3ph maximum short-circuit on the two-power-station-unit network."""
    net = iec_60909_4_2gen()
    sc.calc_sc(net, fault="3ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    ikss_pf = [4.2821, 4.4280, 39.1090, 57.8129]
    ip_pf = [11.1157, 11.6306, 102.7821, 151.5569]
    # NOTE(review): ib_pf reference values are not asserted here.
    ib_pf = [3.6605, 3.7571, 28.3801, 45.3742]
    assert np.allclose(net.res_bus_sc.ikss_ka[:4].values, np.array(ikss_pf), atol=1e-3)
    # TODO: Check this
    assert np.allclose(net.res_bus_sc.ip_ka[:4].values, np.array(ip_pf), atol=1e-1)
def test_iec_60909_4_3ph_2gen_no_ps_detection():
    """3ph short-circuit with power-station-unit pairing disabled and the
    first generator taken out of service."""
    net = iec_60909_4_2gen()
    # Remove the gen/trafo pairing so no unit correction factor is applied
    net.gen.power_station_trafo = np.nan
    net.trafo.power_station_unit = False
    net.gen.at[0, "in_service"] = False
    net.gen = net.gen.query("in_service")
    sc.calc_sc(net, fault="3ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    ikss_pf = [1.8460, 1.6715, 6.8953, 39.5042]
    assert np.allclose(net.res_bus_sc.ikss_ka[:4].values, np.array(ikss_pf), atol=1e-3)
def test_iec_60909_4_3ph_without_motor():
    """3ph maximum short-circuit on the full network with motors removed."""
    # Generator connected to normal bus does not need voltage correction
    net = iec_60909_4()
    net.motor = net.motor.iloc[0:0, :]
    sc.calc_sc(net, fault="3ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    ikss_pf = [40.6347, 31.6635, 19.6231, 16.1956, 32.9971, 34.3559, 22.2762, 13.5726]
    ip_pf = [100.5427, 80.3509, 45.7157, 36.7855, 82.9406, 90.6143, 43.3826, 36.9103]
    assert np.allclose(net.res_bus_sc.ikss_ka.values[:8], np.array(ikss_pf), atol=1e-3)
    assert np.allclose(net.res_bus_sc.ip_ka[:8].values, np.array(ip_pf), atol=1e-3)
def test_iec_60909_4_3ph():
    """3ph maximum short-circuit on the full IEC-60909 example network."""
    net = iec_60909_4()
    sc.calc_sc(net, fault="3ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    ikss = [40.6447, 31.7831, 19.6730, 16.2277, 33.1894,
            37.5629, 25.5895, 13.5778, 52.4438, 80.5720]
    # Ip for kappa B
    # NOTE(review): ip_pf (kappa method B) and ib are reference values that
    # are kept for documentation but not asserted in this test.
    ip_pf = [100.5766, 80.8249, 45.8249, 36.8041, 83.6266,
             99.1910, 51.3864, 36.9201, 136.2801, 210.3159]
    ip_standard_kappa_c = [100.5677, 80.6079, 45.8111, 36.8427,
                           83.4033, 98.1434, 51.6899, 36.9227]
    ib = [40.645, 31.570, 19.388, 16.017, 32.795, 34.028,
          23.212, 13.578, 42.3867, 68.4172]
    skss = [26751.51, 6055.49, 3748.20, 3091.78, 6323.43,
            650.61, 443.22, 705.52, 1816.71, 1395.55]
    assert np.allclose(net.res_bus_sc.ikss_ka.values[:10], np.array(ikss), atol=1e-3)
    assert np.allclose(net.res_bus_sc.ip_ka.values[:8], np.array(ip_standard_kappa_c ), atol=1e-3)
    assert np.allclose(net.res_bus_sc.skss_mw.values[:10], np.array(skss), atol=1e-2)
def test_iec_60909_4_3ph_min():
    """3ph minimum short-circuit with line end temperature and reduced
    external grid power."""
    net = iec_60909_4()
    net.line["endtemp_degree"] = 80.0
    net.ext_grid["s_sc_min_mva"] = net.ext_grid["s_sc_max_mva"]/10
    net.ext_grid["rx_min"] = net.ext_grid["rx_max"]
    sc.calc_sc(net, fault="3ph", case="min", ip=True, tk_s=0.1, kappa_method="C")
    ikss_min = [5.0501, 12.2915, 10.3292, 9.4708, 11.8604,
                28.3052, 18.6148, 10.9005, 44.5098, 67.9578]
    assert np.allclose(net.res_bus_sc.ikss_ka.values[:10], np.array(ikss_min), atol=1e-3)
def test_iec_60909_4_3ph_ps_trafo_flag():
    """Power-station units re-detected from the trafo flag must yield the
    same 3ph results as the explicitly stored pairing."""
    net = iec_60909_4()
    # Move the pairing information from gen.power_station_trafo onto the
    # trafo.power_station_unit flag, then let the toolbox re-detect it.
    net.trafo["power_station_unit"] = False
    ps_trafo = net.gen.power_station_trafo.values
    ps_trafo = ps_trafo[~np.isnan(ps_trafo)].astype(int)
    net.trafo.loc[ps_trafo, "power_station_unit"] = True
    net.gen.power_station_trafo.values[:] = np.nan
    detect_power_station_unit(net, mode="trafo")
    sc.calc_sc(net, fault="3ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    ikss = [40.6447, 31.7831, 19.6730, 16.2277, 33.1894,
            37.5629, 25.5895, 13.5778, 52.4438, 80.5720]
    assert np.allclose(net.res_bus_sc.ikss_ka.values[:10], np.array(ikss), atol=1e-3)
def test_iec_60909_4_2ph():
    """2ph maximum short-circuit on the full IEC-60909 example network."""
    net = iec_60909_4()
    sc.calc_sc(net, fault="2ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    ikss = [35.1994, 27.5249, 17.0373, 14.0536, 28.7429,
            32.5304, 22.1611, 11.7586, 45.4177, 69.7774]
    ip = [87.0941, 69.8085, 39.6736, 31.9067, 72.2294,
          84.9946, 44.7648, 31.9760, 118.0221, 182.1389]
    # No ib for 2ph sc calculation
    skss = [7722.50, 1748.07, 1082.01, 892.52, 1825.42,
            187.81, 127.95, 203.67, 524.44, 402.86]
    assert np.allclose(net.res_bus_sc.ikss_ka.values[:10], np.array(ikss), atol=1e-3)
    assert np.allclose(net.res_bus_sc.ip_ka.values[:10], np.array(ip), atol=1e-3)
    assert np.allclose(net.res_bus_sc.skss_mw.values[:10], np.array(skss), atol=1e-1)
@pytest.mark.skip("1ph gen-close sc calculation still under develop")
def test_iec_60909_4_1ph():
    """1ph maximum short-circuit (currently skipped; feature incomplete)."""
    net = iec_60909_4()
    sc.calc_sc(net, fault="1ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
    ikss = [24.6526, 15.9722, 10.4106, 9.0498, 17.0452,
            0.06337, 0.0633, 0, 0.0001, 0.0001]
    # NOTE(review): ip reference values are not asserted (see the
    # commented-out check below).
    ip = [60.9982, 40.5086, 24.2424, 20.5464, 42.8337,
          0.1656, 0.1279, 0.0, 0.00025, 0.00033]
    # No ib for 1ph sc calculation
    assert np.allclose(net.res_bus_sc.ikss_ka.values[:10], np.array(ikss), atol=1e-4)
    # assert np.allclose(net.res_bus_sc.ip.values[:8], np.array(ip), rtol=1e-4)
def test_detect_power_station_units():
    """detect_power_station_unit must re-identify the two generator /
    unit-transformer pairs after the stored assignment is cleared."""
    net = iec_60909_4()
    # Fix: direct column assignment instead of the previous chained
    # ``net.gen.power_station_trafo[:] = None``, which mutates through a
    # Series view (deprecated / silently broken under pandas Copy-on-Write).
    # np.nan matches the "cleared" convention used by the sibling tests.
    net.gen["power_station_trafo"] = np.nan
    detect_power_station_unit(net)
    assert np.all(net.gen.power_station_trafo.values[[0, 1]] == np.array([0, 1]))
    net.gen["power_station_trafo"] = np.nan
    detect_power_station_unit(net, mode="trafo")
    assert np.all(net.gen.power_station_trafo.values[[0, 1]] == np.array([0, 1]))
def test_sc_on_line():
    """Smoke test: a fault at 30% of line 2 must compute without error.
    (No result values are asserted.)"""
    net = iec_60909_4()
    calc_sc_on_line(net, 2, 0.3)
def test_vde_232():
    """Smoke test for the VDE 232 network: the 3ph calculation must run
    without raising. (No result values are asserted.)"""
    net = vde_232()
    sc.calc_sc(net, fault="3ph", case="max", ip=True, tk_s=0.1, kappa_method="C")
# Allow running this test module directly, outside a pytest invocation.
if __name__ == '__main__':
    pytest.main(["test_iec60909_4.py"])
from sklearn.ensemble import RandomForestRegressor
import time
from sklearn.base import BaseEstimator
from typing import Optional, Dict, Union, Tuple
import pandas as pd
import numpy as np
from sklearn.linear_model import RidgeCV
def train_ridge_lr_model(
    xtrain: Union[np.ndarray, pd.DataFrame],
    ytrain: Union[np.ndarray, pd.DataFrame],
    verbose: int = 0,
    n_jobs: int = 1,
) -> BaseEstimator:
    """Train a cross-validated ridge regression (GLM) model.

    Parameters
    ----------
    xtrain : np.ndarray, pd.DataFrame
        (n_samples x d_features) input training data
    ytrain : np.ndarray, pd.DataFrame
        (n_samples x p_outputs) labeled training data
    verbose : int, default=0
        if > 0, print the training time
    n_jobs : int, default=1
        currently unused; kept for API compatibility

    Returns
    -------
    model : BaseEstimator
        the fitted RidgeCV model
    """
    model = RidgeCV()
    start = time.time()
    model.fit(xtrain, ytrain)
    elapsed = time.time() - start
    if verbose > 0:
        print(f"Training time: {elapsed:.3f} secs.")
    return model
def train_rf_model(
    xtrain: Union[np.ndarray, pd.DataFrame],
    ytrain: Union[np.ndarray, pd.DataFrame],
    params: Optional[Dict] = None,
) -> BaseEstimator:
    """Train a basic Random Forest (RF) Regressor

    Parameters
    ----------
    xtrain : np.ndarray, pd.DataFrame
        (n_samples x d_features)
        input training data
    ytrain : np.ndarray, pd.DataFrame
        (n_samples x p_outputs)
        labeled training data
    params : dict, optional
        keyword arguments forwarded to ``RandomForestRegressor``. An optional
        ``"verbose"`` entry (default 0) also controls whether the training
        time is printed.

    Returns
    -------
    rf_model : BaseEstimator
        the trained model
    """
    if params is None:
        params = {
            "n_estimators": 100,
            # NOTE(review): "mse" was renamed "squared_error" in scikit-learn
            # 1.0 and removed in 1.2 — confirm the pinned sklearn version
            # before changing this default.
            "criterion": "mse",
            "n_jobs": -1,
            "random_state": 123,
            "warm_start": False,
            "verbose": 0,
        }
    # initialize baseline RF model
    rf_model = RandomForestRegressor(**params)
    # train RF model
    t0 = time.time()
    rf_model.fit(xtrain, ytrain)
    t1 = time.time() - t0
    # Bug fix: caller-supplied params need not contain "verbose"; the old
    # params["verbose"] lookup raised KeyError in that case.
    if params.get("verbose", 0) > 0:
        print(f"Training time: {t1:.3f} secs.")
    return rf_model
import unittest
import numpy as np
import numpy.testing as npt
import wisdem.drivetrainse.layout as lay
# Shared fixtures for the layout tests: number of bedplate wall-thickness
# discretization points, and the cosine/sine of the 5-degree shaft tilt.
npts = 12
ct = np.cos(np.deg2rad(5))
st = np.sin(np.deg2rad(5))
class TestDirectLayout(unittest.TestCase):
    """Geometry and mass checks for lay.DirectLayout across tilt/no-tilt and
    upwind/downwind combinations."""

    def setUp(self):
        """Build the shared input/output dictionaries used by every test."""
        self.inputs = {}
        self.outputs = {}
        self.discrete_inputs = {}
        self.discrete_outputs = {}
        self.inputs["L_12"] = 2.0
        self.inputs["L_h1"] = 1.0
        self.inputs["L_generator"] = 3.25
        # self.inputs['L_2n'] = 1.5
        # self.inputs['L_grs'] = 1.1
        # self.inputs['L_gsn'] = 1.1
        self.inputs["overhang"] = 6.25 + 0.5 * 6.5 + 2
        self.inputs["drive_height"] = 4.875
        self.inputs["tilt"] = 5.0
        self.inputs["access_diameter"] = 0.9
        myones = np.ones(2)
        self.inputs["lss_diameter"] = 2.3 * myones
        self.inputs["nose_diameter"] = 1.33 * myones
        self.inputs["lss_wall_thickness"] = 0.05 * myones
        self.inputs["nose_wall_thickness"] = 0.04 * myones
        self.inputs["bedplate_wall_thickness"] = 0.06 * np.ones(4)
        self.inputs["D_top"] = 6.5
        self.inputs["hub_diameter"] = 4.0
        self.inputs["lss_rho"] = self.inputs["bedplate_rho"] = 7850.0
        self.discrete_inputs["upwind"] = True

    def testBedplateLengthHeight(self):
        """Bedplate length/height, including the short-overhang clamp case."""
        self.inputs["tilt"] = 0.0
        myobj = lay.DirectLayout()
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        self.assertAlmostEqual(self.outputs["L_nose"], 3.5)
        self.assertAlmostEqual(self.outputs["L_lss"], 3.0)
        self.assertAlmostEqual(self.outputs["L_drive"], 4.5)
        self.assertAlmostEqual(self.outputs["L_bedplate"], 5.0)
        self.assertAlmostEqual(self.outputs["H_bedplate"], 4.875)
        self.assertAlmostEqual(self.outputs["constr_length"], 5 - 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["constr_height"], 4.875)
        # Shrinking the overhang should clamp L_bedplate to half the tower top
        # diameter, driving the length constraint negative.
        self.inputs["overhang"] = 2.0 + 0.5 * 6.5 + 2
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        self.assertAlmostEqual(self.outputs["L_nose"], 3.5)
        self.assertAlmostEqual(self.outputs["L_lss"], 3.0)
        self.assertAlmostEqual(self.outputs["L_drive"], 4.5)
        self.assertAlmostEqual(self.outputs["L_bedplate"], 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["H_bedplate"], 4.875)
        self.assertAlmostEqual(self.outputs["constr_length"], -2.5)
        self.assertAlmostEqual(self.outputs["constr_height"], 4.875)

    def testNoTiltUpwind(self):
        """Full geometry check, upwind orientation with zero shaft tilt."""
        self.inputs["tilt"] = 0.0
        myobj = lay.DirectLayout()
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        self.assertAlmostEqual(self.outputs["L_nose"], 3.5)
        self.assertAlmostEqual(self.outputs["L_lss"], 3.0)
        self.assertAlmostEqual(self.outputs["L_drive"], 4.5)
        self.assertAlmostEqual(self.outputs["L_bedplate"], self.inputs["overhang"] - self.outputs["L_drive"] - 2)
        self.assertAlmostEqual(self.outputs["H_bedplate"], self.inputs["drive_height"])
        self.assertAlmostEqual(self.outputs["D_bearing1"], 2.3 - 0.05 - 1.33)
        self.assertAlmostEqual(self.outputs["D_bearing2"], 2.3 - 0.05 - 1.33)
        npt.assert_equal(self.outputs["constr_access"][:, -1], 1.33 - 0.08 - 0.9)
        npt.assert_equal(self.outputs["constr_access"][:, 0], 2.3 - 0.1 - 1.33 - 0.25 * 0.9)
        self.assertAlmostEqual(self.outputs["constr_length"], 5 - 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["constr_height"], self.outputs["H_bedplate"])
        self.assertAlmostEqual(self.outputs["s_rotor"], 2 + 1.5 + 0.5)
        self.assertAlmostEqual(self.outputs["s_stator"], 0.75)
        self.assertAlmostEqual(self.outputs["s_mb1"], 1.5 + 2.0)
        self.assertAlmostEqual(self.outputs["s_mb2"], 1.5)
        # Upwind: bedplate x-coordinates run from 0 at the tower to -L at the nose.
        self.assertAlmostEqual(self.outputs["x_bedplate"][-1], -5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_inner"][-1], -5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_outer"][-1], -5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate"][0], 0.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_inner"][0], -0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["x_bedplate_outer"][0], 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["D_bedplate"][0], 6.5)
        self.assertAlmostEqual(self.outputs["z_bedplate"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate_inner"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate_outer"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate"][-1], 4.875)
        self.assertAlmostEqual(self.outputs["z_bedplate_inner"][-1], 4.875 - 0.5 * 1.33)
        self.assertAlmostEqual(self.outputs["z_bedplate_outer"][-1], 4.875 + 0.5 * 1.33)
        self.assertAlmostEqual(self.outputs["D_bedplate"][-1], 1.33)

    def testTiltUpwind(self):
        """Full geometry check, upwind orientation with a 5-degree tilt.

        Overhang/drive_height are adjusted with ct/st so the bedplate targets
        stay at the same nominal values as the no-tilt case.
        """
        self.inputs["tilt"] = 5.0
        self.inputs["overhang"] = 5 + (2 + 4.5) * ct
        self.inputs["drive_height"] = 4.875 + (2 + 4.5) * st
        myobj = lay.DirectLayout()
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        self.assertAlmostEqual(self.outputs["L_nose"], 3.5)
        self.assertAlmostEqual(self.outputs["L_lss"], 3.0)
        self.assertAlmostEqual(self.outputs["L_drive"], 4.5)
        self.assertAlmostEqual(self.outputs["L_bedplate"], self.inputs["overhang"] - (2 + self.outputs["L_drive"]) * ct)
        self.assertAlmostEqual(
            self.outputs["H_bedplate"], self.inputs["drive_height"] - (2 + self.outputs["L_drive"]) * st
        )
        self.assertAlmostEqual(self.outputs["D_bearing1"], 2.3 - 0.05 - 1.33)
        self.assertAlmostEqual(self.outputs["D_bearing2"], 2.3 - 0.05 - 1.33)
        npt.assert_equal(self.outputs["constr_access"][:, -1], 1.33 - 0.08 - 0.9)
        npt.assert_equal(self.outputs["constr_access"][:, 0], 2.3 - 0.1 - 1.33 - 0.25 * 0.9)
        self.assertAlmostEqual(
            self.outputs["constr_length"],
            self.inputs["overhang"] - (2 + self.outputs["L_drive"]) * ct - 0.5 * self.inputs["D_top"],
        )
        self.assertAlmostEqual(self.outputs["constr_height"], self.outputs["H_bedplate"])
        self.assertAlmostEqual(self.outputs["s_rotor"], 2 + 1.5 + 0.5)
        self.assertAlmostEqual(self.outputs["s_stator"], 0.75)
        self.assertAlmostEqual(self.outputs["s_mb1"], 1.5 + 2.0)
        self.assertAlmostEqual(self.outputs["s_mb2"], 1.5)
        self.assertAlmostEqual(self.outputs["x_bedplate"][-1], -5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_inner"][-1], -5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_outer"][-1], -5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate"][0], 0.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_inner"][0], -0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["x_bedplate_outer"][0], 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["D_bedplate"][0], 6.5)
        self.assertAlmostEqual(self.outputs["z_bedplate"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate_inner"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate_outer"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate"][-1], 4.875)
        self.assertAlmostEqual(self.outputs["z_bedplate_inner"][-1], 4.875 - 0.5 * 1.33)
        self.assertAlmostEqual(self.outputs["z_bedplate_outer"][-1], 4.875 + 0.5 * 1.33)
        self.assertAlmostEqual(self.outputs["D_bedplate"][-1], 1.33)

    def testNoTiltDownwind(self):
        """Full geometry check, downwind orientation with zero tilt; x-signs flip."""
        self.discrete_inputs["upwind"] = False
        self.inputs["tilt"] = 0.0
        myobj = lay.DirectLayout()
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        self.assertAlmostEqual(self.outputs["L_nose"], 3.5)
        self.assertAlmostEqual(self.outputs["L_lss"], 3.0)
        self.assertAlmostEqual(self.outputs["L_drive"], 4.5)
        self.assertAlmostEqual(self.outputs["L_bedplate"], self.inputs["overhang"] - self.outputs["L_drive"] - 2)
        self.assertAlmostEqual(self.outputs["H_bedplate"], self.inputs["drive_height"])
        self.assertAlmostEqual(self.outputs["D_bearing1"], 2.3 - 0.05 - 1.33)
        self.assertAlmostEqual(self.outputs["D_bearing2"], 2.3 - 0.05 - 1.33)
        npt.assert_equal(self.outputs["constr_access"][:, -1], 1.33 - 0.08 - 0.9)
        npt.assert_equal(self.outputs["constr_access"][:, 0], 2.3 - 0.1 - 1.33 - 0.25 * 0.9)
        self.assertAlmostEqual(self.outputs["constr_length"], 5 - 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["constr_height"], self.outputs["H_bedplate"])
        self.assertAlmostEqual(self.outputs["s_rotor"], 2 + 1.5 + 0.5)
        self.assertAlmostEqual(self.outputs["s_stator"], 0.75)
        self.assertAlmostEqual(self.outputs["s_mb1"], 1.5 + 2.0)
        self.assertAlmostEqual(self.outputs["s_mb2"], 1.5)
        # Downwind: bedplate x-coordinates are mirrored to the positive side.
        self.assertAlmostEqual(self.outputs["x_bedplate"][-1], 5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_inner"][-1], 5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_outer"][-1], 5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate"][0], 0.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_inner"][0], 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["x_bedplate_outer"][0], -0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["D_bedplate"][0], 6.5)
        self.assertAlmostEqual(self.outputs["z_bedplate"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate_inner"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate_outer"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate"][-1], 4.875)
        self.assertAlmostEqual(self.outputs["z_bedplate_inner"][-1], 4.875 - 0.5 * 1.33)
        self.assertAlmostEqual(self.outputs["z_bedplate_outer"][-1], 4.875 + 0.5 * 1.33)
        self.assertAlmostEqual(self.outputs["D_bedplate"][-1], 1.33)

    def testTiltDownwind(self):
        """Full geometry check, downwind orientation with a 5-degree tilt."""
        self.discrete_inputs["upwind"] = False
        self.inputs["tilt"] = 5.0
        self.inputs["overhang"] = 5 + (2 + 4.5) * ct
        self.inputs["drive_height"] = 4.875 + (2 + 4.5) * st
        myobj = lay.DirectLayout()
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        self.assertAlmostEqual(self.outputs["L_nose"], 3.5)
        self.assertAlmostEqual(self.outputs["L_lss"], 3.0)
        self.assertAlmostEqual(self.outputs["L_drive"], 4.5)
        self.assertAlmostEqual(self.outputs["L_bedplate"], self.inputs["overhang"] - (2 + self.outputs["L_drive"]) * ct)
        self.assertAlmostEqual(
            self.outputs["H_bedplate"], self.inputs["drive_height"] - (2 + self.outputs["L_drive"]) * st
        )
        self.assertAlmostEqual(self.outputs["D_bearing1"], 2.3 - 0.05 - 1.33)
        self.assertAlmostEqual(self.outputs["D_bearing2"], 2.3 - 0.05 - 1.33)
        npt.assert_equal(self.outputs["constr_access"][:, -1], 1.33 - 0.08 - 0.9)
        npt.assert_equal(self.outputs["constr_access"][:, 0], 2.3 - 0.1 - 1.33 - 0.25 * 0.9)
        self.assertAlmostEqual(
            self.outputs["constr_length"],
            self.inputs["overhang"] - (2 + self.outputs["L_drive"]) * ct - 0.5 * self.inputs["D_top"],
        )
        self.assertAlmostEqual(self.outputs["constr_height"], self.outputs["H_bedplate"])
        self.assertAlmostEqual(self.outputs["s_rotor"], 2 + 1.5 + 0.5)
        self.assertAlmostEqual(self.outputs["s_stator"], 0.75)
        self.assertAlmostEqual(self.outputs["s_mb1"], 1.5 + 2.0)
        self.assertAlmostEqual(self.outputs["s_mb2"], 1.5)
        self.assertAlmostEqual(self.outputs["x_bedplate"][-1], 5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_inner"][-1], 5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_outer"][-1], 5.0)
        self.assertAlmostEqual(self.outputs["x_bedplate"][0], 0.0)
        self.assertAlmostEqual(self.outputs["x_bedplate_inner"][0], 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["x_bedplate_outer"][0], -0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["D_bedplate"][0], 6.5)
        self.assertAlmostEqual(self.outputs["z_bedplate"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate_inner"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate_outer"][0], 0.0)
        self.assertAlmostEqual(self.outputs["z_bedplate"][-1], 4.875)
        self.assertAlmostEqual(self.outputs["z_bedplate_inner"][-1], 4.875 - 0.5 * 1.33)
        self.assertAlmostEqual(self.outputs["z_bedplate_outer"][-1], 4.875 + 0.5 * 1.33)
        self.assertAlmostEqual(self.outputs["D_bedplate"][-1], 1.33)

    def testMassValues(self):
        """Check bedplate/lss/nose masses, centers of mass, and inertias against
        closed-form cylinder/torus formulas using round-number inputs."""
        self.discrete_inputs["upwind"] = True
        self.inputs["tilt"] = 0.0
        self.inputs["drive_height"] = 5.0
        self.inputs["D_top"] = 3.0
        self.inputs["overhang"] = 4.5 + 3.5 + 0.5 * 3.0 + 2
        myones = np.ones(5)
        self.inputs["lss_diameter"] = 2.0 * myones
        self.inputs["nose_diameter"] = 3.0 * myones
        self.inputs["lss_wall_thickness"] = 0.05 * myones
        self.inputs["nose_wall_thickness"] = 0.05 * myones
        self.inputs["bedplate_wall_thickness"] = 0.05 * np.ones(npts)
        myobj = lay.DirectLayout()
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        rho = self.inputs["lss_rho"]
        # Quarter-torus bedplate: half-circumference times annular cross-section.
        m_bedplate = 5 * 0.5 * np.pi * np.pi * (1.5 ** 2 - (1.5 - 0.05) ** 2) * rho
        self.assertAlmostEqual(self.outputs["bedplate_mass"], m_bedplate)
        self.assertAlmostEqual(self.outputs["bedplate_cm"][0], np.mean(self.outputs["x_bedplate"]), 0)
        self.assertAlmostEqual(self.outputs["bedplate_cm"][1], 0.0)
        self.assertAlmostEqual(self.outputs["bedplate_cm"][2], np.mean(self.outputs["z_bedplate"]), 0)
        m_lss = rho * np.pi * (1 ** 2 - 0.95 ** 2) * self.outputs["L_lss"]
        self.assertAlmostEqual(self.outputs["lss_mass"], m_lss)
        self.assertAlmostEqual(self.outputs["lss_cm"], 0.5 * (self.outputs["s_lss"][0] + self.outputs["s_lss"][-1]))
        self.assertAlmostEqual(self.outputs["lss_I"][0], 0.5 * m_lss * (1 ** 2 + 0.95 ** 2))
        self.assertAlmostEqual(
            self.outputs["lss_I"][1], (1 / 12) * m_lss * (3 * (1 ** 2 + 0.95 ** 2) + self.outputs["L_lss"] ** 2)
        )
        m_nose = rho * np.pi * (1.5 ** 2 - 1.45 ** 2) * self.outputs["L_nose"]
        self.assertAlmostEqual(self.outputs["nose_mass"], m_nose)
        self.assertAlmostEqual(self.outputs["nose_cm"], 0.5 * (self.outputs["s_nose"][0] + self.outputs["s_nose"][-1]))
        self.assertAlmostEqual(self.outputs["nose_I"][0], 0.5 * m_nose * (1.5 ** 2 + 1.45 ** 2))
        self.assertAlmostEqual(
            self.outputs["nose_I"][1], (1 / 12) * m_nose * (3 * (1.5 ** 2 + 1.45 ** 2) + self.outputs["L_nose"] ** 2)
        )
class TestGearedLayout(unittest.TestCase):
    """Geometry and mass checks for lay.GearedLayout (lss + gearbox + hss)."""

    def setUp(self):
        """Build the shared input/output dictionaries used by every test."""
        self.inputs = {}
        self.outputs = {}
        self.discrete_inputs = {}
        self.discrete_outputs = {}
        self.inputs["L_12"] = 2.0
        self.inputs["L_h1"] = 1.0
        self.inputs["overhang"] = 2.0 + 2.0
        self.inputs["drive_height"] = 4.875
        self.inputs["L_hss"] = 1.5
        self.inputs["L_generator"] = 1.25
        self.inputs["L_gearbox"] = 1.1
        self.inputs["tilt"] = 5.0
        myones = np.ones(2)
        self.inputs["lss_diameter"] = 2.3 * myones
        self.inputs["lss_wall_thickness"] = 0.05 * myones
        self.inputs["hss_diameter"] = 2.0 * myones
        self.inputs["hss_wall_thickness"] = 0.05 * myones
        self.inputs["bedplate_flange_width"] = 1.5
        self.inputs["bedplate_flange_thickness"] = 0.05
        # self.inputs['bedplate_web_height'] = 1.0
        self.inputs["bedplate_web_thickness"] = 0.05
        self.inputs["D_top"] = 6.5
        self.inputs["hub_diameter"] = 4.0
        self.inputs["lss_rho"] = self.inputs["hss_rho"] = self.inputs["bedplate_rho"] = 7850.0
        self.discrete_inputs["upwind"] = True

    def testNoTilt(self):
        """Station coordinates and bedplate dims with zero shaft tilt."""
        self.inputs["tilt"] = 0.0
        myobj = lay.GearedLayout()
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        ds = 6.95 - 2
        self.assertAlmostEqual(self.outputs["L_lss"], 3.1)
        self.assertAlmostEqual(self.outputs["L_drive"], 6.95)
        npt.assert_almost_equal(
            self.outputs["s_drive"],
            np.array([0.0, 0.625, 1.25, 2.0, 2.75, 3.3, 3.85, 3.95, 4.95, 5.95, 6.45, 6.95]) - ds,
        )
        self.assertAlmostEqual(self.outputs["s_generator"], 0.625 - ds)
        self.assertAlmostEqual(self.outputs["s_gearbox"], 3.3 - ds)
        self.assertAlmostEqual(self.outputs["s_mb1"], 5.95 - ds)
        self.assertAlmostEqual(self.outputs["s_mb2"], 3.95 - ds)
        self.assertAlmostEqual(self.outputs["L_bedplate"], 6.95)
        self.assertAlmostEqual(self.outputs["H_bedplate"], 4.875)
        self.assertAlmostEqual(self.outputs["bedplate_web_height"], 4.725)
        self.assertAlmostEqual(self.outputs["constr_length"], 6.95 - 2 - 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["constr_height"], 4.875)

    def testTilt(self):
        """Station coordinates and bedplate dims with the default 5-degree tilt."""
        myobj = lay.GearedLayout()
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        ds = 6.95 + 2 - 4 / ct
        self.assertAlmostEqual(self.outputs["L_lss"], 3.1)
        self.assertAlmostEqual(self.outputs["L_drive"], 6.95)
        npt.assert_almost_equal(
            self.outputs["s_drive"],
            np.array([0.0, 0.625, 1.25, 2.0, 2.75, 3.3, 3.85, 3.95, 4.95, 5.95, 6.45, 6.95]) - ds,
        )
        self.assertAlmostEqual(self.outputs["s_generator"], 0.625 - ds)
        self.assertAlmostEqual(self.outputs["s_gearbox"], 3.3 - ds)
        self.assertAlmostEqual(self.outputs["s_mb1"], 5.95 - ds)
        self.assertAlmostEqual(self.outputs["s_mb2"], 3.95 - ds)
        self.assertAlmostEqual(self.outputs["L_bedplate"], 6.95 * ct)
        self.assertAlmostEqual(self.outputs["H_bedplate"], 4.875 - (2 + 6.95) * st)
        self.assertAlmostEqual(self.outputs["bedplate_web_height"], 4.725 - (2 + 6.95) * st)
        self.assertAlmostEqual(self.outputs["constr_length"], (2 + 6.95) * ct - 2 - 2 - 0.5 * 6.5)
        self.assertAlmostEqual(self.outputs["constr_height"], 4.875 - (2 + 6.95) * st)

    def testMassValues(self):
        """Check bedplate/lss/hss masses, cm, and inertias against closed-form
        formulas, for both upwind and downwind orientations."""
        self.inputs["tilt"] = 0.0
        self.discrete_inputs["upwind"] = True
        myones = np.ones(5)
        self.inputs["lss_diameter"] = 2.0 * myones
        self.inputs["lss_wall_thickness"] = 0.05 * myones
        myones = np.ones(3)
        self.inputs["hss_diameter"] = 1.5 * myones
        self.inputs["hss_wall_thickness"] = 0.04 * myones
        myobj = lay.GearedLayout()
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        rho = self.inputs["lss_rho"]
        # Two I-beams: (two flanges + web) cross-section times bedplate length.
        m_bedplate = 2 * rho * (2 * 1.5 * 0.05 + 4.725 * 0.05) * 6.95
        self.assertAlmostEqual(self.outputs["bedplate_mass"], m_bedplate)
        npt.assert_almost_equal(self.outputs["bedplate_cm"], np.r_[0.5 * 6.95 - 2 - 2.0, 0.0, 0.5 * 4.725 + 0.05])
        m_lss = rho * np.pi * (1 ** 2 - 0.95 ** 2) * self.outputs["L_lss"]
        self.assertAlmostEqual(self.outputs["lss_mass"], m_lss)
        self.assertAlmostEqual(self.outputs["lss_cm"], 0.5 * (self.outputs["s_lss"][0] + self.outputs["s_lss"][-1]))
        self.assertAlmostEqual(self.outputs["lss_I"][0], 0.5 * m_lss * (1 ** 2 + 0.95 ** 2))
        self.assertAlmostEqual(
            self.outputs["lss_I"][1], (1 / 12) * m_lss * (3 * (1 ** 2 + 0.95 ** 2) + self.outputs["L_lss"] ** 2)
        )
        m_hss = rho * np.pi * (0.75 ** 2 - 0.71 ** 2) * self.inputs["L_hss"]
        self.assertAlmostEqual(self.outputs["hss_mass"], m_hss)
        self.assertAlmostEqual(self.outputs["hss_cm"], 0.5 * (self.outputs["s_hss"][0] + self.outputs["s_hss"][-1]))
        self.assertAlmostEqual(self.outputs["hss_I"][0], 0.5 * m_hss * (0.75 ** 2 + 0.71 ** 2))
        self.assertAlmostEqual(
            self.outputs["hss_I"][1], (1 / 12) * m_hss * (3 * (0.75 ** 2 + 0.71 ** 2) + self.inputs["L_hss"] ** 2)
        )
        # Downwind flips the sign of the bedplate cm x-coordinate only.
        self.discrete_inputs["upwind"] = False
        myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
        npt.assert_almost_equal(self.outputs["bedplate_cm"], np.r_[(2 + 2 - 0.5 * 6.95), 0.0, 0.5 * 4.725 + 0.05])
        self.assertAlmostEqual(self.outputs["lss_cm"], 0.5 * (self.outputs["s_lss"][0] + self.outputs["s_lss"][-1]))
        self.assertAlmostEqual(self.outputs["hss_cm"], 0.5 * (self.outputs["s_hss"][0] + self.outputs["s_hss"][-1]))
def suite():
    """Assemble the direct- and geared-layout test cases into one test suite."""
    # unittest.makeSuite was deprecated in Python 3.11 and removed in 3.13;
    # TestLoader.loadTestsFromTestCase is the supported replacement.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(TestDirectLayout))
    suite.addTest(loader.loadTestsFromTestCase(TestGearedLayout))
    return suite
if __name__ == "__main__":
    # Run the suite directly and map the result onto a shell exit code
    # (0 on success, 1 on any failure/error) so CI can consume it.
    result = unittest.TextTestRunner().run(suite())
    if result.wasSuccessful():
        exit(0)
    else:
        exit(1)
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.frame import Frame
from pynamics.variable_types import Differentiable,Constant,Variable
from pynamics.system import System
from pynamics.body import Body
from pynamics.dyadic import Dyadic
from pynamics.output import Output
from pynamics.output import PointsOutput
from pynamics.particle import Particle
import pynamics.integration
import sympy
import numpy
import matplotlib.pyplot as plt
plt.ion()  # interactive plotting so figures render without blocking
from math import pi
# Register a fresh pynamics system; all Constants/Differentiables below
# attach themselves to it.
system = System()
pynamics.set_system(__name__,system)
# Integrator tolerances and constraint-stabilization gains (Baumgarte-style
# alpha/beta passed to the post-invert solver below).
error = 1e-3
error_tol = 1e-3
alpha = 1e6
beta = 1e5
#preload1 = Constant('preload1',0*pi/180,system)
# Model constants: offset a, length l1, masses, spring stiffness k and rest
# length l0, damping b, gravity, and body A inertia terms.
a = Constant(0,'a',system)
l1 = Constant(1,'l1',system)
m1 = Constant(1e1,'m1',system)
m2 = Constant(1e0,'m2',system)
k = Constant(1e4,'k',system)
l0 = Constant(1,'l0',system)
b = Constant(5e0,'b',system)
g = Constant(9.81,'g',system)
Ixx_A = Constant(1,'Ixx_A',system)
Iyy_A = Constant(1,'Iyy_A',system)
Izz_A = Constant(1,'Izz_A',system)
# Time grid: 10 seconds sampled at 30 fps.
tinitial = 0
tfinal = 10
tstep = 1/30
t = numpy.r_[tinitial:tfinal:tstep]
# Generalized coordinates: body A planar position (x1,y1), rotation q1, and
# the distance y2 of particle 2 along -A.y.
x1,x1_d,x1_dd = Differentiable('x1',system)
y1,y1_d,y1_dd = Differentiable('y1',system)
q1,q1_d,q1_dd = Differentiable('q1',system)
# NOTE(review): symbol label is 'x2' but it is bound to y2 and used as a
# y-direction coordinate — likely a leftover label; confirm before renaming.
y2,y2_d,y2_dd = Differentiable('x2',system)
initialvalues = {}
initialvalues[q1]=0
initialvalues[q1_d]=.01
initialvalues[x1]=0
initialvalues[x1_d]=0
initialvalues[y1]=2
initialvalues[y1_d]=0
initialvalues[y2]=1
initialvalues[y2_d]=0
# Order the initial conditions to match the system's state-variable ordering.
statevariables = system.get_state_variables()
ini = [initialvalues[item] for item in statevariables]
# Newtonian frame N and body frame A rotated about z by q1.
N = Frame('N',system)
A = Frame('A',system)
system.set_newtonian(N)
A.rotate_fixed_axis(N,[0,0,1],q1,system)
pOrigin = 0*N.x
pm1 = x1*N.x +y1*N.y
pm2 = pm1 +a*A.x - y2*A.y
IA = Dyadic.build(A,Ixx_A,Iyy_A,Izz_A)
BodyA = Body('BodyA',A,pm1,m1,IA,system)
Particle2 = Particle(pm2,m2,'Particle2',system)
vpm1 = pm1.time_derivative(N,system)
vpm2 = pm2.time_derivative(N,system)
# Spring/damper acts along the separation vector l between the two masses.
l = pm1-pm2
vl = l.time_derivative(N,system)
l_d_scalar=vl.length()
stretch = l.length() - l0
ul_ = l.unit()
system.add_spring_force1(k,stretch*ul_,vl)
#system.addforce(-k*stretch*ul_,vpm1)
#system.addforce(k*stretch*ul_,vpm2)
# Equal and opposite damping forces applied to each mass.
system.addforce(-b*l_d_scalar*ul_,vpm1)
system.addforce(b*l_d_scalar*ul_,vpm2)
#system.addforce(k*l*ul_,vpm2)
#system.addforce(-b*vl,vl)
#system.addforce(-b*vl,vl)
#system.addforce(-b*vl,vl)
system.addforcegravity(-g*N.y)
#system.addforcegravity(-g*N.y)
#system.addforcegravity(-g*N.y)
# Ground-contact constraints: both masses held at y = 0 when active.
eq1 = []
eq1.append(pm1.dot(N.y)-0)
eq1.append(pm2.dot(N.y)-0)
eq1_d=[system.derivative(item) for item in eq1]
eq1_dd=[system.derivative(system.derivative(item)) for item in eq1]
# NOTE(review): 'a' and 'b' below rebind the Constants defined above; the
# Constants were already used in the expressions, so this is safe but
# confusing — consider distinct names.
a = []
a.append(0-pm1.dot(N.y))
a.append(0-pm2.dot(N.y))
# (x + |x|) is 0 below ground, positive above: used as contact activation.
b = [(item+abs(item)) for item in a]
# NOTE(review): x1/x2 here rebind the generalized coordinate x1 as output
# expressions (vertical heights) — confirm this shadowing is intended.
x1 = BodyA.pCM.dot(N.y)
x2 = Particle2.pCM.dot(N.y)
f,ma = system.getdynamics()
#func = system.state_space_post_invert(f,ma,eq)
# Build the constrained state-space function with activation b, then integrate.
func = system.state_space_post_invert2(f,ma,eq1_dd,eq1_d,eq1,eq_active = b)
states=pynamics.integration.integrate_odeint(func,ini,t,rtol = error, atol = error, args=({'alpha':alpha,'beta':beta, 'constants':system.constant_values},),full_output = 1,mxstep = int(1e5))
states = states[0]
# Energies and post-processing outputs: heights, spring length, total energy.
KE = system.get_KE()
PE = system.getPEGravity(pOrigin) - system.getPESprings()
output = Output([x1,x2,l.length(), KE-PE],system)
y = output.calc(states,t)
# Figure 0: both heights; figure 1: spring length; figure 2: energy balance.
plt.figure(0)
plt.plot(t,y[:,0])
plt.plot(t,y[:,1])
plt.axis('equal')
plt.figure(1)
plt.plot(t,y[:,2])
plt.axis('equal')
plt.figure(2)
plt.plot(t,y[:,3])
#plt.axis('equal')
points = [BodyA.pCM,Particle2.pCM]
points = PointsOutput(points)
points.calc(states,t)
# points.animate(fps = 30, movie_name='bouncy2.mp4',lw=2)
"""Provides data structures for encapsulating loss data."""
import numpy
class Loss:
    """A labelled pair of training loss and precision series.

    .. py:attribute:: label

       A string used in graph legends for this loss data.

    .. py:attribute:: loss_values

       A numpy.ndarray holding the training loss series.

    .. py:attribute:: precision_values

       A numpy.ndarray holding the training precision series.
    """

    def __init__(
        self, label: str, loss_values: numpy.ndarray, precision_values: numpy.ndarray
    ):
        self.label, self.loss_values, self.precision_values = (
            label,
            loss_values,
            precision_values,
        )
# Alias for readability: a plain list whose elements are Loss objects.
LossList = list
def has_invalid_values(loss: "Loss") -> bool:
    """Determine if loss or precision data has invalid values.

    :param Loss loss: The loss data to check for invalid values.
    :returns: True is returned if the data has at least one invalid value.
        False is returned if all values in the data are valid.
    :rtype: bool

    This function will tell you if the data has any values that are NaN,
    +infinity, or -infinity.
    """
    # numpy.isfinite is False exactly for NaN/+inf/-inf, so the data is
    # invalid when any element of either array is non-finite. (Simpler than
    # the previous any(logical_not(...)) double negation, and returns a
    # plain bool.)
    return bool(
        not numpy.isfinite(loss.loss_values).all()
        or not numpy.isfinite(loss.precision_values).all()
    )
def sort_by_loss(losses: "LossList", algorithm: str) -> None:
    """Sort the loss data according to the specified algorithm.

    :param LossList losses: The list of loss data to sort. This list is sorted
        in place.
    :param str algorithm: The algorithm to use for sorting. See the loss
        configuration item ``sort_algorithm`` for acceptable values.
    :returns: None

    This function sorts the loss data, based on loss values (not precision
    values). The list is sorted from best to worst. For loss, best is always the
    lowest. The list below defines what is compared to determine what is lowest.
    For the examples described below, assume two sets of loss data, with the
    given values:

    .. code-block::

        baseline = [ 5, 2, 1, 2 ]
        new_loss = [ 4, 3, 2, 1 ]

    :last: Comparisons are made between the last value in each list of loss
        data. Using this algorithm for the example data, ``new_loss`` will be
        sorted ahead of ``baseline``. ``baseline[-1]`` is 2, while
        ``new_loss[-1]`` is 1.
    """
    # Unknown algorithms deliberately leave the list untouched; add new
    # branches here as more sort algorithms are supported.
    if algorithm == "last":
        losses.sort(key=lambda loss: loss.loss_values[-1])
def sort_by_precision(losses: "LossList", algorithm: str) -> None:
    """Sort the loss data according to the specified algorithm.

    :param LossList losses: The list of loss data to sort. This list is sorted
        in place.
    :param str algorithm: The algorithm to use for sorting. See the loss
        configuration item ``sort_algorithm`` for acceptable values.
    :returns: None

    This function sorts the loss data, based on precision values (not loss
    values). The list is sorted from best to worst. For precision, best is
    always the highest. The list below defines what is compared to determine
    what is highest. For the examples described below, assume two sets of
    precision data, with the given values:

    .. code-block::

        baseline = [ 5, 2, 1, 2 ]
        new_loss = [ 4, 3, 2, 1 ]

    :last: Comparisons are made between the last value in each list of loss
        data. Using this algorithm for the example data, ``baseline`` will be
        sorted ahead of ``new_loss``. ``baseline[-1]`` is 2, while
        ``new_loss[-1]`` is 1.
    """
    # Unknown algorithms deliberately leave the list untouched; add new
    # branches here as more sort algorithms are supported.
    if algorithm == "last":
        losses.sort(key=lambda loss: loss.precision_values[-1], reverse=True)
"""
This file is the main source file of the ProcessMCRaT library which is used to read and process
the results of a MCRaT simulation
Written by: Tyler Parsotan April 2021
"""
import os
import astropy as ap
import h5py as h5
import numpy as np
from astropy import units as u
from astropy import constants as const
from astropy.units import UnitConversionError
class PhotonList(object):
    """Per-photon data (positions, lab/comoving 4-momenta, Stokes parameters,
    weights, scattering counts, types) loaded from one MCRaT frame.

    All stored quantities are in cgs units.
    """
    def __init__(self, r0, r1, r2, p0, p1, p2, p3, weight, scatterings, file_index, comv_p0=None, comv_p1=None, comv_p2=None,\
                 comv_p3=None, s0=None, s1=None, s2=None, s3=None, photon_type=None):
        """
        Initializes the 4 momenta (lab and comoving), position, stokes parameters,
        weight, number of scatterings, and the photon type of each photon in the
        MCRaT file. Units are cgs units.

        :param r0: photon position, first component
        :param r1: photon position, second component
        :param r2: photon position, third component
        :param p0: lab-frame 4-momentum, time component (energy / c)
        :param p1: lab-frame 4-momentum, x component
        :param p2: lab-frame 4-momentum, y component
        :param p3: lab-frame 4-momentum, z component
        :param weight: Monte-Carlo weight of each photon
        :param scatterings: number of scatterings each photon has undergone
        :param file_index: index of each photon within the frame file
        :param comv_p0: comoving-frame 4-momentum, time component (optional)
        :param comv_p1: comoving-frame 4-momentum, x component (optional)
        :param comv_p2: comoving-frame 4-momentum, y component (optional)
        :param comv_p3: comoving-frame 4-momentum, z component (optional)
        :param s0: Stokes parameter I (optional)
        :param s1: Stokes parameter Q (optional)
        :param s2: Stokes parameter U (optional)
        :param s3: Stokes parameter V (optional)
        :param photon_type: per-photon type flag (optional)
        """
        self.p0=p0
        self.p1=p1
        self.p2=p2
        self.p3=p3
        self.comv_p0=comv_p0
        self.comv_p1=comv_p1
        self.comv_p2=comv_p2
        self.comv_p3=comv_p3
        self.r0=r0
        self.r1=r1
        self.r2=r2
        self.s0=s0
        self.s1=s1
        self.s2=s2
        self.s3=s3
        self.weight=weight
        self.scatterings=scatterings
        self.photon_type=photon_type
        self.file_index=file_index
    def get_energies(self, unit=u.keV):
        """Return the lab-frame photon energies (E = p0*c, in erg) converted
        to `unit`; wavelength units are handled via spectral equivalencies."""
        try:
            return self.p0 * (const.c.cgs.value * u.erg).to(unit).value
        except UnitConversionError:
            #trying to get wavelength so need to convert to si units for energy first
            x=self.p0 * (const.c.cgs.value * u.erg)
            return x.to(unit, equivalencies=u.spectral()).value
    def get_comv_energies(self, unit=u.keV):
        """Same as get_energies but for the comoving-frame 4-momenta."""
        try:
            return self.comv_p0*(const.c.cgs.value*u.erg).to(unit).value
        except UnitConversionError:
            #trying to get wavelength so need to convert to si units for energy first
            x=self.comv_p0 * (const.c.cgs.value * u.erg)
            return x.to(unit, equivalencies=u.spectral()).value
def curdir():
    """Return the current working directory with a trailing '/' appended."""
    return f"{os.getcwd()}/"
class McratSimLoad(object):
    """Loads per-frame MCRaT photon data from a simulation output directory."""

    def __init__(self, file_directory=None):
        """
        Initialize the loader with the directory that the MCRaT files are
        located in.

        :param file_directory: directory holding the mcdata_*.h5 files; when
            None, the current working directory is used.
        """
        if file_directory is not None:
            self.file_directory=file_directory
        else:
            self.file_directory=curdir()

    def load_frame(self, frame_num, read_comv=False, read_stokes=False, read_type=False):
        """
        Reads in MCRaT data for current version of MCRaT that outputs data in hdf5 files. Also has support for various
        MCRaT switches that can be turned on by the user.

        :param frame_num: integer frame number of the mcdata file to read
        :param read_comv: if True, also read the comoving-frame 4-momenta
        :param read_stokes: if True, also read the Stokes parameters
        :param read_type: if True, also read the per-photon type characters
        :return: None; the result is stored in self.loaded_photons and the
            read flags / frame number are recorded on the instance
        """
        # Idiom fix: the builtin str replaces the non-idiomatic np.str_ call
        # (identical result); os.path.join tolerates directories supplied with
        # or without a trailing separator, unlike the old '+' concatenation.
        file_name = os.path.join(self.file_directory, "mcdata_" + str(frame_num) + '.h5')
        with h5.File(file_name, 'r') as f:
            pw = f['PW'][:]
            ns = f['NS'][:]
            p0 = f['P0'][:]
            p1 = f['P1'][:]
            p2 = f['P2'][:]
            p3 = f['P3'][:]
            r0 = f['R0'][:]
            r1 = f['R1'][:]
            r2 = f['R2'][:]
            if read_stokes:
                s0 = f['S0'][:]
                s1 = f['S1'][:]
                s2 = f['S2'][:]
                s3 = f['S3'][:]
            else:
                # Placeholders so PhotonList attributes always exist.
                s0 = np.zeros(pw.size)
                s1 = np.zeros(pw.size)
                s2 = np.zeros(pw.size)
                s3 = np.zeros(pw.size)
            if read_comv:
                comv_p0 = f['COMV_P0'][:]
                comv_p1 = f['COMV_P1'][:]
                comv_p2 = f['COMV_P2'][:]
                comv_p3 = f['COMV_P3'][:]
            else:
                comv_p0 = np.zeros(pw.size)
                comv_p1 = np.zeros(pw.size)
                comv_p2 = np.zeros(pw.size)
                comv_p3 = np.zeros(pw.size)
            if read_type:
                pt = f['PT'][:]
                # Decode the stored bytes into one single-character string
                # per photon.
                pt = np.array([i for i in bytes(pt).decode()])
            else:
                pt = np.full(pw.size, None)
        idx=np.arange(pw.size)
        photons=PhotonList(r0, r1, r2, p0, p1, p2, p3, pw, ns, idx,
                           comv_p0=comv_p0, comv_p1=comv_p1,
                           comv_p2=comv_p2, comv_p3=comv_p3,
                           s0=s0, s1=s1, s2=s2, s3=s3, photon_type=pt)
        self.loaded_photons=photons
        self.read_stokes=read_stokes
        self.read_comv=read_comv
        self.read_type=read_type
        self.frame_num=frame_num
from DataSocket import TCPSendSocket, RAW, TCPReceiveSocket
import time
import numpy as np
import threading
import sys
# Network configuration: data is sent out on send_port; the peer echoes it
# back to receive_port. '0.0.0.0' binds all interfaces.
send_port = 4242
receive_port = 4343
ip = '0.0.0.0'
start_time = time.time()  # updated by the sender just before each transmit
# define function to print the echo back from matlab
def print_data(data):
    """Report the size of the echoed float32 array and the round-trip time."""
    global start_time
    elapsed = time.time() - start_time
    samples = np.frombuffer(data, dtype='float32').shape[0]
    print('length of returned array:', samples, 'Time for full trip:', elapsed)
# create a send and receive socket
send_socket = TCPSendSocket(tcp_port=send_port, tcp_ip='', send_type=RAW)
rec_socket = TCPReceiveSocket(tcp_port=receive_port, handler_function=print_data, receive_as_raw=True, receive_buffer_size=65536)
# start the sockets
send_socket.start()
rec_socket.start()
# Event used to signal the background sender thread to exit.
stop_flag = threading.Event()
def send_sig():
    """Send random 100x100 float32 arrays every 0.5 s until stop_flag is set.

    Runs in a background thread; start_time is stamped just before each send
    so the receive callback can measure the full round-trip latency.
    """
    global start_time
    while not stop_flag.is_set():
        data = np.random.random((100, 100))  # create 100x100 array of random numbers
        # Bug fix: ndarray.tostring() was deprecated and removed in NumPy 2.0;
        # tobytes() produces the identical byte string.
        data_as_bytes = data.astype('float32').flatten().tobytes()  # flatten it before sending
        start_time = time.time()
        send_socket.send_data(data_as_bytes)
        time.sleep(0.5)
# Run the sender in a worker thread and block until the user presses enter.
thread = threading.Thread(target=send_sig)
thread.start()
input('Press enter to shutdown.')
# Orderly shutdown: stop the sender first, then tear down both sockets.
stop_flag.set()
thread.join()
# close the sockets
rec_socket.stop()
send_socket.stop()
sys.exit()
import copy
import os

import numpy as np
import torch
from sklearn.cluster import KMeans
from torch.optim.lr_scheduler import LambdaLR

from utils.checkings import *
# Must be set before the first KMeans fit so OpenMP picks it up.
os.environ["OMP_NUM_THREADS"] = "8"  # Limit the CPU usage during KMeans clustering
def create_folders(names, data_dir):
    """Create the per-dataset output folder trees and processed-data folders.

    Parameters
    ----------
    names : iterable of str
        Root output folders; under each one, a <dataset>/<scene> subfolder is
        created for every supported dataset/scene.
    data_dir : str
        Data root; a <dataset>/processed folder is created for each dataset.
    """
    datasets = ["ethucy", "SDD"]
    folders = {"ethucy": ["eth", "hotel", "univ", "zara1", "zara2"],
               "SDD": ["SDD"]}
    for folder_name in names:
        for dataset in datasets:
            for sub_folder in folders[dataset]:
                # exist_ok avoids the check-then-create race of the previous
                # os.path.exists() + os.makedirs() pattern.
                os.makedirs(os.path.join(folder_name, dataset, sub_folder), exist_ok=True)
    # Bug fix: the old code created the processed dir inside the names loop
    # using the leaked loop variable, so only the LAST dataset ever got a
    # processed folder; now one is created per dataset.
    for dataset in datasets:
        os.makedirs(os.path.join(data_dir, dataset, "processed"), exist_ok=True)
def display_performance(perf_dict):
    """Print current ADE/FDE numbers, rounded to 4 decimal places.

    Encoder entries hold a flat [ade, fde] pair; every other entry holds a
    history list whose last element is the [ade, fde] pair to display.
    """
    print("==> Current Performances (ADE & FDE):")
    for name, record in perf_dict.items():
        shown = copy.deepcopy(record)
        if name in ("Obs_Encoder", "Pred_Encoder"):
            shown[0] = np.round(shown[0], 4)
            shown[1] = np.round(shown[1], 4)
            print("  ", name, shown)
        else:
            shown[-1][0] = np.round(shown[-1][0], 4)
            shown[-1][1] = np.round(shown[-1][1], 4)
            print("  ", name, shown[-1])
def gen_memory(data, model, save_dir=None, split=10, bi_shot=False, speed_data=None, scene_data=None):
    """Encode `data` into memory vectors in `split` chunks to bound peak memory.

    When `bi_shot` is True, `speed_data` and `scene_data` are sliced alongside
    `data` and passed to model.calc_memory as extra inputs. The stacked result
    is optionally cached to <save_dir>/Memory.npy.
    """
    batch = len(data) // split
    # First split-1 chunks are equally sized; the last chunk absorbs the remainder.
    chunk_slices = [slice(batch * i, batch * (i + 1)) for i in range(split - 1)]
    chunk_slices.append(slice(batch * (split - 1), None))
    pieces = []
    for sl in chunk_slices:
        if bi_shot:
            encoded = model.calc_memory(data[sl], speed_data[sl], scene_data[sl])
        else:
            encoded = model.calc_memory(data[sl])
        pieces.append(encoded.numpy())
    memory_data = np.concatenate(pieces, axis=0)
    if save_dir is not None:
        np.save(os.path.join(save_dir, "Memory.npy"), memory_data)
    return memory_data
def get_memory_data(dataset, save_dir, args):
    """Load (or build and cache) the memory bank and its K-means clustering.

    Memory is reused from <save_dir>/memory.npy when present and consistent
    with `args`; the clustering is likewise reused from cluster_result.npy /
    size_array.npy, and recomputed otherwise.
    """
    mem_path = os.path.join(save_dir, "memory.npy")
    if os.path.exists(mem_path) and check_memory_consistency(save_dir, args):
        memory_data = np.load(mem_path)
    else:
        print("==> Generating Memory...")
        memory_data = torch.cat([dataset.obs_enc, dataset.pred_enc], dim=1).cpu().numpy()
        np.save(mem_path, memory_data)
    print("==> Memory Shape", memory_data.shape)
    try:
        cluster_result = np.load(os.path.join(save_dir, "cluster_result.npy"))
        size_array = np.load(os.path.join(save_dir, "size_array.npy"))
        if not check_cluster_consistency(save_dir, size_array, args):
            # Stale clustering: fall through to the recompute path below.
            raise FileNotFoundError
    except FileNotFoundError:
        print("==> Clustering Memory...")
        cluster_result, size_array = cluster_data(copy.deepcopy(memory_data),
                                                  n_cluster=args.n_cluster,
                                                  save_dir=save_dir)
    return memory_data, cluster_result, size_array
def get_encoded_obs(net, data, scene, bi_shot):
    """Encode observed trajectories, optionally pairing them with scene context.

    `scene` must be provided exactly when `bi_shot` is True.
    """
    if bi_shot:
        assert scene is not None
        return net.calc_obs_memory([data, scene], bi_shot=True)
    assert scene is None
    return net.calc_obs_memory(data)
def get_modality(memory_data, cluster_result, num_cluster):
    """Average each cluster's memory vectors into one modality vector.

    Returns a (num_cluster, dim) tensor whose row i is the mean of all
    memory_data rows assigned to cluster i.
    """
    members = {}
    for idx, label in enumerate(cluster_result):
        members.setdefault(label, []).append(idx)
    centers = [torch.mean(memory_data[members[c]], dim=0) for c in range(num_cluster)]
    return torch.stack(centers, dim=0)
def cluster_data(data, n_cluster=200, save_dir=None):
    """Run K-means over the memory vectors, optionally caching results to disk."""
    if not isinstance(data, np.ndarray):
        data = np.array(data).astype(float)
    labels, sizes = k_means_cluster(data, n_cluster)
    if save_dir is not None:
        np.save(os.path.join(save_dir, "cluster_result.npy"), labels)
        np.save(os.path.join(save_dir, "size_array.npy"), sizes)
    return labels, sizes
def k_means_cluster(data, n_clusters, init=20):
    """Cluster the rows of `data` with K-means.

    Args:
        data: (n_samples, n_features) array.
        n_clusters: number of clusters to fit.
        init: sklearn `n_init` (number of K-means restarts).

    Returns:
        cluster_result: np.ndarray, cluster_result[i] -> class of item i
        size_array: np.ndarray, size_array[i] -> #samples in cluster i
    """
    clustering = KMeans(n_clusters=n_clusters, n_init=init).fit(data)
    cluster_result = np.array(clustering.labels_)  # cluster_result[i] -> class of item i
    # np.bincount replaces the manual dict count; `minlength` keeps one slot
    # per centroid, so a (rare) empty cluster yields size 0 instead of the
    # KeyError the original dict lookup raised.
    size_array = np.bincount(cluster_result, minlength=len(clustering.cluster_centers_))
    return cluster_result, size_array
def get_lr_scheduler(lr_policy, optimizer, max_iter=None):
    """Build an LR scheduler from a policy dict.

    Supports lr_policy["name"] == "Poly": the learning rate is scaled by
    (1 - iter / max_iter) ** lr_policy["power"].

    Raises:
        AssertionError: if max_iter is missing/invalid for the Poly policy.
        NotImplementedError: for unknown policy names.
    """
    if lr_policy['name'] == "Poly":
        # Explicit None check: the original bare `max_iter > 0` raised a
        # confusing TypeError when max_iter was left at its default.
        assert max_iter is not None and max_iter > 0
        num_groups = len(optimizer.param_groups)

        def lambda_f(cur_iter):
            return (1 - (cur_iter * 1.0) / max_iter) ** lr_policy['power']

        scheduler = LambdaLR(optimizer, lr_lambda=[lambda_f] * num_groups)
    else:
        raise NotImplementedError("lr policy not supported")
    return scheduler
"""
Copyright 2019 Zachary Phillips, Waller Lab, University of California, Berkeley
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import scipy as sp
import math
from . import config
import builtins
from functools import reduce
from .decorators import numpy_function, real_valued_function
import skimage
# Allow division by zero to return NaN without a warning
np.seterr(divide='ignore', invalid='ignore')
# Try to import arrayfire - continue if import fails
try:
import arrayfire
except ImportError:
pass
# Try to import torch - continue if import fails
try:
import torch
except ImportError:
pass
def bcast(X, v):
    """Performs array broadcasting along last dimension of array."""
    backend = getBackend(X)
    if backend == 'numpy':
        return X * v
    if backend == 'arrayfire':
        return arrayfire.broadcast(lambda A, b: A * b, X, v.T)
    raise NotImplementedError(
        'Backend %s is not implemented!' % backend)
def next_even_number(val):
    """Round `val` up to the nearest even integer (even inputs are unchanged)."""
    return 2 * math.ceil(val / 2)
def next_fast_even_number(val):
    """Search upward from `val` for a value whose scipy fast FFT length
    coincides with its next even number."""
    candidate = val
    while sp.fftpack.next_fast_len(candidate) != next_even_number(candidate):
        candidate = sp.fftpack.next_fast_len(candidate + 1)
    return candidate
def setArrayfireBackend(af_backend):
    """
    A function which sets the default backend for the arrayfire interface

    Raises:
        ValueError: if `af_backend` is not among the available backends.
    """
    if af_backend in arrayfire.get_available_backends():
        arrayfire.set_backend(af_backend)
    else:
        # %s (not %d): af_backend is a string, so the original %d format
        # itself raised a TypeError while building this error message.
        raise ValueError('Backend %s is not supported! Available backends: %s' %
                         (af_backend, arrayfire.get_available_backends()))
def getArrayfireBackend():
    """
    A function that gets the current backend of the arrayfire interface
    """
    # Delegates directly to arrayfire; raises NameError if the optional
    # arrayfire import at the top of this module failed.
    return arrayfire.get_active_backend()
def getNativeDatatype(dtype_in, backend):
    """
    A function to get the correct datatype class given a datatype label and backend

    `dtype_in` may be one of this library's string labels (note the shifted
    naming: label 'complex32' maps to numpy complex64, label 'complex64' to
    numpy complex128) or a native dtype object, which is normalized first.

    Raises:
        ValueError: for unsupported datatype/backend combinations.
    """
    # Check to see if the dtype is of numpy base class.
    # Normalize to str before substring tests: the original applied `in`
    # directly to the dtype object, which raises TypeError for non-iterable
    # dtype classes such as np.dtype('complex64').
    if type(dtype_in) is not str:
        dtype_str = str(dtype_in)
        if 'complex64' in dtype_str:
            dtype_in = 'complex32'
        elif 'complex128' in dtype_str:
            dtype_in = 'complex64'
        else:
            dtype_in = dtype_str
    if backend == 'numpy':
        if dtype_in == 'complex32':
            return np.complex64
        elif dtype_in == 'complex64':
            return np.complex128
        elif dtype_in == 'uint16':
            return np.uint16
        elif dtype_in in config.valid_dtypes:
            return np.dtype(dtype_in)
    elif backend == 'arrayfire':
        if dtype_in == 'complex32':
            return (arrayfire.Dtype.c32)
        elif dtype_in == 'complex64':
            return (arrayfire.Dtype.c64)
        elif dtype_in == 'float32':
            return (arrayfire.Dtype.f32)
        elif dtype_in == 'float64':
            return (arrayfire.Dtype.f64)
        elif dtype_in == 'int16':
            return (arrayfire.Dtype.s16)
        elif dtype_in == 'uint16':
            return (arrayfire.Dtype.u16)
        elif dtype_in == 'int32':
            return (arrayfire.Dtype.s32)
        elif dtype_in == 'uint32':
            return (arrayfire.Dtype.u32)
        elif dtype_in == 'int64':
            return (arrayfire.Dtype.s64)
        elif dtype_in == 'uint64':
            return (arrayfire.Dtype.u64)
        else:
            raise ValueError(
                'Invalid datatype/backend combination (dtype=%s, backend=%s)' %
                (dtype_in, backend))
    elif backend == 'torch':
        if dtype_in == 'complex32':
            raise ValueError('Pytorch does not support complex dtypes.')
        elif dtype_in == 'complex64':
            raise ValueError('Pytorch does not support complex dtypes.')
        elif dtype_in == 'float32':
            return torch.float32
        elif dtype_in == 'float64':
            return torch.float64
        elif dtype_in == 'int16':
            return torch.int16
        elif dtype_in == 'uint16':
            raise ValueError('Pytorch does not support unsigned dtypes.')
        elif dtype_in == 'int32':
            return torch.int32
        elif dtype_in == 'uint32':
            raise ValueError('Pytorch does not support unsigned dtypes.')
        elif dtype_in == 'int64':
            return torch.int64
        elif dtype_in == 'uint64':
            raise ValueError('Pytorch does not support unsigned dtypes.')
        else:
            raise ValueError(
                'Invalid datatype/backend combination (dtype=%s, backend=%s)' %
                (dtype_in, backend))
def getBackend(x):
    """Identify the compute backend of `x`.

    Returns one of 'numpy', 'arrayfire', 'torch', 'scalar', 'list', 'tuple',
    an Operator's own backend, None for None, or type(x) as a fallback.
    """
    cls_label = str(x.__class__)
    if 'numpy' in cls_label:
        return 'numpy'
    if 'arrayfire' in cls_label:
        return 'arrayfire'
    if 'torch' in cls_label:
        return 'torch'
    if cls_label in ("<class 'complex'>", "<class 'float'>", "<class 'int'>"):
        return 'scalar'  # This is a hack for now, but numpy will treat scalars as if they were numpy types
    if 'Operator' in cls_label:
        return x.backend
    if type(x) is list:
        return 'list'
    if type(x) is tuple:
        return 'tuple'
    if x is None:
        return None
    return type(x)
    # raise ValueError("Type %s is not supported!" % (str(x.__class__)))
def getDatatype(x):
    """Return this library's datatype label for `x`.

    Note the shifted complex naming: numpy complex64 -> 'complex32',
    numpy complex128 -> 'complex64'.
    """
    backend = getBackend(x)
    if 'numpy' in backend:
        native = str(x.dtype)
        if 'complex64' in native:
            return 'complex32'
        if 'complex128' in native:
            return 'complex64'
        return native
    if 'arrayfire' in backend:
        af_map = {arrayfire.Dtype.c32: 'complex32',
                  arrayfire.Dtype.c64: 'complex64',
                  arrayfire.Dtype.f32: 'float32',
                  arrayfire.Dtype.f64: 'float64',
                  arrayfire.Dtype.s16: 'int16',
                  arrayfire.Dtype.s32: 'int32',
                  arrayfire.Dtype.s64: 'int64',
                  arrayfire.Dtype.u16: 'uint16',
                  arrayfire.Dtype.u32: 'uint32',
                  arrayfire.Dtype.u64: 'uint64',
                  arrayfire.Dtype.b8: 'bool'}
        try:
            return af_map[x.dtype()]
        except KeyError:
            raise ValueError("Invalid arrayfire datatype %s" % x.dtype())
    if 'torch' in backend:
        return str(x.dtype)
    if 'Operator' in str(x.__class__):
        return x.dtype
    if type(x) in (list, tuple):
        head = x[0]
        # Scalars are routed through numpy so they pick up a numpy dtype label.
        return getDatatype(np.asarray(head)) if isscalar(head) else getDatatype(head)
    raise ValueError("Backend %s is not supported!" % (backend))
def dtype(x):
    """Shorthand for getDatatype(x)."""
    # Thin alias kept for numpy-style naming convenience.
    return getDatatype(x)
def getByteOrder(x):
    """Return 'F' (column-major) or 'C' (row-major) memory order of `x`."""
    backend = getBackend(x)
    if backend == 'numpy':
        # 1-D numpy arrays carry both contiguity flags; 'F' wins here.
        return 'F' if x.flags['F_CONTIGUOUS'] else 'C'
    if backend == 'arrayfire':
        # Arrayfire arrays are always column-major.
        return 'F'
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def setByteOrder(x, new_byte_order):
    """
    This function sets the byte order of an array

    Args:
        x: numpy or arrayfire array.
        new_byte_order: 'F' (column-major) or 'C' (row-major), case-insensitive.

    Raises:
        ValueError: for an unrecognized byte order (the original merely
            printed a message and implicitly returned None, hiding the error).
        NotImplementedError: for unsupported backends, or arrayfire + 'C'.
    """
    backend = getBackend(x)
    order = new_byte_order.lower()
    if backend == 'numpy':
        if order == 'f':
            return np.asfortranarray(x)
        elif order == 'c':
            return np.ascontiguousarray(x)
        else:
            raise ValueError('Invalid byte order %s' % new_byte_order)
    elif backend == 'arrayfire':
        if order == 'f':
            return x  # arrayfire arrays are already column-major
        elif order == 'c':
            raise NotImplementedError(
                'Arrayfire does not support C-contiguous arrays!')
        else:
            raise ValueError('Invalid byte order %s' % new_byte_order)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def makeComplex(dtype_or_array):
    """Return the complex-valued counterpart of a dtype label or array."""
    if isarray(dtype_or_array):
        # Cast the array to the complex version of its own datatype.
        return astype(dtype_or_array, makeComplex(getDatatype(dtype_or_array)))
    # Label case: 64-bit inputs widen to 'complex64', everything else to 'complex32'.
    return 'complex64' if dtype_or_array in ('float64', 'complex64') else 'complex32'
def precision(x, for_sum=False):
    """
    This function returns the precision of a given datatype using a comparable numpy array

    Accepts either a datatype label (str) or an array; when `for_sum` is True
    the machine epsilon is scaled by the element count of `x`.
    """
    # isinstance replaces the fragile `'str' in str(type(x))` check, which
    # could mis-fire on any class whose name merely contains "str".
    if isinstance(x, str):
        dtype_np = getNativeDatatype(x, 'numpy')
    else:
        dtype_np = getNativeDatatype(getDatatype(x), 'numpy')
    if not for_sum:
        return np.finfo(dtype_np).eps
    else:
        return np.finfo(dtype_np).eps * size(x)
def concatenate(a, b=None, axis=0):
    """
    Generic concatenate operator for two arrays with backend selector

    Call either as concatenate(a, b[, axis]) with two arrays, or as
    concatenate(list_of_arrays[, axis]).

    Raises:
        ValueError: if the arguments fit neither calling convention (the
            original silently returned None in that case).
        NotImplementedError: for unsupported backends.
    """
    if b is not None:
        backend = getBackend(a)
        assert backend == getBackend(b)
        if backend == 'numpy':
            return np.append(a, b, axis)
        elif backend == 'arrayfire':
            return arrayfire.data.join(axis, a, b)
        else:
            raise NotImplementedError(
                'Backend %s is not implemented!' % backend)
    elif type(a) is list:
        backend = getBackend(a[0])
        assert all([backend == getBackend(_a) for _a in a])
        if backend == 'numpy':
            return np.concatenate(a)
        elif backend == 'arrayfire':
            # Arrayfire joins pairwise; fold the list left-to-right.
            result = a[0]
            for _a in a[1:]:
                result = arrayfire.data.join(axis, result, _a)
            return result
        else:
            raise NotImplementedError(
                'Backend %s is not implemented!' % backend)
    else:
        raise ValueError('concatenate expects (a, b) arrays or a list of arrays')
def norm(x):
    """Euclidean (L2) norm with backend dispatch."""
    backend = getBackend(x)
    if backend == 'numpy':
        return np.linalg.norm(x)
    if backend == 'arrayfire':
        return arrayfire.lapack.norm(x)
    if backend == 'torch':
        return x.norm(p=2)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def sign(x):
    """Element-wise sign with backend dispatch (zeros map to 0)."""
    backend = getBackend(x)
    if backend == 'numpy':
        return np.sign(x)
    if backend == 'arrayfire':
        # x / |x| yields NaN at zeros; replace those with 0 to match np.sign.
        result = x / arrayfire.arith.sqrt(x * x)
        result[isnan(result)] = 0
        return result
    if backend == 'torch':
        return x.sign()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def abs(x, return_real=True):
    """
    A generic absolute value operator with backend selector

    Args:
        x: scalar or array.
        return_real: when False, cast the magnitude back to the input's own
            (possibly complex) dtype instead of returning a real value.
    """
    backend = getBackend(x)
    if backend == 'scalar':
        if not return_real:
            # Python scalars have no .dtype attribute (the original x.dtype
            # raised AttributeError here); derive the dtype via numpy.
            return np.abs(x).astype(np.asarray(x).dtype)
        else:
            return np.abs(x)
    elif backend == 'numpy':
        if not return_real:
            return np.abs(x).astype(x.dtype)
        else:
            return np.abs(x)
    elif backend == 'arrayfire':
        if not return_real:
            return arrayfire.arith.abs(x).as_type(x.dtype())
        else:
            return arrayfire.arith.abs(x)
    elif backend == 'torch':
        return x.abs()
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def angle(x):
    """Element-wise complex phase (radians) with backend dispatch."""
    backend = getBackend(x)
    if backend == 'numpy':
        return np.angle(x)
    if backend == 'arrayfire':
        # Equivalent to imag(log(x)): atan2(imag, real).
        return arrayfire.arith.atan2(
            arrayfire.arith.imag(x), arrayfire.arith.real(x))
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def isComplex(x, check_values=True):
    """
    Checks if x is complex

    With check_values=True the imaginary part's magnitude is inspected;
    otherwise only the datatype label is consulted.
    """
    if check_values:
        # Sum of |imag|: the original summed the raw imaginary part, so
        # negative imaginary components (e.g. 1-2j) could cancel or go
        # negative and be misreported as real.
        return sum(abs(imag(x))) > 0
    else:
        return 'complex' in getDatatype(x)
def real(x):
    """Real part with backend dispatch."""
    backend = getBackend(x)
    if backend in ('numpy', 'scalar'):
        return np.real(x)
    if backend == 'arrayfire':
        return arrayfire.arith.real(x)
    if backend == 'list':
        return np.real(np.asarray(x)).tolist()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def imag(x):
    """
    A generic imaginary-part operator with backend selector
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return np.imag(x)
    elif backend == 'scalar':
        # Added for parity with real(): real() accepts scalars, while
        # imag(1+2j) previously raised NotImplementedError.
        return np.imag(x)
    elif backend == 'arrayfire':
        return arrayfire.arith.imag(x)
    elif backend == 'list':
        return np.imag(np.asarray(x)).tolist()
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def log(x):
    """Element-wise natural logarithm with backend dispatch."""
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.log(x)
    if backend == 'arrayfire':
        return arrayfire.arith.log(x)
    if backend == 'torch':
        return x.log()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def log10(x):
    """Element-wise base-10 logarithm with backend dispatch."""
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.log10(x)
    if backend == 'arrayfire':
        return arrayfire.arith.log10(x)
    if backend == 'torch':
        return x.log10()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def sqrt(x):
    """Element-wise square root with backend dispatch."""
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.sqrt(x)
    if backend == 'arrayfire':
        return arrayfire.arith.sqrt(x)
    if backend == 'torch':
        return x.sqrt()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def cos(x):
    """Element-wise cosine with backend dispatch."""
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.cos(x)
    if backend == 'arrayfire':
        return arrayfire.arith.cos(x)
    if backend == 'torch':
        return x.cos()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def sin(x):
    """Element-wise sine with backend dispatch."""
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.sin(x)
    if backend == 'arrayfire':
        return arrayfire.arith.sin(x)
    if backend == 'torch':
        return x.sin()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def tan(x):
    """Element-wise tangent with backend dispatch."""
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.tan(x)
    if backend == 'arrayfire':
        return arrayfire.arith.tan(x)
    if backend == 'torch':
        return x.tan()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def arccos(x):
    """Element-wise inverse cosine with backend dispatch."""
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.arccos(x)
    if backend == 'arrayfire':
        return arrayfire.arith.acos(x)
    if backend == 'torch':
        return x.acos()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def arcsin(x):
    """Element-wise inverse sine with backend dispatch."""
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.arcsin(x)
    if backend == 'arrayfire':
        return arrayfire.arith.asin(x)
    if backend == 'torch':
        return x.asin()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def arctan(x):
    """Element-wise inverse tangent with backend dispatch."""
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.arctan(x)
    if backend == 'arrayfire':
        return arrayfire.arith.atan(x)
    if backend == 'torch':
        return x.atan()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def isnan(x):
    """
    This function returns a boolean array indicating the location of NaN values
    """
    # Get the backend of the variable
    backend = getBackend(x)
    if backend == 'scalar':
        return np.isnan(x)
    elif backend == 'numpy':
        return np.isnan(x)
    elif backend == 'arrayfire':
        return arrayfire.arith.isnan(x)
    elif backend == 'torch':
        # Added for parity with the other element-wise ops (exp, sqrt, ...),
        # which all support the torch backend.
        return torch.isnan(x)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def exp(x):
    """Element-wise exponential with backend dispatch.

    TODO (sarah) check for numerical overflow
    """
    backend = getBackend(x)
    if backend in ('scalar', 'numpy'):
        return np.exp(x)
    if backend == 'arrayfire':
        return arrayfire.arith.exp(x)
    if backend == 'torch':
        return x.exp()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def unique(x):
    """Return the unique elements of `x`, dispatched on its backend.

    Scalars are returned unchanged; lists come back as lists of the
    numpy-unique values.
    """
    backend = getBackend(x)
    if backend == 'scalar':
        return x
    if backend == 'list':
        return np.unique(np.asarray(x)).tolist()
    if backend == 'numpy':
        return np.unique(x)
    if backend == 'torch':
        return x.unique()
    if backend == 'arrayfire':
        return arrayfire.algorithm.set_unique(x)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def zeros(shape, dtype=None, backend=None):
    """Allocate a zero-filled array of the given shape, dtype, and backend.

    dtype/backend fall back to the module-level config defaults; a bare
    scalar `shape` is treated as a 1-D length.
    """
    backend = backend if backend is not None else config.default_backend
    dtype = dtype if dtype is not None else config.default_dtype
    if type(shape) not in [list, tuple, np.ndarray]:
        shape = [shape]
    _dtype = getNativeDatatype(dtype, backend)
    if backend == 'numpy':
        return np.asfortranarray(np.zeros(shape, _dtype))
    if backend == 'arrayfire':
        if len(shape) in (1, 2, 3):
            return arrayfire.data.constant(0, *shape, dtype=_dtype)
        raise NotImplementedError
    if backend == 'torch':
        return torch.zeros(shape, dtype=_dtype)
    if backend == 'list':
        return [0] * prod(shape)
    if backend == 'tuple':
        return tuple([0] * prod(shape))
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def zeros_like(x):
    """Zero-filled array matching `x`'s shape, datatype, and backend."""
    return zeros(shape(x), dtype=getDatatype(x), backend=getBackend(x))
def ones(shape, dtype=None, backend=None):
    """
    Returns an array of ones.

    Parameters
    ----------
    shape: list or tuple (or scalar, treated as a 1-D length)
        The desired shape of the array
    dtype: string
        Optional. The desired datatype, if different from the default.
    backend: string
        Optional. The desired backend, if different from the default.

    Returns
    -------
    array-like:
        An array of ones of the desired shape, dtype, and backend.
    """
    backend = backend if backend is not None else config.default_backend
    dtype = dtype if dtype is not None else config.default_dtype
    if type(shape) not in [list, tuple, np.ndarray]:
        shape = [shape]
    _dtype = getNativeDatatype(dtype, backend)
    if backend == 'numpy':
        return np.asfortranarray(np.ones(shape, _dtype))
    elif backend == 'arrayfire':
        if len(shape) == 1:
            return arrayfire.data.constant(1, shape[0], dtype=_dtype)
        elif len(shape) == 2:
            return arrayfire.data.constant(1, shape[0], shape[1], dtype=_dtype)
        elif len(shape) == 3:
            return arrayfire.data.constant(
                1, shape[0], shape[1], shape[2], dtype=_dtype)
        else:
            raise NotImplementedError
    elif backend == 'torch':
        return torch.ones(shape, dtype=_dtype)
    elif backend == 'list':
        # TODO support for floats
        if 'complex' in dtype:
            return [1 + 0j] * prod(shape)
        else:
            return [1] * prod(shape)
    elif backend == 'tuple':
        # Added for parity with zeros(), which already supports 'tuple'.
        if 'complex' in dtype:
            return tuple([1 + 0j] * prod(shape))
        else:
            return tuple([1] * prod(shape))
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def ones_like(x):
    """One-filled array matching `x`'s shape, datatype, and backend."""
    return ones(shape(x), dtype=getDatatype(x), backend=getBackend(x))
def randn(shape, dtype=None, backend=None):
    """
    Returns an array of random values drawn from a normal distribution.

    Parameters
    ----------
    shape: list or tuple (or scalar, treated as a 1-D length)
        The desired shape of the array
    dtype: string
        Optional. The desired datatype, if different from the default.
    backend: string
        Optional. The desired backend, if different from the default.

    Returns
    -------
    array-like:
        An array of random values drawn from the normal distribution.
    """
    backend = backend if backend is not None else config.default_backend
    dtype = dtype if dtype is not None else config.default_dtype
    if type(shape) not in [list, tuple, np.ndarray]:
        shape = [shape]
    _dtype = getNativeDatatype(dtype, backend)
    if backend == 'numpy':
        if len(shape) == 1:
            return np.asfortranarray(np.random.randn(shape[0]).astype(_dtype))
        elif len(shape) == 2:
            return np.asfortranarray(
                np.random.randn(shape[0], shape[1]).astype(_dtype))
        elif len(shape) == 3:
            # Fixed: the original chained ndarray.asfortranarray(), a method
            # that does not exist (AttributeError); np.asfortranarray is the
            # module-level function, applied here as in the 1-D/2-D branches.
            return np.asfortranarray(
                np.random.randn(shape[0], shape[1], shape[2]).astype(_dtype))
        else:
            raise NotImplementedError
    elif backend == 'arrayfire':
        if len(shape) == 1:
            return arrayfire.random.randn(shape[0], dtype=_dtype)
        elif len(shape) == 2:
            return arrayfire.random.randn(shape[0], shape[1], dtype=_dtype)
        elif len(shape) == 3:
            return arrayfire.random.randn(
                shape[0], shape[1], shape[2], dtype=_dtype)
        else:
            raise NotImplementedError
    elif backend == 'torch':
        return torch.randn(shape, dtype=_dtype)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def randn_like(x):
    """Normal-random array matching `x`'s shape, datatype, and backend."""
    return randn(shape(x), dtype=getDatatype(x), backend=getBackend(x))
def randu(shape, dtype=None, backend=None):
    """Uniformly-distributed random array of the given shape/dtype/backend.

    dtype/backend fall back to the module-level config defaults; a bare
    scalar `shape` is treated as a 1-D length.
    """
    backend = backend if backend is not None else config.default_backend
    dtype = dtype if dtype is not None else config.default_dtype
    if type(shape) not in [list, tuple, np.ndarray]:
        shape = [shape]
    _dtype = getNativeDatatype(dtype, backend)
    if backend == 'numpy':
        if len(shape) in (1, 2, 3):
            return np.asfortranarray(np.random.rand(*shape).astype(_dtype))
        raise NotImplementedError
    if backend == 'arrayfire':
        if len(shape) in (1, 2, 3):
            return arrayfire.random.randu(*shape, dtype=_dtype)
        raise NotImplementedError
    if backend == 'torch':
        return torch.rand(shape, dtype=_dtype)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def randu_like(x):
    """Uniform-random array matching `x`'s shape, datatype, and backend."""
    return randu(shape(x), dtype=getDatatype(x), backend=getBackend(x))
def rand(shape, dtype=None, backend=None):
    """Uniform random array; thin wrapper around randu()."""
    backend = backend if backend is not None else config.default_backend
    dtype = dtype if dtype is not None else config.default_dtype
    return randu(shape, dtype, backend)
def rand_like(x):
    """Uniform-random array matching `x`'s shape, datatype, and backend."""
    return rand(shape(x), dtype=getDatatype(x), backend=getBackend(x))
def where(x):
    """
    Returns a list of locations with non-zero values in an array
    Parameters
    ----------
    x: array-like
        The array to search for non-zero values
    Returns
    -------
    tuple:
        Tuple of positions in an array (one tuple for position)
    """
    # Get backend
    backend = getBackend(x)
    # Get precision
    # "Non-zero" here means |x| above machine epsilon for x's datatype.
    tol = precision(x)
    if backend == 'numpy':
        # NOTE(review): assumes a 2-D array -- each location is an (i, j)
        # pair; confirm whether other ranks should be supported.
        return tuple([(i[0], i[1]) for i in np.asarray(np.where(np.abs(x) > tol)).T])
    elif backend == 'arrayfire':
        # Arrayfire yields flat column-major indices; unravel them into
        # per-dimension coordinates.
        return tuple([
            tuple(reversed(np.unravel_index(i, tuple(reversed(x.shape)))))
            for i in np.asarray(arrayfire.algorithm.where(abs(x) > tol))
        ])
    elif backend == 'torch':
        # NOTE(review): unlike the branches above, this returns an
        # element-wise 0/1 mask rather than coordinate tuples -- confirm
        # whether callers rely on this or it is a latent bug.
        return torch.where(x.abs() > tol, 0, 1)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def max(x, axis=None):
    """Maximum of the real part of `x`, dispatched on its backend.

    Scalars are returned as-is; lists and tuples use the builtin max.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return np.max(real(x), axis=axis)
    if backend == 'arrayfire':
        return scalar(arrayfire.algorithm.max(real(x), dim=axis))
    if backend == 'scalar':
        return x
    if backend in ('list', 'tuple'):
        return builtins.max(x)
    if backend == 'torch':
        return torch.max(x, dim=axis)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def amax(x, axis=None):
    """Short-hand for llops.max.

    `axis` (default None) added for parity with amin(); existing
    single-argument callers are unaffected.
    """
    return max(x, axis=axis)
def argmax(x, axis=None):
    """Coordinates (tuple) of the global maximum of real(x).

    For lists/tuples the input is converted to numpy first; for torch the
    call is forwarded to torch.argmax.
    """
    backend = getBackend(x)
    dims = shape(x)
    if backend == 'numpy':
        return tuple(np.unravel_index(np.argmax(real(x)), dims))
    if backend == 'arrayfire':
        return tuple(np.unravel_index(arrayfire.algorithm.imax(real(x.T))[1], dims))
    if backend == 'scalar':
        return x
    if backend in ('list', 'tuple'):
        return argmax(np.asarray(x), axis=axis)
    if backend == 'torch':
        return torch.argmax(x, dim=axis)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def min(x, axis=None):
    """Minimum of the real part of `x`, dispatched on its backend.

    Scalars are returned as-is; lists and tuples use the builtin min.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return np.min(real(x), axis=axis)
    if backend == 'arrayfire':
        return scalar(arrayfire.algorithm.min(real(x), dim=axis))
    if backend == 'scalar':
        return x
    if backend in ('list', 'tuple'):
        return builtins.min(x)
    if backend == 'torch':
        return torch.min(x, dim=axis)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def amin(x, axis=None):
    """Short-hand for llops.min."""
    # Thin alias kept for numpy-style naming compatibility.
    return min(x, axis=axis)
def argmin(x, axis=None):
    """
    Returns the coordinates of the global mininum of an array. Only conisders
    the real part of the input.

    Parameters
    ----------
    x: array-like
        The array to search for its minimum value.
    axis: int, optional
        Forwarded to the torch backend only; the numpy and arrayfire branches
        always locate the global minimum.

    Returns
    -------
    tuple:
        The coordinates of the mininum value
    """
    backend = getBackend(x)
    _shape = shape(x)
    if backend == 'numpy':
        return tuple(np.unravel_index(np.argmin(real(x)), _shape))
    elif backend == 'arrayfire':
        # imin returns (value, flat index); unravel against the row-major shape.
        return tuple(
            np.unravel_index(arrayfire.algorithm.imin(real(x.T))[1], _shape))
    elif backend == 'scalar':
        return x
    elif backend in ('list', 'tuple'):
        # FIX: previously missing — mirrors the equivalent branch in argmax.
        return argmin(np.asarray(x), axis=axis)
    elif backend == 'torch':
        return torch.argmin(x, dim=axis)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def conj(x):
    """
    Element-wise complex conjugate of an array.

    Parameters
    ----------
    x: array-like
        The array to conjugate.

    Returns
    -------
    array-like:
        The complex conjugate of x
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return np.conj(x)
    if backend == 'arrayfire':
        return arrayfire.arith.conjg(x)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def dot(lhs, rhs):
    """
    Dot product of two operators.

    Parameters
    ----------
    lhs: array-like
        The first vector to dot product
    rhs: array-like
        The second vector to dot product

    Returns
    -------
    array-like:
        The dot product of lhs and rhs
    """
    backend = getBackend(rhs)
    if backend in ('numpy', 'arrayfire', 'torch'):
        # Element-wise multiply, then reduce over all elements.
        return sum(lhs * rhs)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def flip(x, axis=None):
    """
    Flip an operator about one or more axes.

    Parameters
    ----------
    x: array-like
        The array we wish to flip.
    axis: int or list, optional
        Axis or axes to flip across. Defaults to all axes.

    Returns
    -------
    array-like:
        The flipped array
    """
    backend = getBackend(x)

    # Parse axes
    if axis is None:
        axis = list(range(ndim(x)))
    elif type(axis) is not list:
        axis = [axis]

    # If no axes are provided, just return x
    if len(axis) == 0:
        return x

    if backend == 'numpy':
        for ax in axis:
            x = np.flip(x, ax)
        return x
    elif backend == 'arrayfire':
        for ax in axis:
            x = arrayfire.data.flip(x, ax)
        return x
    elif backend == 'torch':
        # BUG FIX: torch.flip takes `dims` (a list of axes), not `dim`;
        # the previous call raised a TypeError.
        return torch.flip(x, dims=axis)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def fliplr(x):
    """Flip an array along its second (column) dimension."""
    return flip(x, 1)
def flipud(x):
    """Flip an array along its first (row) dimension."""
    return flip(x, 0)
def roll(x, shift, axis=None, y=None):
    """
    Roll an array about an axis.

    This function rolls an array along one or more axes.

    Parameters
    ----------
    x: array-like
        The array we wish to roll.
    shift: int, list, tuple or array
        Amount to shift. If a sequence/array, one shift per axis.
    axis: int or iterable, optional
        Axis (or axes, when shift is a sequence) to shift along.
    y: array-like, optional
        NOTE(review): accepted but never used in this implementation —
        confirm whether in-place output was intended.

    Returns
    -------
    array-like:
        The rolled array
    """
    backend = getBackend(x)

    # Deal with lists using recursion
    # (a sequence of shifts is applied one axis at a time)
    if getBackend(shift) in ('list', 'tuple', 'arrayfire', 'numpy'):
        if axis is None:
            axis = range(len(shift))
        for sh, ax in zip(shift, axis):
            x = roll(x, int(real(sh)), ax)
        return x

    # Set axis to default
    if axis is None:
        axis = 0
    else:
        assert axis in range(ndim(x)), 'Axis %s is invalid' % str(axis)

    if backend == 'numpy':
        return np.roll(x, shift, axis)
    elif backend == 'arrayfire':
        # arrayfire.data.shift takes per-dimension shift amounts positionally.
        if axis == 0:
            return arrayfire.data.shift(x, shift)
        elif axis == 1:
            return arrayfire.data.shift(x, 0, shift)
        elif axis == 2:
            return arrayfire.data.shift(x, 0, 0, shift)
        else:
            raise NotImplementedError
    elif backend in ('list', 'tuple'):
        # Circular rotation by list slicing (right-roll for positive shift).
        return x[-shift:] + x[:-shift]
    elif backend == 'torch':
        # Emulate np.roll with index_select + cat, splitting on shift sign.
        if shift < 0:
            # Left roll by |shift|: tail of the axis first, then the head.
            shift = -shift
            gap = x.index_select(axis, torch.arange(shift))
            return torch.cat([x.index_select(axis, torch.arange(shift, x.size(axis))), gap], dim=axis)
        else:
            # Right roll by shift: last `shift` slices move to the front.
            shift = x.size(axis) - shift
            gap = x.index_select(axis, torch.arange(shift, x.size(axis)))
            return torch.cat([gap, x.index_select(axis, torch.arange(shift))], dim=axis)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def circshift(x, shift, axis=None):
    """
    Circular shift an array.

    Thin wrapper over llops.roll: rolls an array along one or more axes.

    Parameters
    ----------
    x: array-like
        The array we wish to shift.
    shift: list, tuple
        Amount to shift
    axis:
        Optional. Axes to shift along

    Returns
    -------
    array-like:
        The circshifted array
    """
    return roll(x, shift, axis=axis)
def fftshift(x, inverse=False):
    """
    FFT shift an array.

    This function performs a fftshift operation. It is the same as ifftshift
    for arrays with even shapes.

    Parameters
    ----------
    x: array-like
        The array we wish to fftshift
    inverse: bool
        Whether to inverse (ifftshift) the array

    Returns
    -------
    array-like:
        The fft-shifted array
    """
    backend = getBackend(x)
    if backend == 'numpy':
        if inverse:
            return sp.fftpack.ifftshift(x)
        else:
            return sp.fftpack.fftshift(x)
    elif backend == 'arrayfire':
        # NOTE(review): for odd sizes numpy's fftshift rolls by floor(n/2) and
        # ifftshift by ceil(n/2); here the forward path uses ceil and the
        # inverse floor — confirm this matches arrayfire.data.shift semantics
        # (only matters for odd dimensions).
        if inverse:
            s = [math.floor(i / 2) for i in x.shape]
        else:
            s = [math.ceil(i / 2) for i in x.shape]
        if len(s) == 1:
            return arrayfire.data.shift(x, s[0])
        elif len(s) == 2:
            return arrayfire.data.shift(x, s[0], s[1])
        elif len(s) == 3:
            return arrayfire.data.shift(x, s[0], s[1], s[2])
        else:
            # Only up to 3 dimensions are supported for arrayfire.
            raise NotImplementedError
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def ifftshift(x):
    """
    Inverse FFT shift an array.

    Equivalent to fftshift for arrays with even shapes.

    Parameters
    ----------
    x: array-like
        The array we wish to inverse fftshift.

    Returns
    -------
    array-like:
        The inverse fft-shifted array
    """
    return fftshift(x, inverse=True)
def transpose(x, hermitian=True):
    """
    Transpose an array, by default conjugating it as well (Hermitian
    transpose).

    Parameters
    ----------
    x: array-like
        The array we wish to tranpose.
    hermitian: bool
        Whether to conjugate the array in addition to transposing it.

    Returns
    -------
    array-like:
        The (hermitian) transposed array.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        transposed = x.T
        return np.conj(transposed) if hermitian else transposed
    if backend == 'arrayfire':
        return arrayfire.array.transpose(x, conj=hermitian)
    if backend == 'torch':
        # NOTE(review): the torch path swaps only the first and last axes and
        # never conjugates, regardless of `hermitian` — confirm intended.
        return x.transpose(-1, 0)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def matmul(A, x, y=None):
    """
    Matrix-vector multiplication.

    This function performs matrix-vector multiplication.

    Parameters
    ----------
    A : array-like
        The matrix to multiply
    x : array-like
        The vector (array) we wish to multiply. This will be vectorized if it
        is not already.
    y: array-like
        Optional. Output to write to.

    Returns
    -------
    array-like:
        The output of the matrix-vector multiplcation or None if y is not None.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        if y is not None:
            # Write the product into y in place; intentionally returns None.
            np.matmul(A, x, y)
        else:
            return np.matmul(A, x)
    elif backend == 'arrayfire':
        if y is not None:
            # NOTE(review): the third positional argument of
            # arrayfire.blas.matmul is an options flag, not an output buffer —
            # confirm passing y here is intended.
            y[:] = arrayfire.blas.matmul(A, x, y)
        else:
            return arrayfire.blas.matmul(A, x)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def innerproduct(x, y):
    """
    A generic inner product operation with backend selector.

    Computes x^H y (Hermitian transpose of x times y).

    Parameters
    ----------
    x: array-like
        The first operand (conjugate-transposed before multiplication).
    y: array-like
        The second operand.

    Returns
    -------
    array-like:
        The inner product of x and y.
    """
    # BUG FIX: transpose() takes `hermitian=`, not `conj=` (the old keyword
    # raised a TypeError on every call).
    return matmul(transpose(x, hermitian=True), y)
def all(x):
    """
    Returns whether all values are True in the input.
    Analyzes the real values only.

    Parameters
    ----------
    x: array-like or scalar
        The array to search for boolean true values

    Returns
    -------
    bool:
        Whether all of the values in the array are True.
    """
    # Check if x is a boolean
    if type(x) is bool:
        return x
    else:
        backend = getBackend(x)
        if backend == 'numpy':
            # BUG FIX: previously called np.any, which returned True when ANY
            # element was truthy rather than requiring all of them.
            return np.all(real(x))
        elif backend == 'arrayfire':
            return arrayfire.algorithm.all_true(real(x)) != 0
        elif backend in ['list', 'tuple']:
            return builtins.all(x)
        else:
            raise NotImplementedError('Backend %s is not implemented!' % backend)
def any(x):
    """
    Returns whether any values are True in the input.
    Analyzes the real values only.

    Parameters
    ----------
    x: array-like or scalar
        The array to search for boolean true values

    Returns
    -------
    bool:
        Whether any of the values in the array are True.
    """
    # Check if x is a boolean
    if type(x) is bool:
        return x
    else:
        backend = getBackend(x)
        if backend == 'numpy':
            # BUG FIX: previously called np.all, which required EVERY element
            # to be truthy rather than at least one.
            return np.any(real(x))
        elif backend == 'arrayfire':
            return arrayfire.algorithm.any_true(real(x)) != 0
        elif backend in ['list', 'tuple']:
            return builtins.any(x)
        else:
            raise NotImplementedError('Backend %s is not implemented!' % backend)
def prod(x, axes=None):
    """
    A generic product operator with backend selector.

    Parameters
    ----------
    x: array-like, list, tuple or scalar
        The values to multiply together.
    axes: list, optional
        Axes to reduce over (numpy and arrayfire backends only).

    Returns
    -------
    The product of the elements of x.
    """
    backend = getBackend(x)
    if backend in ['tuple', 'list']:
        # NOTE(review): `axes` is ignored for sequences, and an empty sequence
        # yields [] rather than the conventional empty-product value 1 —
        # confirm callers rely on this before changing.
        return reduce(lambda x, y: x * y, x) if len(x) > 0 else []
    if backend in ['numpy', 'tuple', 'list']:
        # ('tuple' / 'list' are unreachable here — handled above.)
        if axes is None:
            return np.prod(x)
        else:
            return np.prod(x, axis=tuple(axes), keepdims=True)
    elif backend == 'arrayfire':
        if axes is None:
            _axes = list(range(len(shape(x))))
        else:
            _axes = axes
        # Reduce over the selected axes one at a time
        a = x.copy()
        for axis in _axes:
            a = arrayfire.algorithm.prod(a, axis)
        if axes is None:
            # Full reduction: collapse to a Python scalar.
            return scalar(a.as_type(x.dtype()))
        else:
            return a.as_type(x.dtype())
    elif backend == 'scalar':
        return x
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def sum(x, axis=None):
    """
    A generic sum operator with backend selector.

    Parameters
    ----------
    x: array-like or list
        The values to sum.
    axis: int or list, optional
        Axis or axes to reduce over. None sums over all elements.

    Returns
    -------
    The sum of the elements of x (a scalar for a full reduction, an array
    for a partial one).
    """
    # Ensure axis is a list
    if axis is not None and type(axis) not in [list, tuple, np.ndarray]:
        axis = [axis]

    backend = getBackend(x)
    if backend == 'numpy':
        if axis is None:
            return np.sum(x)
        else:
            return np.sum(x, axis=tuple(axis), keepdims=True)
    elif backend == 'arrayfire':
        if axis is None:
            _axes = list(range(len(shape(x))))
        else:
            _axes = axis

        # Sum over defined axes.
        # BUG FIX: the loop variable previously shadowed `axis`, so the
        # `axis is None` check below never fired and a full reduction
        # returned an array instead of a scalar.
        a = x.copy()
        for _ax in _axes:
            a = arrayfire.algorithm.sum(a, _ax)

        if axis is None:
            return scalar(a.as_type(x.dtype()))
        else:
            return a.as_type(x.dtype())
    elif backend == 'list':
        return builtins.sum(x)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def tile(x, reps):
    """
    A generic tile operation with backend selector.

    Parameters
    ----------
    x: array-like
        The array to repeat.
    reps: sequence of int
        Repetition count per dimension (up to 3 for arrayfire).

    Returns
    -------
    array-like:
        The tiled array.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return np.tile(x, reps)
    if backend == 'arrayfire':
        counts = [int(r) for r in reps]
        if len(counts) == 1:
            return arrayfire.data.tile(x, counts[0])
        if len(counts) == 2:
            return arrayfire.data.tile(x, counts[0], counts[1])
        if len(counts) == 3:
            return arrayfire.data.tile(x, counts[0], counts[1], counts[2])
        raise NotImplementedError
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def isscalar(x):
    """Return True when the input behaves like a scalar (has no length).

    TODO: Only works for numpy arrays.
    """
    has_length = hasattr(x, "__len__")
    return not has_length
def shape(x, ndim=None):
    """
    A method which returns the shape of an array in row-major format.

    Parameters
    ----------
    x: array-like, scalar, list or tuple
        The object whose shape is requested.
    ndim: int, optional
        arrayfire only: pad the returned tuple with 1s up to this length.

    Returns
    -------
    tuple:
        The shape of the input; scalars report (1,).
        NOTE(review): lists/tuples return the bare int len(x), not a tuple —
        callers such as size() depend on this quirk, so confirm before
        changing it.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        if isscalar(x):
            return (1,)
        else:
            return tuple(np.asarray(x).shape)
    elif backend == 'arrayfire':
        # Arrayfire arrays ALWAYS have 4 dimensions. The .shape property squeezes
        # out all extra dimensions, which is inconsistent with numpy.shape.
        # The ndim parameter compensates for this by enforcing the return of a
        # tuple of len ndim regardless of the number of dimensions used.
        _shape = x.shape
        if ndim is not None:
            if len(_shape) != ndim:
                _shape = tuple(list(_shape) + [1] * (ndim - len(_shape)))
        return _shape
    elif backend == 'scalar':
        return (1,)
    elif backend == 'torch':
        return tuple(x.shape)
    elif backend in ['tuple', 'list']:
        return len(x)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def scalar(x):
    """
    A method which returns the first value of the array as either a complex or float.

    Parameters
    ----------
    x: array-like or scalar
        The array whose first element to extract.

    Returns
    -------
    complex or np.float32:
        The first element of x (x itself if it is already a scalar).
    """
    if isscalar(x):
        return x
    else:
        backend = getBackend(x)
        datatype = getDatatype(x)
        if backend in ['numpy', 'arrayfire', 'torch']:
            if 'complex' in datatype:
                # BUG FIX: np.complex was deprecated and removed in
                # numpy >= 1.24; the builtin complex() is the documented
                # replacement (it was only ever an alias).
                return complex(np.asarray(x).item(0))
            else:
                return np.float32(np.asarray(x).item(0))
        else:
            raise NotImplementedError('Backend %s is not implemented!' % backend)
def changeBackend(x, new_backend=None):
    """
    A method which converts an array to the given backend.

    Parameters
    ----------
    x: array-like, list or tuple
        The array (or sequence of arrays) to convert.
    new_backend: str, optional
        Target backend; defaults to config.default_backend.

    Returns
    -------
    array-like:
        The input converted to the requested backend.

    Raises
    ------
    ValueError
        If no conversion path exists between the two backends.
    """
    # Use default backend by default
    if new_backend is None:
        new_backend = config.default_backend

    # Deal with tuples and lists
    # NOTE(review): elements are converted to the *default* backend here
    # (changeBackend(_x) with no target), and the list itself is converted
    # below — confirm this two-step path is intended.
    if type(x) in (list, tuple):
        x = [changeBackend(_x) for _x in x]

    # Get current backend
    current_backend = getBackend(x)

    # Check and change backend
    if new_backend == current_backend:
        return x
    else:
        if current_backend == 'numpy' and new_backend == 'arrayfire':
            """ Numpy to arrayfire """
            return arrayfire.interop.np_to_af_array(x)
        elif current_backend == 'numpy' and new_backend == 'torch':
            """ Numpy to pytorch """
            return torch.from_numpy(x)
        elif current_backend == 'numpy' and new_backend == 'list':
            # BUG FIX: ndarray has .tolist(), not .toList() (the old spelling
            # raised AttributeError).
            return x.tolist()
        elif current_backend == 'numpy' and new_backend == 'tuple':
            return tuple(x.tolist())
        elif current_backend == 'arrayfire':
            """ arrayfire to numpy """
            return changeBackend(x.__array__(), new_backend)
        elif current_backend in ("list", "tuple", "scalar"):
            """ List/tuple to any other backend."""
            return changeBackend(np.asarray(x), new_backend)
        elif current_backend == 'torch':
            # FIX: use == rather than `is` for string comparison.
            return changeBackend(x.numpy(), new_backend)
        else:
            raise ValueError(
                "Array with backend %s cannot be converted to new backend %s" %
                (current_backend, new_backend))
def asbackend(x, new_backend=None):
    """Convenience alias for changeBackend."""
    return changeBackend(x, new_backend)
def asarray(x, dtype=None, backend=None):
    """Wrapper around changeBackend + astype for convenience.

    Parameters
    ----------
    x: array-like, list or tuple
        The data to convert.
    dtype: str, optional
        Target datatype (forwarded to astype; None means the configured
        default). See the complex-input note below.
    backend: str, optional
        Target backend (forwarded to changeBackend).

    Returns
    -------
    array-like:
        The converted array.
    """
    # Ensure output is complex if input is complex
    # NOTE(review): this silently overrides any caller-supplied dtype with
    # 'complex32' whenever the input is complex — confirm intended.
    if "complex" in getDatatype(x):
        dtype = "complex32"

    # If x is a list, convert to numpy first, then to the appropriate dtype
    if type(x) in (list, tuple):
        x = changeBackend(np.asarray(x), backend)
    else:
        x = changeBackend(x, backend)

    # Convert datatype
    x = astype(x, dtype)

    # Return
    return x
def astype(x, new_dtype=None):
    """
    A method which converts an array to the given datatype.

    Parameters
    ----------
    x: array-like, list or tuple
        The array to convert (sequences are converted to numpy first).
    new_dtype: str, optional
        Target datatype; defaults to config.default_dtype.

    Returns
    -------
    array-like:
        The converted array (the input unchanged if already that dtype).
    """
    # Use default backend if no argument passed
    if new_dtype is None:
        new_dtype = config.default_dtype

    # Pre-convert to tuples or lists
    if type(x) in [tuple, list]:
        x = np.asarray(x)

    # Get current backend and datatype
    backend = getBackend(x)
    current_dtype = getDatatype(x)

    if new_dtype == current_dtype:
        return x # No change
    else:
        if backend == 'numpy':
            # Take the real part if we're converting from complex to real
            if 'complex' in current_dtype and 'complex' not in new_dtype:
                x = real(x)
            return x.astype(getNativeDatatype(new_dtype, 'numpy'))
        elif backend == 'arrayfire':
            # Same complex-to-real handling as the numpy branch.
            if 'complex' in getDatatype(x) and 'complex' not in new_dtype:
                return arrayfire.arith.cast(
                    real(x), getNativeDatatype(new_dtype, 'arrayfire'))
            else:
                return arrayfire.arith.cast(
                    x, getNativeDatatype(new_dtype, 'arrayfire'))
        else:
            raise ValueError(
                "Array with backend %s cannot be operated on" % (backend))
def isarray(x):
    """
    Determine whether the input is an array.

    Parameters
    ----------
    x: object
        The object to observe.

    Returns
    -------
    bool:
        True if the array has one of the valid backends of this package.
    """
    backend = getBackend(x)
    return backend in config.valid_backends
def cast(x, dtype=None, backend=None):
    """
    Cast an object to a specific dtype and backend.

    Missing arguments fall back to the configured defaults.
    """
    backend = config.default_backend if backend is None else backend
    dtype = config.default_dtype if dtype is None else dtype
    return astype(asbackend(x, backend), dtype)
def cast_like(x, template):
    """
    Cast an input array to the same dtype and backend as a template array.

    Parameters
    ----------
    x: array-like
        The array to cast.
    template: array-like
        The array whose backend and dtype should be matched.

    Returns
    -------
    array-like:
        The first input cast to be like the second input
    """
    return cast(x,
                dtype=getDatatype(template),
                backend=getBackend(template))
def reshape(x, N, no_warnings=False):
    """
    A method which reshapes an array to shape N.

    Parameters
    ----------
    x: array-like
        The array to reshape.
    N: int, list, tuple or np.ndarray
        The desired shape.
    no_warnings: bool
        Optional. Suppress the expensive-operation warning.

    Returns
    -------
    array-like:
        The reshaped array.
    """
    # Normalize N to a list FIRST.
    # BUG FIX: the shape-equality check used to run before this
    # normalization, so a bare int N crashed on tuple(N).
    if type(N) not in [list, tuple, np.ndarray]:
        N = [N]
    elif type(N) is np.ndarray:
        N = N.tolist()

    # If array is already the same shape, just return
    if tuple(N) == shape(x):
        return x

    # If this is just a dimension expansion, call expandDims
    # (== instead of `is`: identity comparison with int literals is fragile)
    if len(N) > ndim(x) and N[-1] == 1:
        return expandDims(x, len(N))

    # Store existing backend
    backend = getBackend(x)

    # Check that number of elements is consistent
    assert np.prod(N) == size(x), "Number of elements is not consistent (size(x)=%d, N=%d)" % (np.prod(N), size(x))

    if config.WARN_FOR_EXPENSIVE_OPERATIONS and not no_warnings:
        print("WARNING: calling reshape can be an expensive operation, it is normally advised to avoid this.")

    if backend == 'numpy':
        return np.reshape(x, N)
    elif backend == 'arrayfire':
        if len(N) == 1:
            return vectorize(x)
        elif len(N) == 2:
            y = arrayfire.transpose(
                arrayfire.moddims(arrayfire.transpose(x), N[1], N[0]))
            garbageCollect(backend)
            return y
        elif len(N) == 3:
            y = arrayfire.transpose(
                arrayfire.moddims(arrayfire.transpose(x), N[2], N[1], N[0]))
            garbageCollect(backend)
            return y
        else:
            # FIX: previously fell through and silently returned None here.
            raise NotImplementedError('arrayfire reshape supports at most 3 dimensions')
    elif backend == 'torch':
        return x.view(N)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def garbageCollect(backend=None):
    """
    Run garbage collection for the selected backend.

    If no backend is given, garbage collection is invoked for every valid
    backend in turn.
    """
    # If backend is not provided, call for all available backends
    if backend is None:
        for candidate in config.valid_backends:
            garbageCollect(candidate)
        return

    if backend == 'numpy':
        # numpy needs no explicit collection.
        return
    if backend == 'arrayfire':
        arrayfire.device.device_gc()
        return
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def squeeze(x):
    """
    A method which removes extra (singleton) dimensions from an array.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return np.squeeze(x)
    elif backend == 'arrayfire':
        # arrayfire arrays are always 4D, so instead of dropping axes we build
        # a reorder permutation that moves singleton dimensions to the end.
        dims = list(range(4))
        for dim in range(ndim(x)):
            if shape(x)[dim] <= 1:
                # NOTE(review): this pops at index dims[dim] (the *value*
                # stored at slot `dim`), not at index `dim` itself — confirm
                # this permutation is intended for multiple singleton axes.
                dims.append(dims.pop(dims[dim]))
        return arrayfire.data.reorder(x, dims[0], dims[1], dims[2], dims[3])
    elif backend == 'torch':
        return torch.squeeze(x)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def vectorize(x, no_warnings=False):
    """
    Return the vectorized (1D) version of the array.

    This function can be a performance bottleneck.
    If config.WARN_FOR_EXPENSIVE_OPERATIONS is True, this will print a warning
    every time this function is called. pass no_warnings=True to over-ride.

    Parameters
    ----------
    x: array-like
        The array to vectorize.

    Returns
    -------
    scalar:
        The vectorized array. Output is stacked along first dimension.
    """
    backend = getBackend(x)

    # Drop singleton dimensions first for numpy inputs.
    if backend == 'numpy':
        x = np.squeeze(x)

    # Already one-dimensional: nothing to do.
    if len(shape(x)) == 1:
        return x

    if config.WARN_FOR_EXPENSIVE_OPERATIONS and not no_warnings:
        print("WARNING: calling reshape can be an expensive operation, it is normally advised to avoid this.")

    if backend == 'numpy':
        return x.ravel()
    if backend == 'arrayfire':
        # Transpose (without conjugation) so the flattening order matches numpy.
        return arrayfire.data.flat(transpose(x, hermitian=False))
    if backend == 'torch':
        return x.view(size(x))
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def vec(x, no_warnings=False):
    """Alias for llops.vectorize."""
    return vectorize(x, no_warnings=no_warnings)
def size(x):
    """
    Return the number of elements of the input array.

    Parameters
    ----------
    x: array-like
        The array whose element count is requested.

    Returns
    -------
    scalar:
        The number of elements in the input.
    """
    element_count = prod(shape(x))
    return element_count
def ndim(x):
    """
    Return the number of dimensions of the input array.

    Note that arrays with the arrayfire backend have fixed array count. For these
    arrays, this function will return the maximum dimension with non-unity size.

    Parameters
    ----------
    x: array-like
        The array to check the dimensions of.

    Returns
    -------
    scalar:
        The number of dimensions in the input.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return x.ndim
    if backend == 'arrayfire':
        return len(x.dims())
    if backend == 'list':
        # Count nesting depth by walking the first element of each level.
        depth = 1
        inner = x
        while type(inner[0]) is list:
            inner = inner[0]
            depth += 1
        return depth
    if backend == 'scalar':
        return 1
    if backend == 'torch':
        return x.ndimension()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def expandDims(x, new_dim_count):
    """
    Expand the dimensions of an array to match new_dim_count.

    Note that arrays with the arrayfire backend have fixed array count. For these
    arrays, this function does nothing.

    Parameters
    ----------
    x: array-like
        The array to expand dimensions of.
    new_dim_count: int
        The desired number of dimensions in the array (at most 4).

    Returns
    -------
    array-like:
        The expanded array with dimension new_dim_count.
    """
    backend = getBackend(x)
    assert new_dim_count <= 4

    if backend == 'numpy':
        # Append trailing singleton axes until the requested rank is reached.
        while np.ndim(x) < new_dim_count:
            x = x[:, np.newaxis]
        return x
    if backend == 'arrayfire':
        # arrayfire arrays are always 4D
        return x
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def alloc(shape, dtype, backend):
    """
    Allocate an empty array in memory with the given shape, dtype, and backend.

    Parameters
    ----------
    shape: tuple or list
        The desired shape of the array.
    dtype: str
        The desired datatype (library-level name).
    backend: str
        The desired backend.

    Returns
    -------
    array-like:
        The allocated (uninitialized) array.
    """
    native_dtype = getNativeDatatype(dtype, backend)
    if backend == 'numpy':
        # Column-major ('F') ordering for consistency with arrayfire.
        return np.empty(shape, dtype=native_dtype, order='F')
    if backend == 'arrayfire':
        if type(shape) not in [tuple, list, np.ndarray]:
            shape = [shape]
        return arrayfire.Array(dims=shape, dtype=native_dtype)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def dealloc(x):
    """
    Frees memory associated with the input.

    Parameters
    ----------
    x: array-like
        The array to erase from memory.

    Returns
    -------
    None
    """
    backend = getBackend(x)
    if backend == 'numpy':
        # NOTE(review): rebinding the local name does not release the caller's
        # reference; the caller must also drop its own reference for the
        # memory to be reclaimed.
        x = None
    elif backend == 'arrayfire':
        x = None
        # Ask arrayfire to reclaim device memory that is no longer referenced.
        arrayfire.device_gc()
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def free(x):
    """Convenience alias for llops.dealloc."""
    dealloc(x)
def dcopy(x):
    """
    Return a deep copy of an array.

    Parameters
    ----------
    x: array-like, list, tuple or scalar
        The object to create a deep copy of.

    Returns
    -------
    array-like:
        The deep copy of the input.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return x.copy()
    if backend == 'arrayfire':
        duplicate = x.copy()
        # Reclaim any device memory freed by the copy.
        arrayfire.device_gc()
        return duplicate
    if backend == 'list':
        return [dcopy(element) for element in x]
    if backend == 'tuple':
        return tuple(dcopy(element) for element in x)
    if backend == 'scalar':
        return x
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def copy(x):
    """Alias for dcopy (deep copy)."""
    return dcopy(x)
def shallow_copy(x):
    """Returns a shallow copy of an array.

    This is really a placeholder for a future backend
    which may not support shallow copy by [:]"""
    # NOTE(review): for python lists [:] is a shallow copy, but for numpy
    # arrays it returns a *view* of the same buffer — confirm which semantics
    # callers expect.
    return x[:]
def pointer(x):
    """Return an integer pointer to the array's data in memory."""
    backend = getBackend(x)
    if backend == 'numpy':
        # Address of the underlying buffer, as exposed by the array interface.
        return x.__array_interface__['data'][0]
    if backend == 'arrayfire':
        return x.device_ptr()
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def std(x):
    """
    Return the standard deviation of all elements of an array.

    Parameters
    ----------
    x: array-like or scalar
        The array to take the standard deviation of

    Returns
    -------
    array-like:
        The standard deviation of the input.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return np.std(x)
    if backend == 'arrayfire':
        return arrayfire.stdev(x)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def outer(a, b):
    """
    Return the outer product of two vectors.

    Parameters
    ----------
    a: array-like
        The first (column) array
    b: array-like
        The second (row) array

    Returns
    -------
    array-like:
        The outer product of a and b.
    """
    backend = getBackend(a)
    if backend == 'numpy':
        # Broadcast a column against a row.
        return a[:, np.newaxis] * b[np.newaxis, :]
    if backend == 'arrayfire':
        return arrayfire.broadcast(lambda u, v: u * v, a, b.T)
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def median(x):
    """
    Return the median of an array.

    For complex arrayfire inputs, the median of the real and imaginary parts
    are computed independently and recombined.

    Parameters
    ----------
    x: array-like or scalar
        The array to take the global median of

    Returns
    -------
    array-like:
        The median of the input.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return np.median(x)
    if backend == 'arrayfire':
        if 'complex' in getDatatype(x):
            result = arrayfire.median(real(x)) + 1j * arrayfire.median(imag(x))
        else:
            result = arrayfire.median(x)
        arrayfire.device_gc()
        return result
    raise NotImplementedError('Backend %s is not implemented!' % backend)
def mean(x, axis=None):
    """
    Return the mean of an array.

    Parameters
    ----------
    x: array-like or scalar
        The array to take the mean of.
    axis: int
        Optional. The axis over which to take the mean; None averages over
        all elements and returns a scalar.

    Returns
    -------
    array-like:
        The mean of the input.
    """
    if axis is not None:
        # Per-axis mean: reduced sum divided by that axis' length.
        return sum(x, axis=axis) / shape(x)[axis]
    # Global mean, collapsed to a scalar.
    return scalar(sum(x) / size(x))
def fill(x, val):
    """
    Fill all elements of an array, in place, with the same value.

    Parameters
    ----------
    x: array-like
        The array to overwrite.
    val: scalar or array-like
        The value to write (reduced to a scalar first).
    """
    backend = getBackend(x)
    if backend == 'numpy':
        x.fill(scalar(val))
    elif backend == 'arrayfire':
        x[:] = scalar(val)
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def pad(x, M, crop_start=(0, 0), pad_value=0, y=None, center=False):
    """
    Pad operation with backend selector.

    Parameters
    ----------
    x: array-like
        The array to pad.
    M: int, list or tuple
        The desired (larger) output shape.
    crop_start: tuple
        Optional. Position of x inside the padded output (may be negative,
        in which case x is clipped).
    pad_value: scalar, array-like or str
        Optional. Either a fill value, a background array, or one of
        'mean', 'median', 'maximum', 'minimum', 'repeat'/'wrap', 'reflect',
        'rand', 'randn', 'edge'.
    y: array-like
        Optional. Output array to write into in place.
    center: bool
        Optional. If True, center x in the output (overrides crop_start).

    Returns
    -------
    array-like or None:
        The padded array, or None when an output array y was supplied.
    """
    # Get shape, backend, and datatype
    N = shape(x)
    if type(M) not in (list, tuple, np.ndarray):
        M = [M]
    # NOTE(review): `is not` compares int identity; works for CPython's small
    # cached ints but `!=` would be safer here.
    if len(N) is not len(M):
        N = list(N) + [1] * (len(M) - len(N))
    backend = getBackend(x)
    dtype = getDatatype(x)

    # Check sizes
    # assert builtins.all([crop_start[i] >= 0 for i in range(len(M))]), "crop_start must be > 0!"
    # assert builtins.all(shape(x)[i] + crop_start[i] <= M[i] for i in range(len(M))), "crop_start would force pad outside of dimensions!"

    # Determine if the cropped region is outside of the FOV
    # Define a mask for assigning the output (used if crop extends outside object size)
    input_roi = [slice(0, n) for n in N]

    # If center flag is specified, over-ride the crop_start value
    if center:
        crop_start = [int(math.ceil(m / 2) - math.ceil(n / 2)) for (m, n) in zip(M, N)]

    # If crop region resides outside bounds, shrink the output ROI to reflect this
    for i in range(len(M)):
        if crop_start[i] < 0:
            input_roi[i] = slice(int(-crop_start[i]), int(N[i]))
        elif crop_start[i] + N[i] > M[i]:
            input_roi[i] = slice(0, int(M[i] - crop_start[i]))
    input_roi = tuple(input_roi)

    # Take note of whether we need to return y or not
    return_y = y is None

    # Allocate or check output variable
    if y is None:
        y = ones(M, dtype=dtype, backend=backend)
    else:
        assert getBackend(y) == backend, "Wrong backend for output (%s, needs to be %s)" % (getBackend(y), backend)
        assert getDatatype(y) == dtype, "Wrong dtype for output (%s, needs to be %s)" % (getDatatype(y), dtype)
        y = reshape(y, M)

    # Determine how to pad the value
    if isinstance(pad_value, str):
        if pad_value == 'mean':
            fill(y, mean(x))
        elif pad_value == 'median':
            fill(y, median(x))
        elif pad_value == 'maximum':
            fill(y, max(x))
        elif pad_value == 'minimum':
            fill(y, min(x))
        elif pad_value in ['repeat', 'wrap']:
            # Tile copies of x into the 8 neighboring positions around it.
            shifts = [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1)]
            shifts.remove((0, 0))
            for shift in shifts:
                shift_amount = [shift[i] * N[i] for i in range(len(N))]

                # Determine where to place copies of object
                padded_start = [
                    builtins.max(shift_amount[i] + crop_start[i], 0)
                    for i in range(len(N))
                ]
                padded_end = [
                    builtins.min(shift_amount[i] + crop_start[i] + N[i], M[i])
                    for i in range(len(N))
                ]
                slc_padded = []
                for i in range(len(N)):
                    slc_padded += [
                        slice(padded_start[i], padded_end[i]),
                    ]

                # Determine where to place copies of object
                slc_input = []
                for i in range(len(N)):
                    slc_input += [
                        slice(
                            padded_start[i] + -1 * shift[i] * N[i] -
                            crop_start[i], -crop_start[i] + padded_end[i] +
                            -1 * shift[i] * N[i]),
                    ]

                # Assign value in array
                if builtins.all([pstart != pend for (pstart, pend) in zip(padded_start, padded_end)]):
                    y[tuple(slc_padded)] = x[tuple(slc_input)]
        elif pad_value == 'reflect':
            # Mirror x into the 8 neighboring positions, flipping along each
            # shifted axis.
            shifts = [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1)]
            shifts.remove((0, 0))
            for shift_0 in shifts:
                shift = [-1 * s for s in shift_0]
                shift_amount = [shift[i] * N[i] for i in range(len(N))]

                # Determine where to place copies of object
                padded_start = [
                    builtins.max(shift_amount[i] + crop_start[i], 0)
                    for i in range(len(N))
                ]
                padded_end = [
                    builtins.min(shift_amount[i] + crop_start[i] + N[i], M[i])
                    for i in range(len(N))
                ]
                slc_padded = []
                for i in range(len(N)):
                    slc_padded += [
                        slice(padded_start[i], padded_end[i]),
                    ]

                # Determine where to place copies of object
                input_start = [
                    padded_start[i] -
                    shift[i] * (padded_end[i] - padded_start[i]) - crop_start[i]
                    for i in range(len(N))
                ]
                input_end = [
                    padded_end[i] - shift[i] * (padded_end[i] - padded_start[i])
                    - crop_start[i] for i in range(len(N))
                ]
                slc_input = []
                for i in range(len(N)):
                    slc_input += [slice(input_start[i], input_end[i]),]

                # Flip positions if necessary
                axes_to_flip = []
                for axis, sh in enumerate(shift):
                    if np.abs(sh) > 0:
                        axes_to_flip.append(axis)

                # Assign value in array
                if builtins.all([pstart != pend for (pstart, pend) in zip(padded_start, padded_end)]):
                    y[tuple(slc_padded)] = flip(x[tuple(slc_input)], axis=axes_to_flip)
        elif pad_value == 'rand':
            # Pad with random values from a uniform distribution
            # Keep same mean as image
            values = rand(shape(y), dtype=dtype, backend=backend)

            # Get object statistics
            x_mean, x_range = mean(x), max(x) - min(x)

            # Ensure padded values have same statistics
            values *= x_range / 2
            values += x_mean

            # Assign values
            y[:] = values
        elif pad_value == 'randn':
            # Pad with random values from a uniform distribution
            # Keep same mean as image
            values = randn(shape(y), dtype=dtype, backend=backend)

            # Get object statistics
            x_mean, x_range = mean(x), max(x) - min(x)

            # Ensure padded values have same statistics
            values *= x_range / 2
            values += x_mean

            # Assign values
            y[:] = values
        elif pad_value == 'edge':
            # Determine regions of interest
            shifts = [(i, j) for i in (-1, 0, 1) for j in (-1, 0, 1)]

            # Remove center
            shifts.remove((0, 0))

            # Loop over regions
            for shift_0 in shifts:
                shift = [-1 * s for s in shift_0]
                shift_amount = [shift[i] * N[i] for i in range(len(N))]

                # Determine where to place copies of object
                padded_start = [builtins.max(shift_amount[i] + crop_start[i], 0) for i in range(len(N))]
                padded_end = [builtins.min(shift_amount[i] + crop_start[i] + N[i], M[i]) for i in range(len(N))]
                slc_padded = []
                for i in range(len(N)):
                    slc_padded += [slice(padded_start[i], padded_end[i]), ]

                # Determine Edge values and repeat to match the padded size
                input_start = []
                input_end = []
                slc_input = []
                for axis_index in range(len(N)):
                    # Calculate start and end coordinates
                    input_start = (0 if shift[axis_index] <= 0 else N[axis_index] - 1)
                    input_end = (N[axis_index] if shift[axis_index] >= 0 else 1)

                    # Generate slices
                    slc_input += [slice(input_start, input_end), ]

                # Calculate padded edge vector
                if all([pstart != pend for (pstart, pend) in zip(padded_start, padded_end)]):
                    # Get padded edge
                    _padded_edge = x[tuple(slc_input)]

                    # Calculate shape
                    _shape = shape(_padded_edge, ndim=len(N))

                    # Extend edge vector to edge of padded region
                    for axis_index in range(len(N)):
                        if _shape[axis_index] == 1:
                            tile_count = [1 if n is not axis_index else abs(padded_start[axis_index] - padded_end[axis_index]) for n in range(len(N))]
                            _padded_edge = tile(_padded_edge, tile_count)

                    # Assign value in array if the size of the padded area is not zero
                    if all([s > 0 for s in shape(_padded_edge)]):
                        y[tuple(slc_padded)] = _padded_edge
        else:
            raise ValueError('Invalid pad_value (%s)' % pad_value)
    elif getBackend(pad_value) == 'scalar':
        fill(y, pad_value)
    elif isarray(pad_value):
        # pad_value is array
        y[:] = pad_value
    else:
        raise ValueError('Invalid pad value %s' % str(pad_value))

    # Determine ROI of y to assign x to
    output_roi = []
    for i in range(len(N)):
        output_roi += [slice(int(builtins.max(crop_start[i], 0)), int(builtins.min(crop_start[i] + N[i], M[i]))), ]
    output_roi = tuple(output_roi)

    # Assign output
    y[output_roi] = x[input_roi]

    if return_y:
        return y
def crop(x, M, crop_start=(0, 0), y=None, out_of_bounds_placeholder=None, center=False):
    """Crop a measurement.

    Extracts a region of shape M from x starting at crop_start. Regions of
    the output that fall outside x are filled with out_of_bounds_placeholder
    (or NaN when no placeholder is given). If y is provided, the result is
    written into it in place and nothing is returned; otherwise a newly
    allocated array is returned.
    """
    # Backend, datatype and extent of the input array
    backend = getBackend(x)
    dtype = getDatatype(x)
    N = x.shape
    # A centered crop overrides whatever crop_start was passed in
    if center:
        crop_start = [int(math.floor(n / 2 - m / 2)) for (m, n) in zip(M, shape(x))]
    # Start from the full output extent, then clip any axis whose crop
    # window extends beyond the input array on either side
    output_roi = [slice(0, m) for m in M]
    for axis in range(len(N)):
        start = crop_start[axis]
        if start < 0:
            output_roi[axis] = slice(int(-start), int(M[axis]))
        elif start + M[axis] > N[axis]:
            output_roi[axis] = slice(0, int(N[axis] - start))
    output_roi = tuple(output_roi)
    # Clamp the region read from x to its valid extent
    input_roi = tuple(slice(int(builtins.max(crop_start[axis], 0)),
                            int(builtins.min(crop_start[axis] + M[axis], N[axis])))
                      for axis in range(len(N)))
    # Only hand back a value when the caller did not supply an output array
    return_y = y is None
    # Out-of-bounds regions are pre-filled with the placeholder (NaN default)
    fill_value = np.nan if out_of_bounds_placeholder is None else out_of_bounds_placeholder
    if y is None:
        y = ones(M, dtype=dtype, backend=backend) * fill_value
    else:
        y[:] = fill_value
    # Copy the in-bounds region into place
    y[output_roi] = x[input_roi]
    if return_y:
        return y
def grid(shape, scale=1, offset=None, center=True, dtype=None, backend=None):
    """
    MATLAB-style meshgrid operator. Takes a shape and scale and produces a list of coordinate grids.
    Parameters
    ----------
    shape: list, tuple
        The desired shape of the grid
    scale: list, tuple, int
        Optional. The scale of the grid. If provided as an integer, provides the
        same scale across all axes. If provided as a list or tuple, must be of
        the same length as shape
    offset: list, tuple, int
        Optional. Offset of the grid. If provided as an integer, provides the
        same offset across all axes. If provided as a list or tuple, must be of
        the same length as shape.
    center: bool
        Optional. If False, each coordinate axis is shifted to start at its
        minimum value instead of being centered about zero.
    dtype: string
        Optional. The desired datatype, if different from the default.
    backend: string
        Optional. The desired backend, if different from the default.
    Returns
    -------
    list:
        List of arrays with provided backend and dtype corresponding to
        coordinate systems along each dimension.
    Raises
    ------
    NotImplementedError:
        If the backend is unknown or len(shape) > 3 (previously this fell
        through and silently returned None).
    """
    # Broadcast a scalar scale to one entry per axis
    if type(scale) not in [list, tuple, np.array, np.ndarray]:
        scale = [scale] * len(shape)
    # Default to a zero offset along every axis
    if offset is None:
        offset = [0] * len(shape)
    backend = backend if backend is not None else config.default_backend
    dtype = dtype if dtype is not None else config.default_dtype
    if backend == 'numpy':
        # Resolve the native numpy dtype once for every branch; the 3D branch
        # previously referenced dtype_np without ever defining it (NameError).
        dtype_np = getNativeDatatype(dtype, 'numpy')
        def _axis_coords(axis):
            # Centered, scaled, offset coordinate vector along one axis
            return ((np.arange(shape[axis], dtype=dtype_np) - shape[axis] // 2)
                    * scale[axis] - offset[axis])
        if len(shape) == 1:
            # Fixed: previous code called np.arange(size, ...) with an
            # undefined name `size`; use the axis length instead.
            grid = _axis_coords(0).astype(dtype_np)
            if not center:
                grid -= min(grid)
            return grid
        elif len(shape) == 2:
            lin_y = _axis_coords(0)
            lin_x = _axis_coords(1)
            # Broadcast the two axis vectors to full 2D coordinate grids
            grid_y = (lin_y[:, np.newaxis] * np.ones_like(lin_x)[np.newaxis, :]).astype(dtype_np)
            grid_x = (lin_x[np.newaxis, :] * np.ones_like(lin_y)[:, np.newaxis]).astype(dtype_np)
            if not center:
                grid_y -= min(grid_y)
                grid_x -= min(grid_x)
            return ((grid_y, grid_x))
        elif len(shape) == 3:
            # NOTE(review): unlike the 2D branch, these are 1D axis vectors,
            # not broadcast 3D grids — behavior preserved as-is; confirm intent.
            grid_z = _axis_coords(0).astype(dtype_np)
            grid_y = _axis_coords(1).astype(dtype_np)
            grid_x = _axis_coords(2).astype(dtype_np)
            if not center:
                grid_y -= min(grid_y)
                grid_x -= min(grid_x)
                grid_z -= min(grid_z)
            return ((grid_z, grid_y, grid_x))
        else:
            raise NotImplementedError('grid() supports at most 3 dimensions (got %d)' % len(shape))
    elif backend == 'arrayfire':
        if len(shape) == 1:
            # Fixed: apply the same centering/scale convention as the numpy
            # branch (scale and the shape//2 shift were previously ignored).
            grid = (arrayfire.range(shape[0]) - shape[0] // 2) * scale[0] - offset[0]
            if not center:
                grid -= min(grid)
            return grid
        elif len(shape) == 2:
            grid_y = (arrayfire.range(shape[0], shape[1], dim=0) -
                      shape[0] // 2) * scale[0] - offset[0]
            # Fixed: the x axis previously used scale[0]
            grid_x = (arrayfire.range(shape[0], shape[1], dim=1) -
                      shape[1] // 2) * scale[1] - offset[1]
            if not center:
                grid_y -= min(grid_y)
                grid_x -= min(grid_x)
            return ((grid_y, grid_x))
        elif len(shape) == 3:
            grid_z = (arrayfire.range(shape[0], shape[1], shape[2], dim=0) -
                      shape[0] // 2) * scale[0] - offset[0]
            # Fixed: the y and x axes previously both used scale[0]
            grid_y = (arrayfire.range(shape[0], shape[1], shape[2], dim=1) -
                      shape[1] // 2) * scale[1] - offset[1]
            grid_x = (arrayfire.range(shape[0], shape[1], shape[2], dim=2) -
                      shape[2] // 2) * scale[2] - offset[2]
            if not center:
                grid_y -= min(grid_y)
                grid_x -= min(grid_x)
                grid_z -= min(grid_z)
            return ((grid_z, grid_y, grid_x))
        else:
            raise NotImplementedError('grid() supports at most 3 dimensions (got %d)' % len(shape))
    else:
        raise NotImplementedError('Backend %s is not implemented!' % backend)
def assert_equality(x1, x2, metric='max', threshold=None):
    """
    Check the equality of two arrays. The arrays need not be of the same datatype.
    Parameters
    ----------
    x1: array-like
        The first array to compare
    x2: array-like
        The second array to compare
    metric: str
        Optional. Defines the metric used to determine equality. Can be 'max' or 'ssd'
    threshold: float
        Optional. The threshold to hold the SSD of x1 and x2 below.
        If not provided, uses llops.precision(x1)
    Returns
    -------
    None
    Raises
    ------
    AssertionError:
        If the arrays differ by more than the threshold under the metric.
    ValueError:
        If an unknown metric is requested (previously this silently passed).
    """
    # Ensure both arrays are the same backend
    x2 = asbackend(x2, getBackend(x1))
    # Default threshold scales machine precision by the number of elements
    threshold = precision(x1) * size(x1) if threshold is None else threshold
    # Vectorize both arrays to remove extra dimensions
    x1, x2 = vec(x1), vec(x2)
    # Fixed: string comparison previously used `is` (identity), which is
    # fragile and relies on CPython string interning; use `==` instead.
    if metric == 'ssd':
        # Determine SSD between arrays
        ssd = sum(abs(x2 - x1) ** 2)
        # Check equality
        assert ssd < threshold, "SSD of inputs (%g) was greater than threshold (%g)" % (ssd, threshold)
    elif metric == 'max':
        # Check that max difference is less than precision
        max_difference = max(abs(x1 - x2))
        # Check equality
        assert max_difference < threshold, "Max difference of inputs (%g) was greater than threshold (%g)" % (max_difference, threshold)
    else:
        # An unknown metric used to be a silent no-op; fail loudly instead.
        raise ValueError("Invalid metric '%s' (expected 'max' or 'ssd')" % metric)
def ramp(shape, axis=0, min_value=0.0, max_value=1.0, reverse=False, dtype=None, backend=None):
    """
    Return a linear ramp along a given axis with a given shape.
    Parameters
    ----------
    shape: list or tuple
        The desired shape of the array.
    axis: int
        Optional. The axis over which to create the ramp.
    min_value: float
        Optional. The minimum value of the ramp.
    max_value: float
        Optional. The maximum value of the ramp.
    reverse: bool
        Optional. If true, the ramp is decreasing instead of increasing along axis.
    dtype: string
        Optional. The desired datatype, if different from the default.
    backend: string
        Optional. The desired backend, if different from the default.
    Returns
    -------
    array-like:
        A linear ramp along the given axis with the given shape.
    """
    # Get default dtype and backend if none provided
    backend = backend if backend is not None else config.default_backend
    dtype = dtype if dtype is not None else config.default_dtype
    # Return unit array if max and min values are the same
    # NOTE(review): this returns an array of ones, not of min_value — looks
    # intentional ("unit array"), but confirm against callers.
    if max_value == min_value:
        return ones(shape, dtype=dtype, backend=backend)
    # Generate ramp
    # The ramp is encoded as a slice with float start/stop/step; np.mgrid
    # expands such a slice like an arange. The step is rounded to 8 decimal
    # places to suppress floating-point drift in the step size.
    if reverse:
        delta = builtins.round((min_value - max_value) / shape[axis], 8)
        ramp_1d = slice(max_value, min_value + delta, delta)
    else:
        delta = builtins.round((max_value - min_value) / shape[axis], 8)
        ramp_1d = slice(min_value, max_value - delta, delta)
    # Generate slice coordinates (full extent on every axis except `axis`)
    coordinates = [slice(0, sz) for sz in shape]
    coordinates[axis] = ramp_1d
    # Create ramp; resize() stretches the mgrid output to exactly the
    # requested shape, since mgrid's length along `axis` depends on the
    # rounded step. NOTE(review): newer NumPy may require indexing mgrid
    # with tuple(coordinates) rather than a list — confirm.
    ramp = resize(np.mgrid[coordinates][axis], shape)
    # Return in the requested dtype/backend
    return asarray(ramp, dtype, backend)
def round(x):
    """
    Rounds all elements of an array to the nearest integer.
    By convention, 0.5 is rounded to 1.0.
    This function keeps the same datatype.
    NOTE(review): np.round rounds halves to even (banker's rounding), which
    differs from the "0.5 -> 1.0" convention stated above — confirm which
    behavior is actually required.
    Parameters
    ----------
    x: array-like
        Array to round.
    Returns
    -------
    array-like:
        The rounded values, with the same container type and dtype as x.
    """
    backend = getBackend(x)
    if backend == 'numpy':
        return cast_like(np.round(x), x)
    elif backend == 'arrayfire':
        if isComplex(x):
            # Round real and imaginary parts independently
            return cast_like(arrayfire.arith.round(real(x)) + 1j * arrayfire.arith.round(imag(x)), x)
        else:
            return cast_like(arrayfire.arith.round(x), x)
    elif backend == 'scalar':
        # Fixed: `round(x)` recursed into this very function forever (this
        # module-level def shadows the builtin); defer to the builtin.
        return builtins.round(x)
    elif backend == 'list':
        # Recurse per element; scalars terminate in the builtins branch above
        return [round(item) for item in x]
    elif backend == 'tuple':
        return tuple([round(item) for item in x])
    else:
        raise ValueError('Backend %s is not supported!' % backend)
@numpy_function
@real_valued_function
def resize(x, new_shape):
"""Resize an array, allowing the number of dimensions to change."""
return skimage.transform.resize(x, new_shape, anti_aliasing=True, mode='edge', preserve_range=True) | |
#!/usr/bin/env python3
# Copyright 2017 Christian Henning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
@title :recordings.py
@author :ch
@contact :christian@ini.ethz.ch
@created :04/24/2017
@version :1.0
@python_version :3.5.2
This class takes care of recording state variables during simulation.
"""
import configuration as config
from util.config_exception import ConfigException
from util import utils
from pypatterns.singleton import Singleton
from pypatterns.observer import Observer
from simulation import Simulation
import brian2 as b2
import os
import _pickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import logging
logger = logging.getLogger(config.logging_name)
class Recordings(Observer, metaclass=Singleton):
    """To understand the dynamics of a network, its internal state and
    variables must be recordable. This class shall allow one to analyse the
    dynamics during and after simulation according to the configs.
    Under the hood, this class simply creates instances of the Brian classes
    StateMonitor, PopulationRateMonitor and SpikeMonitor. However,
    SpikeMonitors are not instantiated in this class due to efficiency
    considerations. Moreover, the instantiation in the class NetworkModel is
    modified. This introduces an unnecessary interweaving of recording code
    with simulation code, but ensures that SpikeMonitors are only instantiated
    once.
    Note, the Singleton metaclass means there is at most one Recordings
    instance per process.
    Attributes:
    """
    def __init__(self, network):
        """Generate all recording objects and add them to the network.
        Note, SpikeMonitors have been already instantiated.
        Args:
            network: An instance of class NetworkModel.
        Returns:
        """
        super().__init__()
        self._network = network
        # Validate (and normalize) the recording options before building
        # any monitors, so malformed configs fail fast.
        Recordings._check_state_var_recordings()
        Recordings._check_population_rate_recordings()
        Recordings._check_spike_event_recordings()
        # FIXME Following two methods are dirty and misplaced.
        # Check that chosen layers exist (cannot be done in static methods, as
        # network has to be known).
        def layer_exists(tup, layer):
            if not (layer >=0 and layer < network.num_layers):
                raise ConfigException('Recording %s has non-existing layer.' \
                                      % (str(tup)))
        # Make sure, indices and vars exist in layer.
        def vars_exists(tup, layer, source, var, inds=None):
            if var is not None and isinstance(var, list):
                for v in var:
                    if not hasattr(source, v):
                        # Print the available state variables to help the
                        # user fix the config.
                        print(source.get_states().keys())
                        raise ConfigException('Variable ' + v + ' does not ' \
                                              + 'exist from recording %s.' \
                                              % (str(tup)))
            if inds is not None and isinstance(inds, list):
                for i in inds:
                    if not (i >=0 and i < source.N):
                        raise ConfigException('Recording %s cannot have ' \
                                              % (str(tup)) + 'index %d.' % (i))
        # Monitors are keyed by str(tup) so store_recordings() can look them
        # up again from the same config tuples.
        self._state_monitors = dict()
        self._pop_rate_monitors = dict()
        self._spike_monitors = dict()
        for tup in config.state_var_recordings:
            typ, layer, var, inds, dt, _ = tup
            layer_exists(tup, layer)
            exn, inn, eis, ies, ees = network.brian_objects(layer)
            # Map the type tag to the matching Brian object of this layer.
            source = None
            if typ == 'ne':
                source = exn
            elif typ == 'ni':
                source = inn
            elif typ == 'ei':
                source = eis
            elif typ == 'ie':
                source = ies
            else:
                source = ees
            vars_exists(tup, layer, source, var, inds)
            # Attach Brian time units; a None dt keeps the default clock.
            dt = dt * b2.ms if dt is not None else dt
            state_mon = b2.StateMonitor(source, var, inds, dt=dt)
            self._state_monitors[str(tup)] = state_mon
            network.add_component(state_mon)
        for tup in config.population_rate_recordings:
            typ, layer, _, _, _ = tup
            layer_exists(tup, layer)
            exn, inn, _, _, _ = network.brian_objects(layer)
            source = None
            if typ == 'ne':
                source = exn
            else:
                source = inn
            pop_rmon = b2.PopulationRateMonitor(source)
            self._pop_rate_monitors[str(tup)] = pop_rmon
            network.add_component(pop_rmon)
        for tup in config.spike_event_recordings:
            typ, layer, var, _ = tup
            layer_exists(tup, layer)
            # SpikeMonitors already exist on the network (see class docstring);
            # only fetch references here.
            sp_mon = None
            if typ == 'ne':
                sp_mon = network.exc_spike_monitor(layer)
            else:
                sp_mon = network.inh_spike_monitor(layer)
            vars_exists(tup, layer, sp_mon.source, var)
            self._spike_monitors[str(tup)] = sp_mon
        # For online recordings, we need to know, when the network state has
        # changed.
        if config.online_recording:
            sim = Simulation()
            sim.register(self)
    def update(self, *args, **kwargs):
        """Update plots for online recordings.
        Observer callback, invoked by Simulation when it notifies its
        registered observers.
        TODO: In future, one could incrementally write recordings to a file.
        Args:
        Returns:
        """
        if args[0] == 'Simulation':
            # TODO online plotting of recordings.
            #print(kwargs['curr_sim_time'])
            pass
        else:
            # Only Simulation is expected to notify this observer.
            assert(False)
    """
    Static class attribute, that contains the attributes passed to
    SpikeMonitors.
    """
    _spike_monitor_args = None
    def store_recordings(self):
        """Store the whole recordings made during simulation into files and
        optionally into plots.
        Args:
        Returns:
        """
        plt.close('all')
        if not os.path.isdir(config.recording_dir):
            os.makedirs(config.recording_dir)
        ### Handle StateMonitors.
        for tup in config.state_var_recordings:
            state_mon = self._state_monitors[str(tup)]
            typ, layer, var, inds, dt, duration = tup
            var_str = utils.list_to_str(var)
            inds_str = '_'+str(inds) if isinstance(inds, bool) \
                else utils.list_to_str(inds)
            folder_name = 'state_monitor_%s_%d_vars%s_indices%s_%s_%d' \
                % (typ, layer, var_str, inds_str, str(dt), duration)
            folder_name = os.path.join(config.recording_dir, folder_name)
            # NOTE(review): os.mkdir raises FileExistsError when rerunning
            # into a non-empty recording_dir — confirm this is intended.
            os.mkdir(folder_name)
            logger.info("StateMonitor recordings %s are stored in %s." \
                        % (str(tup), folder_name))
            dump_obj = dict()
            dump_obj['type'] = typ
            dump_obj['layer'] = layer
            dump_obj['variables'] = var
            dump_obj['indices'] = inds
            dump_obj['dt'] = dt
            dump_obj['recordings'] = dict()
            recs = dump_obj['recordings']
            # Trailing underscore attributes are Brian's unitless views.
            recs['t'] = np.array(getattr(state_mon, 't_'))
            for v in var:
                recs[v] = np.array(getattr(state_mon, '%s_' % v))
                assert(len(recs[v].shape) == 2)
            # Store recordings in file.
            dump_file = os.path.join(folder_name, 'recordings.pickle')
            with open(dump_file, 'wb') as f:
                pickle.dump(dump_obj, f)
            # Generate recording plots.
            if config.save_recording_plots:
                # Note, that duration is in ms, but as recs['t'] is
                # dimensionless, its values are interpretable as seconds.
                slice_gen = utils.list_to_val_dependent_slices(recs['t'],
                                                               duration/1000)
                # For each slice, a variables with all its recorded indices
                # will be part of a plot (one plot for each duration and
                # variable).
                # Compute min and max to scale y-axis uniformly per var.
                mins = dict()
                maxs = dict()
                for v in var:
                    mins[v] = np.min(recs[v])
                    maxs[v] = np.max(recs[v])
                for sind, eind in slice_gen:
                    for v in var:
                        # Note, that inds might be boolean.
                        ind_labels = inds
                        if not isinstance(inds, list):
                            ind_labels = list(range(recs[v].shape[0]))
                        vunit = getattr(state_mon.source,v).unit
                        Recordings._plot_slice(recs['t'], recs[v], sind, eind,
                                               v, ind_labels, str(tup), vunit,
                                               folder=folder_name,
                                               miny=mins[v], maxy=maxs[v])
        ### Handle PopulationRateMonitors.
        for tup in config.population_rate_recordings:
            prate_mon = self._pop_rate_monitors[str(tup)]
            typ, layer, duration, swin, swidth = tup
            folder_name = 'pop_rate_monitor_%s_%d_%d_%s_%s' \
                % (typ, layer, duration, str(swin), str(swidth))
            folder_name = os.path.join(config.recording_dir, folder_name)
            os.mkdir(folder_name)
            logger.info("PopulationRate recordings %s are stored in %s." \
                        % (str(tup), folder_name))
            dump_obj = dict()
            dump_obj['type'] = typ
            dump_obj['layer'] = layer
            dump_obj['t'] = np.array(getattr(prate_mon, 't_'))
            dump_obj['rate'] = np.array(getattr(prate_mon, 'rate_'))
            # Store recordings in file.
            dump_file = os.path.join(folder_name, 'recordings.pickle')
            with open(dump_file, 'wb') as f:
                pickle.dump(dump_obj, f)
            # Generate recording plots.
            if config.save_recording_plots:
                slice_gen = utils.list_to_val_dependent_slices(dump_obj['t'],
                                                               duration/1000)
                # Optionally smooth the rates with the configured window.
                if swin is not None:
                    rates = np.array(prate_mon.smooth_rate(swin, swidth*b2.ms))
                else:
                    rates = dump_obj['rate']
                # Compute min and max to scale y-axis uniformly for rates.
                miny = np.min(rates)
                maxy = np.max(rates)
                for sind, eind in slice_gen:
                    Recordings._plot_slice(dump_obj['t'], rates, sind, eind,
                                           'rate', None, str(tup), b2.Hz,
                                           folder=folder_name, miny=miny,
                                           maxy=maxy)
        ### Handle SpikeMonitors.
        for tup in config.spike_event_recordings:
            spike_mon = self._spike_monitors[str(tup)]
            typ, layer, var, duration = tup
            var_str = '_None' if var is None else utils.list_to_str(var)
            folder_name = 'spike_monitor_%s_%d_vars%s_%d' \
                % (typ, layer, var_str, duration)
            folder_name = os.path.join(config.recording_dir, folder_name)
            os.mkdir(folder_name)
            logger.info("Spike recordings %s are stored in %s." \
                        % (str(tup), folder_name))
            dump_obj = dict()
            dump_obj['type'] = typ
            dump_obj['layer'] = layer
            dump_obj['recordings'] = dict()
            recs = dump_obj['recordings']
            recs['t'] = np.array(getattr(spike_mon, 't_'))
            recs['i'] = np.array(getattr(spike_mon, 'i_'))
            if var is not None:
                for v in var:
                    recs[v] = np.array(getattr(spike_mon, '%s_' % v))
            # Store recordings in file.
            dump_file = os.path.join(folder_name, 'recordings.pickle')
            with open(dump_file, 'wb') as f:
                pickle.dump(dump_obj, f)
            # Generate recording plots.
            if config.save_recording_plots and len(recs['i']) > 0:
                # We need to keep track of the time to scale the x-axis.
                # etime = stime + duration/1000
                stime = 0
                slice_gen = utils.list_to_val_dependent_slices(recs['t'],
                                                               duration/1000)
                # Compute min and max values to properly and uniformly color
                # code vars.
                if var is not None:
                    mins = dict()
                    maxs = dict()
                    for v in var:
                        mins[v] = np.min(recs[v])
                        maxs[v] = np.max(recs[v])
                # We need to know the number of neurons, to set ymax.
                ymin = -0.5
                ymax = spike_mon.source.N - 0.5
                for sind, eind in slice_gen:
                    minx = stime
                    maxx = minx + duration/1000
                    stime = maxx
                    # Plot pure spike events.
                    Recordings._scatter_slice(recs['t'], recs['i'], sind, eind,
                                              minx, maxx, ymin, ymax, str(tup),
                                              folder=folder_name)
                    if var is None:
                        continue
                    # One additional color-coded scatter plot per variable.
                    for v in var:
                        vunit = getattr(spike_mon,v).unit
                        Recordings._scatter_slice(recs['t'], recs['i'], sind,
                                                  eind, minx, maxx, ymin, ymax,
                                                  str(tup), var=recs[v],
                                                  var_min=mins[v],
                                                  var_max=maxs[v], var_name=v,
                                                  var_unit=vunit,
                                                  folder=folder_name)
            elif len(recs['i']) == 0:
                logger.warning('Could not generate Plots for SpikeMonitor ' \
                               + 'recordings %s. No spike events.' \
                               % (str(tup)))
    @staticmethod
    def get_spike_monitor_args(layer):
        """As SpikeMonitors are not instantiated in this class (as they are
        needed by the network anyway, in order to compute firing rates), we
        need to let the network know, which arguments it has to pass to the
        SpikeMonitors it generates.
        Args:
            layer: The method returns the SpikeMonitor arguments for the
                excitatory and inhibitory neurons in a specific layer.
        Returns:
            A tuple of tuples (actually lists). The returned list has the
            following shape
            [[exc_variables, exc_record], [inh_variables, inh_record]]
        """
        # Lazily build the per-layer argument cache on first call.
        if Recordings._spike_monitor_args is None:
            Recordings._check_spike_event_recordings()
            Recordings._spike_monitor_args = dict()
            for tup in config.spike_event_recordings:
                t, l, var, duration = tup
                Recordings._spike_monitor_args.setdefault(l, [[None, False],
                                                              [None, False]])
                te, ti = Recordings._spike_monitor_args[l]
                # Select the excitatory or inhibitory entry of this layer.
                curr_tup = None
                if t == 'ne':
                    curr_tup = te
                else:
                    curr_tup = ti
                curr_tup[1] = True
                if var is not None:
                    if curr_tup[0] is None:
                        curr_tup[0] = []
                    curr_tup[0].extend(var)
        # Layers without any configured recording get default arguments.
        Recordings._spike_monitor_args.setdefault(layer, [[None, False],
                                                          [None, False]])
        return Recordings._spike_monitor_args[layer]
    @staticmethod
    def _plot_slice(time, var, sind, eind, var_name, ind_names, title, unit,
                    folder=None, miny=None, maxy=None):
        """Plot a time slice for a variable recording.
        Args:
            time: The time array.
            var: The recorded values.
            sind: Start index of slice.
            eind: End index of slice.
            var_name: Name of the recorded variable.
            ind_names: Names of the recorded indices.
            title: Plot title.
            unit: Variable unit.
            folder: Where to store plot.
            miny: Minimum y limit.
            maxy: Maximum y limit.
        Returns:
        """
        # 2D recordings hold one row per recorded neuron index.
        if len(var.shape) == 2:
            for i in range(var.shape[0]):
                label = '%s_%d' % (var_name, ind_names[i])
                values = var[i,sind:eind]
                b2.plot(time[sind:eind], values, label=label)
        else:
            label = var_name
            values = var[sind:eind]
            b2.plot(time[sind:eind], values, label=label)
        plt.legend()
        plt.title(title)
        plt.xlabel('time (seconds)')
        unit = str(unit)
        # 'rad' marks a dimensionless quantity; omit the unit suffix then.
        if unit == 'rad':
            plt.ylabel('%s' % (var_name))
        else:
            plt.ylabel('%s (%s)' % (var_name, unit))
        axes = plt.gca()
        # Make sure one also can see min and max vals in plot.
        if miny is not None and maxy is not None:
            eps = 0.01 * (maxy - miny)
            miny -= eps
            maxy += eps
        if miny is not None:
            axes.set_ylim(bottom=miny)
        if maxy is not None:
            axes.set_ylim(top=maxy)
        if folder is not None:
            plot_name = 'plot_%s_%d_%d.png' % (var_name, sind, eind)
            plot_name = os.path.join(folder, plot_name)
            plt.savefig(plot_name)
        plt.close()
    @staticmethod
    def _scatter_slice(time, neurons, sind, eind, xmin, xmax, ymin, ymax,
                       title, y_label='Neuron i', var=None, var_min=None,
                       var_max=None, var_name=None, var_unit='rad',
                       folder=None):
        """Create a scatter plot of a time slice for a spike recording.
        Variables recorded on spike events might be plotted as color coded
        points.
        Args:
            time: The time array.
            neurons: Which neuron spiked at a time point.
            sind: Start index of slice.
            eind: End index of slice.
            xmin: Start Time (needed to set x-range).
            xmax: End Time (needed to set x-range).
            ymin: Lowest y-value.
            ymax: Highest y-value.
            title: Plot title.
            y_label: y label.
            var: An optional color coded variable, that was recorded on spike
                events.
            var_min: Minimal var value.
            var_max: Maximum var value.
            var_name: Name of var.
            var_unit: Unit of var.
            folder: Where to store plot.
        Returns:
        """
        if var is None:
            b2.scatter(time[sind:eind], neurons[sind:eind])
        else:
            # Color-code the recorded variable on a fixed global scale.
            cm = plt.cm.get_cmap('coolwarm')
            sp = b2.scatter(time[sind:eind], neurons[sind:eind],
                            c=var[sind:eind], vmin=var_min, vmax=var_max,
                            cmap=cm)
            cb = plt.colorbar(sp)
            if var_name is not None:
                unit = str(var_unit)
                # 'rad' marks a dimensionless quantity; omit the unit suffix.
                if unit == 'rad':
                    cb.ax.set_xlabel('%s' % (var_name))
                else:
                    cb.ax.set_xlabel('%s (%s)' % (var_name, unit))
        axes = plt.gca()
        axes.set_xlim(left=xmin, right=xmax)
        axes.set_ylim(bottom=ymin, top=ymax)
        plt.title(title)
        plt.xlabel('time (seconds)')
        plt.ylabel(y_label)
        if folder is not None:
            if var_name is None:
                plot_name = 'plot_spikes_%d_%d.png' % (sind, eind)
            else:
                plot_name = 'plot_spikes_%s_%d_%d.png' % (var_name, sind, eind)
            plot_name = os.path.join(folder, plot_name)
            plt.savefig(plot_name)
        plt.close()
    @staticmethod
    def _check_state_var_recordings():
        """Assert that the option state_var_recordings is properly defined.
        Also normalizes entries in place: bare variable names and bare int
        indices are wrapped in one-element lists.
        Args:
        Returns:
        """
        for i, tup in enumerate(config.state_var_recordings):
            if not isinstance(tup, tuple) or len(tup) != 6:
                err_msg = 'Option \'state_var_recordings\' should be ' \
                          + ' a list of tuples of size 6.'
                raise ConfigException(err_msg)
            t, l, var, inds, _, _ = tup
            types = ['ne', 'ni', 'ee', 'ei', 'ie']
            if not (t in types and isinstance(l, int) and l >= 0 \
                    and (isinstance(var, str) or isinstance(var, list)) \
                    and (isinstance(inds, (bool, int)) \
                         or isinstance(inds, list))):
                err_msg = 'The tuple %s from option ' % (str(tup))\
                          + '\'state_var_recordings\' is not properly ' \
                          + 'formated.'
                raise ConfigException(err_msg)
            if isinstance(var, str):
                config.state_var_recordings[i] = \
                    utils.set_tuple_item(tup, 2, [var])
            # NOTE(review): when var was a str AND inds an int, this second
            # write rebuilds from the stale `tup`, discarding the var
            # normalization just above — looks like a bug; confirm.
            if isinstance(inds, int) and not isinstance(inds, bool):
                config.state_var_recordings[i] = \
                    utils.set_tuple_item(tup, 3, [inds])
    @staticmethod
    def _check_population_rate_recordings():
        """Assert that the option population_rate_recordings is properly
        defined.
        Args:
        Returns:
        """
        for tup in config.population_rate_recordings:
            if not isinstance(tup, tuple) or len(tup) != 5:
                err_msg = 'Option \'population_rate_recordings\' should be ' \
                          + ' a list of tuples of size 5.'
                raise ConfigException(err_msg)
            t, l, _, _, _ = tup
            if not (t in ['ne', 'ni'] and isinstance(l, int) and l >= 0):
                err_msg = 'The tuple %s from option ' % (str(tup))\
                          + '\'population_rate_recordings\' is not properly ' \
                          + 'formated.'
                raise ConfigException(err_msg)
    @staticmethod
    def _check_spike_event_recordings():
        """Assert that the option spike_event_recordings is properly defined.
        Also normalizes entries in place: a bare variable name is wrapped in
        a one-element list.
        Args:
        Returns:
        """
        for i, tup in enumerate(config.spike_event_recordings):
            if not isinstance(tup, tuple) or len(tup) != 4:
                err_msg = 'Option \'spike_event_recordings\' should be ' \
                          + ' a list of tuples of size 4.'
                raise ConfigException(err_msg)
            t, l, var, _ = tup
            if not (t in ['ne', 'ni'] and isinstance(l, int) and l >= 0 \
                    and (var is None or isinstance(var, str)
                         or isinstance(var, list))):
                err_msg = 'The tuple %s from option ' % (str(tup))\
                          + '\'spike_event_recordings\' is not properly ' \
                          + 'formated.'
                raise ConfigException(err_msg)
            if isinstance(var, str):
                config.spike_event_recordings[i] = \
                    utils.set_tuple_item(tup, 2, [var])
if __name__ == '__main__':
pass | |
#!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
from tqdm import tqdm
import numpy as np
# Training hyperparameters.
KEEP_PROB = 0.8 # lower values regularize more; with few epochs a higher keep_prob yields clearer segmentations
LEARNING_RATE = 0.0009 # higher rates cause overshooting and large loss oscillations (even 0.009 ruins training)
IMAGE_SHAPE = (160, 576) # higher resolution helps segment finer detail
EPOCHS = 50
BATCH_SIZE = 5 # smaller batches use less memory and train in a more SGD-like fashion
NUM_CLASSES = 2 # fewer classes are easier to segment with fewer epochs and smaller batches
USE_L2_LOSS = False
L2_LOSS_WEIGHT = 0.01
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU (training on CPU works but is very slow)
if not tf.test.gpu_device_name():
    warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
    """
    Load Pretrained VGG Model into TensorFlow.
    :param sess: TensorFlow Session
    :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
    :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
    """
    # Restore the frozen VGG16 SavedModel into the supplied session.
    tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
    # Pull the tensors the FCN decoder needs out of the restored default
    # graph, by their well-known names.
    graph = tf.get_default_graph()
    tensor_names = ('image_input:0', 'keep_prob:0', 'layer3_out:0',
                    'layer4_out:0', 'layer7_out:0')
    input_image, keep_prob, layer3_out, layer4_out, layer7_out = \
        (graph.get_tensor_by_name(name) for name in tensor_names)
    return input_image, keep_prob, layer3_out, layer4_out, layer7_out
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes, sess=None, vgg_input=None, keep_prob=None):
    """
    Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
    Follows the FCN-8 decoder of Long/Shelhamer/Darrell
    (https://people.eecs.berkeley.edu/~jonlong/long_shelhamer_fcn.pdf).
    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
    :param num_classes: Number of classes to classify
    :param sess: Optional session, only used for shape-debugging hooks
    :param vgg_input: Optional input placeholder for shape-debugging hooks
    :param keep_prob: Optional keep_prob placeholder for shape-debugging hooks
    :return: The Tensor for the last layer of output
    """
    # Shared layer configuration: L2 weight penalty on every kernel, and a
    # small random-normal initializer where one is specified.
    regularizer = tf.contrib.layers.l2_regularizer(1e-3)
    initializer = tf.random_normal_initializer(stddev=0.01)
    # Squeeze VGG layer 7 down to one channel per class via 1x1 convolution.
    score_7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding="same",
                               kernel_regularizer=regularizer)
    # 2x transposed-convolution upsample to layer 4's spatial resolution.
    up_7 = tf.layers.conv2d_transpose(score_7, num_classes, 4, 2,
                                      padding="same",
                                      kernel_regularizer=regularizer)
    # Class scores from VGG layer 4 for the first skip connection.
    score_4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1,
                               padding='same',
                               kernel_initializer=initializer,
                               kernel_regularizer=regularizer)
    # Skip connection (element-wise addition).
    fuse_4 = tf.add(up_7, score_4)
    # 2x upsample again to reach layer 3's resolution.
    up_4 = tf.layers.conv2d_transpose(fuse_4, num_classes, 4,
                                      strides=(2, 2),
                                      padding='same',
                                      kernel_initializer=initializer,
                                      kernel_regularizer=regularizer)
    # Class scores from VGG layer 3 for the second skip connection.
    score_3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1,
                               padding='same',
                               kernel_initializer=initializer,
                               kernel_regularizer=regularizer)
    fuse_3 = tf.add(up_4, score_3)
    # Final 8x upsample back to the input image resolution.
    return tf.layers.conv2d_transpose(fuse_3, num_classes, 16,
                                      strides=(8, 8),
                                      padding='same',
                                      kernel_initializer=initializer,
                                      kernel_regularizer=regularizer)
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    """
    Build the TensorFLow loss and optimizer operations.
    :param nn_last_layer: TF Tensor of the last layer in the neural network
    :param correct_label: TF Placeholder for the correct label image
    :param learning_rate: TF Placeholder for the learning rate
    :param num_classes: Number of classes to classify
    :return: Tuple of (logits, train_op, cross_entropy_loss)
    """
    # Flatten predictions and labels to (pixels, classes) so each row is one
    # pixel's class distribution.
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    flat_labels = tf.reshape(correct_label, (-1, num_classes))
    # Per-pixel softmax cross entropy, averaged over all pixels.
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=flat_labels))
    if USE_L2_LOSS:
        # Fold the kernel regularization losses collected by the layers into
        # the training objective, weighted by L2_LOSS_WEIGHT.
        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        training_loss = cross_entropy_loss + L2_LOSS_WEIGHT * sum(reg_losses)
    else:
        training_loss = cross_entropy_loss
    # Adam handles the step-size adaptation; minimize the selected objective.
    train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(training_loss)
    return logits, train_op, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
             correct_label, keep_prob, learning_rate):
    """
    Train the neural network and print out the loss during training.
    :param sess: TF Session
    :param epochs: Number of epochs
    :param batch_size: Batch size
    :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
    :param train_op: TF Operation to train the neural network
    :param cross_entropy_loss: TF Tensor for the amount of loss
    :param input_image: TF Placeholder for input images
    :param correct_label: TF Placeholder for label images
    :param keep_prob: TF Placeholder for dropout keep probability
    :param learning_rate: TF Placeholder for learning rate
    """
    sess.run(tf.global_variables_initializer())
    print("Training...")
    print()
    for epoch in range(epochs):
        print("EPOCH {} ...".format(epoch + 1))
        for batch_images, batch_labels in get_batches_fn(batch_size):
            # Feed the module-level KEEP_PROB / LEARNING_RATE constants.
            feed = {input_image: batch_images,
                    correct_label: batch_labels,
                    keep_prob: KEEP_PROB,
                    learning_rate: LEARNING_RATE}
            _, batch_loss = sess.run([train_op, cross_entropy_loss], feed_dict=feed)
            print("Loss: = {:.3f}".format(batch_loss))
        print()
tests.test_train_nn(train_nn)
def run():
    """End-to-end pipeline: fetch the pretrained VGG encoder, build the FCN
    decoder, train it on the KITTI road dataset, and save inference samples."""
    num_classes = NUM_CLASSES
    image_shape = IMAGE_SHAPE
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)
    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)
    # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on.
    #  https://www.cityscapes-dataset.com/
    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg16/vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
        epochs = EPOCHS
        batch_size = BATCH_SIZE
        # TF placeholders for labels and learning rate.
        correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        # Load the pretrained VGG encoder and grab the skip-connection tensors.
        input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)
        #sess.run(tf.Print(vgg_layer7_out, [tf.shape(vgg_layer7_out)]))
        #nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)
        # Build the decoder; sess/vgg_input/keep_prob enable the optional
        # shape-debugging prints inside layers().
        nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes,
                               sess=sess, vgg_input=input_image, keep_prob=keep_prob)
        logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)
        # Train the network.
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
                 correct_label, keep_prob, learning_rate)
        # Save inference samples for inspection.
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
        print("running on images - done")
        # OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
    run()
'''
File:
get_historical.py
Authors:
Prakash Dhimal, Kevin Sanford
Description:
Python module to get historical prices and volumes for a given company
'''
import numpy as np
import normalize as scale
'''
@param historical - list containing historical prices and volumes
@returns opening - list containing daily opening prices from the historical data
'''
#to get all opening prices together
def get_unscaled_opening(historical):
    """Return the raw (unscaled) daily opening prices from the historical data."""
    return [float(record['Open']) for record in historical]
'''
@param historical - list containing historical prices and volumes
@returns opening, scaled_opening - raw and scaled daily opening prices from the historical data
'''
def get_historical_opening(historical, scaler):
    """
    Return the daily opening prices and a scaled copy.
    :param historical: list of dicts with an 'Open' key
    :param scaler: scaler object forwarded to normalize.scale
    :returns: (opening, scaled_opening)
    """
    # Reuse get_unscaled_opening instead of duplicating its extraction loop.
    opening = get_unscaled_opening(historical)
    scaled_opening = scale.scale(opening, scaler)
    return opening, scaled_opening
'''
@param historical - list containing historical prices and volumes
@returns days_high, scaled_high - raw and scaled daily high prices from the historical data
'''
def get_historical_high(historical, scaler):
    """Return the daily high prices and a scaled copy."""
    days_high = [float(record['High']) for record in historical]
    scaled_high = scale.scale(days_high, scaler)
    return days_high, scaled_high
'''
@param historical - list containing historical prices and volumes
@returns days_low, scaled_low - raw and scaled daily low prices from the historical data
'''
def get_historical_low(historical, scaler):
    """Return the daily low prices and a scaled copy."""
    days_low = [float(record['Low']) for record in historical]
    scaled_low = scale.scale(days_low, scaler)
    return days_low, scaled_low
'''
@param historical - list containing historical prices and volumes
@returns closing, scaled_closing - raw and scaled daily closing prices from the historical data
'''
def get_historical_closing(historical, scaler):
    """Return the adjusted daily closing prices and a scaled copy."""
    closing = [float(record['Adj_Close']) for record in historical]
    scaled_closing = scale.scale(closing, scaler)
    return closing, scaled_closing
'''
@param historical - list containing historical prices and volumes
       company - Share object
@returns historical_volume - list containing daily volume from the historical data
         average_volume - list containing the average volume repeated for each day,
         plus scaled copies of both
'''
def get_historical_volume(historical, company, scaler):
    """Return the daily traded volumes and the company's average daily volume
    (repeated once per day), each with a scaled counterpart."""
    historical_volume = [float(record['Volume']) for record in historical]
    # Query the Share object once per day, mirroring the original call pattern.
    average_volume = [float(company.get_avg_daily_volume()) for _ in historical]
    scaled_historical_volume = scale.scale(historical_volume, scaler)
    scaled_average_volume = scale.scale(average_volume, scaler)
    return historical_volume, average_volume, scaled_historical_volume, scaled_average_volume
'''
@param historical - list containing historical prices and volumes
       scaler
@returns change - day-over-day closing-price change (0 for the first day)
         scaled_change - the same changes scaled by the scaler
'''
def get_change(historical, scaler):
    """Return the day-over-day closing-price change (0 for the first day)
    and a scaled copy."""
    change = [0]
    for prev_day, next_day in zip(historical, historical[1:]):
        change.append(float(next_day["Close"]) - float(prev_day['Close']))
    scaled_change = scale.scale(change, scaler)
    return change, scaled_change
'''
def get_range():
'''
'''
Method to stack training data together.
Stacks opening, high, low, and optionally spread (daily change) and volume.
The result is a training data array of (sample size X # of features)
and a target array of (sample size X 1).
@param historical list, company - Share object
@returns data - training data
         closing - target data
'''
def training_data(historical, company, scaler, useSpread, useVolume):
    """
    Stack the training features together.
    Base features are opening, high and low; the daily change (spread) and
    volume are appended when requested. Output arrays are shaped
    (sample size, # of features); targets are the closing prices.
    :param historical: list of historical price/volume dicts
    :param company: Share object (used for the average daily volume)
    :param scaler: scaler forwarded to normalize.scale
    :param useSpread: include the daily price change as a feature
    :param useVolume: include the daily volume as a feature
    :returns: (data, closing, scaled_data, scaled_closing)
    """
    historical_opening, scaled_opening = get_historical_opening(historical, scaler)
    historical_closing, scaled_closing = get_historical_closing(historical, scaler)
    historical_high, scaled_high = get_historical_high(historical, scaler)
    historical_low, scaled_low = get_historical_low(historical, scaler)
    historical_volume, average_volume, scaled_volume, scaled_avg_vol = get_historical_volume(historical, company, scaler)
    change, scaled_change = get_change(historical, scaler)
    opening = np.array(historical_opening)
    _scaled_opening = np.array(scaled_opening)
    volume = np.array(historical_volume)
    _scaled_volume = np.array(scaled_volume)
    high = np.array(historical_high)
    _scaled_high = np.array(scaled_high)
    low = np.array(historical_low)
    _scaled_low = np.array(scaled_low)
    closing = np.array(historical_closing)
    _scaled_closing = np.array(scaled_closing)
    _change = np.array(change)
    _scaled_change = np.array(scaled_change)
    # Select the feature set; spread and volume are optional extras.
    if useSpread is False and useVolume is False:
        data = np.vstack((opening, high, low))
        scaled_data = np.vstack((_scaled_opening, _scaled_high, _scaled_low))
    elif useSpread is True and useVolume is False:
        data = np.vstack((opening, high, low, _change))
        scaled_data = np.vstack((_scaled_opening, _scaled_high, _scaled_low, _scaled_change))
    elif useSpread is False and useVolume is True:
        data = np.vstack((opening, high, low, volume))
        scaled_data = np.vstack((_scaled_opening, _scaled_high, _scaled_low, _scaled_volume))
    else:
        data = np.vstack((opening, high, low, _change, volume))
        scaled_data = np.vstack((_scaled_opening, _scaled_high, _scaled_low, _scaled_change, _scaled_volume))
    # BUG FIX: np.vstack yields a (features, samples) array. The original code
    # used data.reshape(samples, features), which preserves row-major element
    # order and therefore interleaves different features into the same row.
    # Transposing keeps each sample's features together in one row.
    data = data.T
    scaled_data = scaled_data.T
    return data, closing, scaled_data, _scaled_closing
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from research.slim.nets import inception
slim = tf.contrib.slim
class InceptionV1Test(tf.test.TestCase):
    """Graph-construction tests for slim's InceptionV1: endpoint names,
    tensor shapes, parameter count, and variable-reuse behavior."""

    def testBuildClassificationNetwork(self):
        # Full classifier: logits come from the squeezed Logits head and a
        # 'Predictions' endpoint of shape (batch, num_classes) is exposed.
        batch_size = 5
        height, width = 224, 224
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        logits, end_points = inception.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith(
            'InceptionV1/Logits/SpatialSqueeze'))
        self.assertListEqual(logits.get_shape().as_list(),
                             [batch_size, num_classes])
        self.assertTrue('Predictions' in end_points)
        self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                             [batch_size, num_classes])

    def testBuildPreLogitsNetwork(self):
        # num_classes=None stops construction before the logits layer; the
        # returned net is the pooled 1x1x1024 feature map.
        batch_size = 5
        height, width = 224, 224
        num_classes = None
        inputs = tf.random_uniform((batch_size, height, width, 3))
        net, end_points = inception.inception_v1(inputs, num_classes)
        self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))
        self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
        self.assertFalse('Logits' in end_points)
        self.assertFalse('Predictions' in end_points)

    def testBuildBaseNetwork(self):
        # The base network ends at Mixed_5c with a 7x7x1024 feature map and
        # exposes exactly the expected endpoint names.
        batch_size = 5
        height, width = 224, 224
        inputs = tf.random_uniform((batch_size, height, width, 3))
        mixed_6c, end_points = inception.inception_v1_base(inputs)
        self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
        self.assertListEqual(mixed_6c.get_shape().as_list(),
                             [batch_size, 7, 7, 1024])
        expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
                              'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',
                              'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
                              'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
                              'Mixed_5b', 'Mixed_5c']
        self.assertItemsEqual(end_points.keys(), expected_endpoints)

    def testBuildOnlyUptoFinalEndpoint(self):
        # Building up to each final_endpoint must expose exactly the endpoints
        # constructed so far (a fresh graph per endpoint).
        batch_size = 5
        height, width = 224, 224
        endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
                     'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
                     'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
                     'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
                     'Mixed_5c']
        for index, endpoint in enumerate(endpoints):
            with tf.Graph().as_default():
                inputs = tf.random_uniform((batch_size, height, width, 3))
                out_tensor, end_points = inception.inception_v1_base(
                    inputs, final_endpoint=endpoint)
                self.assertTrue(out_tensor.op.name.startswith(
                    'InceptionV1/' + endpoint))
                self.assertItemsEqual(endpoints[:index + 1], end_points.keys())

    def testBuildAndCheckAllEndPointsUptoMixed5c(self):
        # Every intermediate endpoint must have the documented output shape.
        batch_size = 5
        height, width = 224, 224
        inputs = tf.random_uniform((batch_size, height, width, 3))
        _, end_points = inception.inception_v1_base(inputs,
                                                    final_endpoint='Mixed_5c')
        endpoints_shapes = {'Conv2d_1a_7x7': [5, 112, 112, 64],
                            'MaxPool_2a_3x3': [5, 56, 56, 64],
                            'Conv2d_2b_1x1': [5, 56, 56, 64],
                            'Conv2d_2c_3x3': [5, 56, 56, 192],
                            'MaxPool_3a_3x3': [5, 28, 28, 192],
                            'Mixed_3b': [5, 28, 28, 256],
                            'Mixed_3c': [5, 28, 28, 480],
                            'MaxPool_4a_3x3': [5, 14, 14, 480],
                            'Mixed_4b': [5, 14, 14, 512],
                            'Mixed_4c': [5, 14, 14, 512],
                            'Mixed_4d': [5, 14, 14, 512],
                            'Mixed_4e': [5, 14, 14, 528],
                            'Mixed_4f': [5, 14, 14, 832],
                            'MaxPool_5a_2x2': [5, 7, 7, 832],
                            'Mixed_5b': [5, 7, 7, 832],
                            'Mixed_5c': [5, 7, 7, 1024]}
        self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
        for endpoint_name in endpoints_shapes:
            expected_shape = endpoints_shapes[endpoint_name]
            self.assertTrue(endpoint_name in end_points)
            self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                                 expected_shape)

    def testModelHasExpectedNumberOfParameters(self):
        # Pin the total trainable-parameter count of the base network.
        batch_size = 5
        height, width = 224, 224
        inputs = tf.random_uniform((batch_size, height, width, 3))
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            inception.inception_v1_base(inputs)
        total_params, _ = slim.model_analyzer.analyze_vars(
            slim.get_model_variables())
        self.assertAlmostEqual(5607184, total_params)

    def testHalfSizeImages(self):
        # Half-resolution inputs shrink the final feature map to 4x4.
        batch_size = 5
        height, width = 112, 112
        inputs = tf.random_uniform((batch_size, height, width, 3))
        mixed_5c, _ = inception.inception_v1_base(inputs)
        self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
        self.assertListEqual(mixed_5c.get_shape().as_list(),
                             [batch_size, 4, 4, 1024])

    def testUnknownImageShape(self):
        # Spatial dims unknown at graph time; run the graph to verify the
        # concrete pre-pool shape.
        tf.reset_default_graph()
        batch_size = 2
        height, width = 224, 224
        num_classes = 1000
        input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
        with self.test_session() as sess:
            inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
            logits, end_points = inception.inception_v1(inputs, num_classes)
            self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
            self.assertListEqual(logits.get_shape().as_list(),
                                 [batch_size, num_classes])
            pre_pool = end_points['Mixed_5c']
            feed_dict = {inputs: input_np}
            tf.global_variables_initializer().run()
            pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
            self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])

    def testGlobalPoolUnknownImageShape(self):
        # global_pool=True supports non-square, unknown-shape inputs.
        tf.reset_default_graph()
        batch_size = 1
        height, width = 250, 300
        num_classes = 1000
        input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
        with self.test_session() as sess:
            inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
            logits, end_points = inception.inception_v1(inputs, num_classes,
                                                        global_pool=True)
            self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
            self.assertListEqual(logits.get_shape().as_list(),
                                 [batch_size, num_classes])
            pre_pool = end_points['Mixed_5c']
            feed_dict = {inputs: input_np}
            tf.global_variables_initializer().run()
            pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
            self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])

    def testUnknowBatchSize(self):
        # Batch dimension unknown at graph time; static shape stays None.
        batch_size = 1
        height, width = 224, 224
        num_classes = 1000
        inputs = tf.placeholder(tf.float32, (None, height, width, 3))
        logits, _ = inception.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(),
                             [None, num_classes])
        images = tf.random_uniform((batch_size, height, width, 3))
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch_size, num_classes))

    def testEvaluation(self):
        # is_training=False builds an inference graph that can be run.
        batch_size = 2
        height, width = 224, 224
        num_classes = 1000
        eval_inputs = tf.random_uniform((batch_size, height, width, 3))
        logits, _ = inception.inception_v1(eval_inputs, num_classes,
                                           is_training=False)
        predictions = tf.argmax(logits, 1)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEqual(output.shape, (batch_size,))

    def testTrainEvalWithReuse(self):
        # A second tower with reuse=True shares variables with the first.
        train_batch_size = 5
        eval_batch_size = 2
        height, width = 224, 224
        num_classes = 1000
        train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
        inception.inception_v1(train_inputs, num_classes)
        eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
        logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
        predictions = tf.argmax(logits, 1)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEqual(output.shape, (eval_batch_size,))

    def testLogitsNotSqueezed(self):
        # spatial_squeeze=False keeps the 1x1 spatial dims on the logits.
        num_classes = 25
        images = tf.random_uniform([1, 224, 224, 3])
        logits, _ = inception.inception_v1(images,
                                           num_classes=num_classes,
                                           spatial_squeeze=False)
        with self.test_session() as sess:
            tf.global_variables_initializer().run()
            logits_out = sess.run(logits)
            self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
    tf.test.main()
import numpy as np
def test_generate_minimax_move():
    """
    First checks whether the agent can return an action.
    Then it asserts the agent produces a valid move (-1 for invalid boards).
    Next, it tests whether the agent can produce a winning move
    given a board state.
    """
    from agents.agent_minimax import generate_move
    from agents.common import NO_PLAYER, PLAYER1, PLAYER2, BoardPiece
    # blank board test: the agent must pick some column
    test_board = np.full((6, 7), NO_PLAYER)
    action, _ = generate_move(test_board, PLAYER1, None)
    assert action != -1
    # test the action should be in b/w 0 and 6
    # negative test - invalid situations: an (almost) full board yields no move
    test_board = np.full((6, 7), PLAYER2)
    test_board[0, 0] = NO_PLAYER
    action, _ = generate_move(test_board, PLAYER1, None)
    assert (action == -1)
    # NOTE(review): np.zeros defaults to float dtype, unlike the BoardPiece
    # boards above — presumably generate_move rejects this board for that
    # reason; confirm the intended invalid condition.
    test_board = np.zeros((6, 7))
    test_board[3:, 0] = PLAYER1
    test_board[5, 1:3] = PLAYER2
    action, _ = generate_move(test_board, PLAYER1, None)
    assert (action == -1)
    action, _ = generate_move(test_board, PLAYER2, None)
    assert (action == -1)
    # positive test - winning move expected in column 5
    from agents.common import string_to_board
    board_str = """|==============|
| |
| |
| |
| X O |
| O X X O |
|X O X O O X |
|==============|
|0 1 2 3 4 5 6 |"""
    board = string_to_board(board_str)
    action, _ = generate_move(board, PLAYER1, None)
    assert (action == 5)
    # second position - winning move expected in column 4
    board_str = """|==============|
| |
| |
| |
| X O |
| O O X O |
|X O X O X X |
|==============|
|0 1 2 3 4 5 6 |"""
    board = string_to_board(board_str)
    action, _ = generate_move(board, PLAYER1, None)
    assert (action == 4)
import sys
sys.dont_write_bytecode = True
import numpy as np
import scipy.sparse as sp
from network_propagation_methods import sample_data, netprop, minprop_2, minprop_3
#### Parameters #############
# convergence threshold for the propagation iterations
eps = 1e-6
# maximum number of iterations
max_iter = 1000
# diffusion parameters for the three networks (P, D, C)
alphaP, alphaD, alphaC = 0.33, 0.33, 0.33
# random seed (used for both sample_data and the initial labels)
seed = 123
#### load networks (hypothetical data) #######
# Normalized adjacency matrices of the three homogeneous networks and the
# normalized bi-adjacency matrices linking each pair of them.
norm_adj_networkP, norm_adj_networkD, norm_adj_networkC, norm_biadj_networkPD, norm_biadj_networkPC, norm_biadj_networkDC = sample_data(seed)
#### Network propagation ###########################
# Initial labels (hypothetical data): one random score per node per network.
np.random.seed(seed=seed)
yP = np.array(np.random.rand(norm_adj_networkP.shape[0]), dtype=np.float64)
yD = np.array(np.random.rand(norm_adj_networkD.shape[0]), dtype=np.float64)
yC = np.array(np.random.rand(norm_adj_networkC.shape[0]), dtype=np.float64)
## network propagation with a single network
fP, convergent = netprop(norm_adj_networkP, yP, alphaP, eps, max_iter)
print(convergent)
print(fP)
## MINProp with 2 homogeneous subnetworks
fP, fD, convergent = minprop_2(norm_adj_networkP, norm_adj_networkD, norm_biadj_networkPD, yP, yD, alphaP, alphaD, eps, max_iter)
print(convergent)
print(fP)
print(fD)
## MINProp with 3 homogeneous subnetworks
fP, fD, fC, convergent = minprop_3(norm_adj_networkP, norm_adj_networkD, norm_adj_networkC, norm_biadj_networkPD, norm_biadj_networkPC, norm_biadj_networkDC, yP, yD, yC, alphaP, alphaD, alphaC, eps, max_iter)
print(convergent)
print(fP)
print(fD)
print(fC)
import numpy as np
import os
from datetime import datetime
from pytz import timezone
import matplotlib.pyplot as plt
from agent_qlean import QLearnAgent
from agent_bandit import BanditAgent
from environment import Environment
from simulator import parameters
from simulator.transaction_model import TransactionModel
from experiments import rewards
from authenticators.simple_authenticators import RandomAuthenticator, \
HeuristicAuthenticator, OracleAuthenticator, NeverSecondAuthenticator, \
AlwaysSecondAuthenticator
# Agents/authenticators to compare; each entry is (authenticator, display name).
auths = [
    # (Environment(BanditAgent(do_reward_shaping=True)), 'Bandit (reward shaping)'),
    # (RandomAuthenticator(), 'Random'),
    # (OracleAuthenticator(), 'Oracle'),
    # (HeuristicAuthenticator(50), 'Heuristic'),
    # (NeverSecondAuthenticator(), 'NeverSecond'),
    # (AlwaysSecondAuthenticator(), 'AlwaysSecond'),
    (Environment(QLearnAgent('zero', 0.01, 0.1, 0.1, False)), 'Q-Learn'),
    (Environment(QLearnAgent('zero', 0.01, 0.1, 0.1, True)), 'Q-Learn with reward shaping'),
    (Environment(BanditAgent()), 'Bandit'),
    (Environment(BanditAgent(do_reward_shaping=True)), 'Bandit with reward shaping'),
]
authenticator = None
auth_name = ''
for k in range(len(auths)):
    # NOTE(review): no entry in auths is named 'Q-Learning (from scratch)', so
    # the else branch below appears unreachable — confirm whether the
    # pre-trained re-run is still wanted.
    if auth_name != 'Q-Learning (from scratch)':
        authenticator, auth_name = auths[k]
    else:  # if we just did Q-Learning, run it again with the pre-trained one
        auth_name = 'Q-Learning (pre-trained)'
    seed = 666
    print("-----")
    print(auth_name)
    print("-----")
    sum_monetary_rewards = None
    # Accumulate rewards over repeated runs (currently a single run).
    for i in range(1):
        # the parameters for the simulation
        params = parameters.get_default_parameters()
        params['seed'] = seed
        params['init_satisfaction'] = 0.9
        params['stay_prob'] = [0.9, 0.6]
        params['num_customers'] = 100
        params['num_fraudsters'] = 10
        params['end_date'] = datetime(2016, 12, 31).replace(tzinfo=timezone('US/Pacific'))
        # Cache results per (agent, seed, config) so reruns skip the simulation.
        path = 'results/{}_{}_{}_{}_{}_{}'.format(auth_name,
                                                  seed,
                                                  int(params['init_satisfaction']*10),
                                                  params['num_customers'],
                                                  params['num_fraudsters'],
                                                  params['end_date'].year)
        if os.path.exists(path+'.npy'):
            monetary_rewards = np.load(path+'.npy')
        else:
            # get the model for transactions
            model = TransactionModel(params, authenticator=authenticator)
            # run the simulation to completion
            while not model.terminated:
                model.step()
            agent_vars = model.log_collector.get_agent_vars_dataframe()
            agent_vars.index = agent_vars.index.droplevel(1)
            monetary_rewards = rewards.monetary_reward_per_timestep(agent_vars)
            np.save(path, monetary_rewards)
        if sum_monetary_rewards is None:
            sum_monetary_rewards = monetary_rewards
        else:
            sum_monetary_rewards += monetary_rewards
        seed += 1
    sum_monetary_rewards /= (i+1)
    # Line style per agent.
    # NOTE(review): only k in 0..3 is handled — a fifth auths entry would reuse
    # the previous iteration's `color`.
    if k == 0:
        color = 'r'
    elif k == 1:
        color = 'r--'
    elif k == 2:
        color = 'b'
    elif k == 3:
        color = 'b--'
    # NOTE(review): this plots `monetary_rewards` from the last run rather than
    # the averaged `sum_monetary_rewards`; identical while range(1) is a single
    # run, but presumably the average was intended — confirm.
    plt.plot(range(len(monetary_rewards)), np.cumsum(monetary_rewards), color, label=auth_name)
plt.xlabel('time step')
plt.ylabel('monetary reward (cumulative)')
plt.legend()
plt.tight_layout()
plt.show()
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PyTorchDisentanglement.utils.file_utils import Logger
class BaseModel(nn.Module):
    """Base class for models: parameter loading, output directories, logging,
    checkpoint save/load, and optimizer/scheduler construction.
    Subclasses must implement setup_model() and get_encodings()."""
    def __init__(self):
        super(BaseModel, self).__init__()
        # True once load_params() has populated self.params.
        self.params_loaded = False

    def setup(self, params, logger=None):
        """
        Setup required model components.
        #TODO: log system info, including git commit hash
        """
        self.load_params(params)
        self.check_params()
        self.make_dirs()
        # Reuse an externally supplied logger; otherwise create one and dump params.
        if logger is None:
            self.init_logging()
            self.log_params()
        else:
            self.logger = logger
        self.setup_model()
        self.setup_optimizer()

    def load_params(self, params):
        """
        Calculates a few extra parameters (output paths, batch counts)
        and sets the params object as a member variable.
        """
        params.cp_latest_filename = "latest_checkpoint_v"+params.version
        if not hasattr(params, "model_out_dir"):
            params.model_out_dir = params.out_dir + params.model_name
        params.cp_save_dir = params.model_out_dir + "/checkpoints/"
        params.log_dir = params.model_out_dir + "/logfiles/"
        params.save_dir = params.model_out_dir + "/savefiles/"
        params.disp_dir = params.model_out_dir + "/vis/"
        # NOTE(review): true division — batches_per_epoch (and num_batches) are
        # floats when epoch_size is not a multiple of batch_size; confirm
        # downstream code expects that.
        params.batches_per_epoch = params.epoch_size / params.batch_size
        params.num_batches = params.num_epochs * params.batches_per_epoch
        self.params = params
        self.params_loaded = True

    def check_params(self):
        """
        Check parameters with assertions.
        """
        assert self.params.num_pixels == int(np.prod(self.params.data_shape))

    def get_param(self, param_name):
        """
        Get param value from model.
        This is equivalent to self.param_name, except that it will return None if
        the param does not exist.
        """
        if hasattr(self, param_name):
            return getattr(self, param_name)
        else:
            return None

    def make_dirs(self):
        """Make output directories (checkpoints, logs, savefiles, visualizations)."""
        if not os.path.exists(self.params.model_out_dir):
            os.makedirs(self.params.model_out_dir)
        if not os.path.exists(self.params.log_dir):
            os.makedirs(self.params.log_dir)
        if not os.path.exists(self.params.cp_save_dir):
            os.makedirs(self.params.cp_save_dir)
        if not os.path.exists(self.params.save_dir):
            os.makedirs(self.params.save_dir)
        if not os.path.exists(self.params.disp_dir):
            os.makedirs(self.params.disp_dir)

    def init_logging(self, log_filename=None):
        # Log to a versioned file when requested, otherwise to the default stream.
        if self.params.log_to_file:
            if log_filename is None:
                log_filename = self.params.log_dir+self.params.model_name+"_v"+self.params.version+".log"
            self.logger = Logger(filename=log_filename, overwrite=True)
        else:
            self.logger = Logger(filename=None)

    def js_dumpstring(self, obj):
        """Dump json string with special NumpyEncoder"""
        return self.logger.js_dumpstring(obj)

    def log_params(self, params=None):
        """Use logging to write model params"""
        if params is not None:
            dump_obj = params.__dict__
        else:
            dump_obj = self.params.__dict__
        self.logger.log_params(dump_obj)

    def log_info(self, string):
        """Log input string"""
        self.logger.log_info(string)

    def write_checkpoint(self, session):
        """Write checkpoints"""
        # NOTE(review): `session` is unused here — presumably a leftover from a
        # TensorFlow-style API; confirm before removing.
        base_save_path = self.params.cp_save_dir+self.params.model_name+"_v"+self.params.version
        # NOTE(review): no separator between the versioned prefix and
        # cp_latest_filename, so the file is named
        # "<model>_v<version>latest_checkpoint_v<version>" — confirm intended.
        full_save_path = base_save_path+self.params.cp_latest_filename
        torch.save(self.state_dict(), full_save_path)
        self.logger.log_info("Full model saved in file %s"%full_save_path)
        return base_save_path

    def load_checkpoint(self, model_dir):
        """
        Load checkpoint model into session.
        Inputs:
            model_dir: String specifying the path to the checkpoint
        Returns the object produced by torch.load (a state dict here); the
        caller is responsible for applying it via load_state_dict.
        """
        assert self.params.cp_load == True, ("cp_load must be set to true to load a checkpoint")
        cp_file = model_dir+self.params.cp_latest_filename
        return torch.load(cp_file)

    def setup_model(self):
        # Subclasses build their layers here.
        raise NotImplementedError

    def get_optimizer(self, optimizer_params, trainable_variables):
        """Construct an SGD or Adam optimizer from the given params."""
        optimizer_name = optimizer_params.optimizer.name
        if(optimizer_name == "sgd"):
            optimizer = torch.optim.SGD(
                trainable_variables,
                lr=optimizer_params.weight_lr,
                weight_decay=optimizer_params.weight_decay)
        elif optimizer_name == "adam":
            optimizer = torch.optim.Adam(
                trainable_variables,
                lr=optimizer_params.weight_lr,
                weight_decay=optimizer_params.weight_decay)
        else:
            assert False, ("optimizer name must be 'sgd' or 'adam', not %s"%(optimizer_name))
        return optimizer

    def setup_optimizer(self):
        # Optimizer over all model parameters, plus a step-decay LR schedule.
        self.optimizer = self.get_optimizer(
            optimizer_params=self.params,
            trainable_variables=self.parameters())
        self.scheduler = torch.optim.lr_scheduler.MultiStepLR(
            self.optimizer,
            milestones=self.params.optimizer.milestones,
            gamma=self.params.optimizer.lr_decay_rate)

    def get_encodings(self):
        # Subclasses return their latent encodings here.
        raise NotImplementedError

    def print_update(self, input_data, input_labels=None, batch_step=0):
        """
        Log train progress information
        Inputs:
            input_data: data object containing the current image batch
            input_labels: data object containing the current label batch
            batch_step: current batch number within the schedule
        NOTE: For the analysis code to parse update statistics, the self.js_dumpstring() call
            must receive a dict object. Additionally, the self.js_dumpstring() output must be
            logged with <stats> </stats> tags.
            For example: logging.info("<stats>"+self.js_dumpstring(output_dictionary)+"</stats>")
        """
        update_dict = self.generate_update_dict(input_data, input_labels, batch_step)
        js_str = self.js_dumpstring(update_dict)
        self.log_info("<stats>"+js_str+"</stats>")

    def generate_update_dict(self, input_data, input_labels=None, batch_step=0):
        """
        Generates a dictionary to be logged in the print_update function
        """
        update_dict = dict()
        # Record per-parameter gradient statistics.
        # NOTE(review): assumes backward() has already run — param_var.grad is
        # None otherwise and .max() would fail; confirm call order.
        for param_name, param_var in self.named_parameters():
            grad = param_var.grad
            update_dict[param_name+"_grad_max_mean_min"] = [
                grad.max().item(), grad.mean().item(), grad.min().item()]
        return update_dict

    def generate_plots(self, input_data, input_labels=None):
        """
        Plot weights, reconstruction, gradients, etc
        Inputs:
            input_data: data object containing the current image batch
            input_labels: data object containing the current label batch
        """
        pass
import os
import re
import itertools
import cv2
import time
import numpy as np
import torch
from torch.autograd import Variable
from utils.craft_utils import getDetBoxes, adjustResultCoordinates
from data import imgproc
from data.dataset import SynthTextDataSet
import math
import xml.etree.ElementTree as elemTree
#-------------------------------------------------------------------------------------------------------------------#
def rotatePoint(xc, yc, xp, yp, theta):
    """Return the integer coordinates of (xp, yp) rotated about the center
    (xc, yc) by angle theta (radians)."""
    dx, dy = xp - xc, yp - yc
    cos_t, sin_t = math.cos(theta), math.sin(theta)
    # Apply the rotation to the offset, then shift back to the center.
    rot_x = cos_t * dx + sin_t * dy
    rot_y = -sin_t * dx + cos_t * dy
    return int(xc + rot_x), int(yc + rot_y)
def addRotatedShape(cx, cy, w, h, angle):
    """Return the four corner points [x, y] of a w-by-h box centered at
    (cx, cy), each rotated about the center via rotatePoint with -angle."""
    half_w = w / 2
    half_h = h / 2
    # Corners in order: top-left, top-right, bottom-right, bottom-left.
    corners = [(cx - half_w, cy - half_h),
               (cx + half_w, cy - half_h),
               (cx + half_w, cy + half_h),
               (cx - half_w, cy + half_h)]
    return [list(rotatePoint(cx, cy, px, py, -angle)) for px, py in corners]
def xml_parsing(xml):
    """Parse a labelImg-style annotation XML into a list of box-info dicts.

    Each dict has "points" (numpy array of corner coordinates), "text"
    (the label, or "###" for don't-care boxes named "dnc"), and "ignore".
    Both rotated ("robndbox") and axis-aligned ("bndbox") boxes are handled.
    """
    tree = elemTree.parse(xml)
    annotations = []  # collected label dicts, one per box
    for obj in tree.iter(tag="object"):
        annotation = {}
        annotation['name'] = obj.find("name").text
        # Rotated boxes: convert center/size/angle to four corner points.
        for rot_box in obj.iter(tag="robndbox"):
            cx = float(rot_box.find("cx").text)
            cy = float(rot_box.find("cy").text)
            w = float(rot_box.find("w").text)
            h = float(rot_box.find("h").text)
            angle = float(rot_box.find("angle").text)
            annotation['box_coodi'] = addRotatedShape(cx, cy, w, h, angle)
            annotations.append(annotation)
        # Axis-aligned boxes: expand min/max extents into four corners.
        for axis_box in obj.iter(tag="bndbox"):
            xmin = int(axis_box.find("xmin").text)
            ymin = int(axis_box.find("ymin").text)
            xmax = int(axis_box.find("xmax").text)
            ymax = int(axis_box.find("ymax").text)
            annotation['box_coodi'] = [[xmin, ymin], [xmax, ymin], [xmax, ymax],
                                       [xmin, ymax]]
            annotations.append(annotation)
    bounds = []
    for ann in annotations:
        is_dnc = ann['name'] == "dnc"
        bounds.append({
            "points": np.array(ann['box_coodi']),
            "text": "###" if is_dnc else ann['name'],
            "ignore": is_dnc,
        })
    return bounds
#-------------------------------------------------------------------------------------------------------------------#
def load_prescription_gt(dataFolder):
    """Collect parsed annotations for every ``*.jpg`` / ``*.xml`` pair found
    under ``dataFolder``.

    Returns:
        (list of per-image parsed box lists, sorted list of image paths)
    """
    img_paths = []
    gt_paths = []
    for root, _dirs, files in os.walk(dataFolder):
        for name in files:
            if '.jpg' in name:
                img_paths.append(os.path.join(root, name))
            if '.xml' in name:
                gt_paths.append(os.path.join(root, name))
    img_paths.sort()
    gt_paths.sort()
    parsed = []
    for img_path, gt_path in zip(img_paths, gt_paths):
        # The sorted image/gt lists must pair up file-by-file.
        assert img_path.split(".jpg")[0] == gt_path.split(".xml")[0]
        parsed.append(xml_parsing(gt_path))
    return parsed, img_paths
# NOTE
def load_prescription_cleval_gt(dataFolder):
    """Load CLEval-format ground truth (``*_cl.txt``) paired with ``*.jpg``
    images from a folder tree.

    Each gt line is ``x1,y1,x2,y2,x3,y3,x4,y4[,...]``; only the first eight
    integers are used as the box points.

    Returns:
        (list of per-image box-dict lists, sorted list of image paths)
    """
    total_img_path = []
    total_gt_path = []
    for root, _directories, files in os.walk(dataFolder):
        for file in files:
            if '.jpg' in file:
                total_img_path.append(os.path.join(root, file))
            if '_cl.txt' in file:
                total_gt_path.append(os.path.join(root, file))
    total_imgs_parsing_bboxes = []
    for img_path, gt_path in zip(sorted(total_img_path), sorted(total_gt_path)):
        # The sorted image/gt lists must pair up file-by-file.
        assert img_path.split(".jpg")[0] == gt_path.split('_label_cl.txt')[0]
        # Fix: use a context manager so the handle is closed promptly
        # (the original leaked the file object returned by open()).
        with open(gt_path, encoding="utf-8") as f:
            lines = f.readlines()
        word_bboxes = []
        for line in lines:
            # Strip a possible BOM before splitting the CSV fields.
            box_info = line.strip().encode("utf-8").decode("utf-8-sig").split(",")
            box_points = [int(box_info[i]) for i in range(8)]
            word_bboxes.append(
                {"points": np.array(box_points), "text": None, "ignore": None})
        total_imgs_parsing_bboxes.append(word_bboxes)
    return total_imgs_parsing_bboxes, sorted(total_img_path)
def load_synthtext_gt(data_folder):
    """Load word-level ground truth for the first 100 SynthText samples.

    Uses the project's SynthTextDataSet loader, normalises every word-box
    array to shape (num_boxes, 4, 2) and pairs each box with its word
    string.

    NOTE(review): the bare ``except`` and the ipdb breakpoint below are
    debugging leftovers; consider explicit shape checks instead.
    """
    synth_dataset = SynthTextDataSet(
        output_size=768, data_dir=data_folder, saved_gt_dir=data_folder, logging=False
    )
    img_names, img_bbox, img_words = synth_dataset.load_data(bbox="word")
    total_img_path = []
    total_imgs_bboxes = []
    # Only the first 100 images are evaluated.
    for index in range(len(img_bbox[:100])):
        img_path = os.path.join(data_folder, img_names[index][0])
        total_img_path.append(img_path)
        try:
            # Multi-box case: boxes presumably come as (2, 4, N);
            # transpose to (N, 4, 2) -- TODO confirm the source layout.
            wordbox = img_bbox[index].transpose((2, 1, 0))
        except:
            # Single-box case: add the box axis, then swap to (1, 4, 2).
            wordbox = np.expand_dims(img_bbox[index], axis=0)
            wordbox = wordbox.transpose((0, 2, 1))
        # Split the per-image text blobs into individual non-empty words.
        words = [re.split(" \n|\n |\n| ", t.strip()) for t in img_words[index]]
        words = list(itertools.chain(*words))
        words = [t for t in words if len(t) > 0]
        if len(words) != len(wordbox):
            import ipdb
            ipdb.set_trace()
        single_img_bboxes = []
        for j in range(len(words)):
            box_info_dict = {"points": None, "text": None, "ignore": None}
            box_info_dict["points"] = wordbox[j]
            box_info_dict["text"] = words[j]
            box_info_dict["ignore"] = False
            single_img_bboxes.append(box_info_dict)
        total_imgs_bboxes.append(single_img_bboxes)
    return total_imgs_bboxes, total_img_path
def load_icdar2015_gt(dataFolder, isTraing=False):
    """Load ICDAR2015 ground truth (quadrilateral boxes).

    For every ``gt_<name>.txt`` file the matching ``<name>.jpg`` path is
    derived, each line ``x1,y1,...,x4,y4[,word]`` becomes a (4, 2) point
    array, and ``###`` transcriptions are flagged as ignored.

    Returns:
        (list of per-image box-dict lists, list of image paths)
    """
    if isTraing:
        img_folderName = "ch4_training_images"
        gt_folderName = "ch4_training_localization_transcription_gt"
    else:
        img_folderName = "ch4_test_images"
        gt_folderName = "ch4_test_localization_transcription_gt"
    gt_folder_path = os.listdir(os.path.join(dataFolder, gt_folderName))
    total_imgs_bboxes = []
    total_img_path = []
    for gt_name in gt_folder_path:
        gt_path = os.path.join(dataFolder, gt_folderName, gt_name)
        # gt_<name>.txt in the gt folder  ->  <name>.jpg in the image folder
        img_path = (
            gt_path.replace(gt_folderName, img_folderName)
            .replace(".txt", ".jpg")
            .replace("gt_", "")
        )
        # Fix: the original read the image and drew the boxes on it with
        # cv2.polylines, but the image was never used or returned; the
        # drawing also used the `np.int` alias, which was removed in
        # NumPy 1.24 and crashes at runtime.  The dead debug code is gone.
        with open(gt_path, encoding="utf-8") as f:  # close handle promptly
            lines = f.readlines()
        single_img_bboxes = []
        for line in lines:
            # Strip a possible BOM before splitting the CSV fields.
            box_info = line.strip().encode("utf-8").decode("utf-8-sig").split(",")
            box_points = np.array([int(box_info[j]) for j in range(8)],
                                  np.int32).reshape(4, 2)
            word = ",".join(box_info[8:])
            single_img_bboxes.append({
                "points": box_points,
                "text": word,
                # '###' marks unreadable / don't-care regions.
                "ignore": word == "###",
            })
        total_imgs_bboxes.append(single_img_bboxes)
        total_img_path.append(img_path)
    return total_imgs_bboxes, total_img_path
def load_icdar2013_gt(dataFolder, isTraing=False):
    """Load ICDAR2013 ground truth (axis-aligned boxes).

    For every ``gt_<name>.txt`` file the matching ``<name>.jpg`` path is
    derived, each line ``xmin,ymin,xmax,ymax[,word]`` is expanded to four
    corner points, and ``###`` transcriptions are flagged as ignored.

    Returns:
        (list of per-image box-dict lists, list of image paths)
    """
    # NOTE(review): both branches select the *test* folders; confirm whether
    # dedicated training folder names were intended for isTraing=True.
    if isTraing:
        img_folderName = "Challenge2_Test_Task12_Images"
        gt_folderName = "Challenge2_Test_Task1_GT"
    else:
        img_folderName = "Challenge2_Test_Task12_Images"
        gt_folderName = "Challenge2_Test_Task1_GT"
    gt_folder_path = os.listdir(os.path.join(dataFolder, gt_folderName))
    total_imgs_bboxes = []
    total_img_path = []
    for gt_name in gt_folder_path:
        gt_path = os.path.join(dataFolder, gt_folderName, gt_name)
        img_path = (
            gt_path.replace(gt_folderName, img_folderName)
            .replace(".txt", ".jpg")
            .replace("gt_", "")
        )
        # Fix: the original called cv2.imread here but never used the
        # result; dropping the read removes pointless I/O and the cv2
        # dependency of this loader.
        with open(gt_path, encoding="utf-8") as f:  # close handle promptly
            lines = f.readlines()
        single_img_bboxes = []
        for line in lines:
            # Strip a possible BOM before splitting the CSV fields.
            box_info = line.strip().encode("utf-8").decode("utf-8-sig").split(",")
            xmin, ymin, xmax, ymax = (int(box_info[j]) for j in range(4))
            word = ",".join(box_info[4:])
            single_img_bboxes.append({
                "points": [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]],
                "text": word,
                # '###' marks unreadable / don't-care regions.
                "ignore": word == "###",
            })
        total_imgs_bboxes.append(single_img_bboxes)
        total_img_path.append(img_path)
    return total_imgs_bboxes, total_img_path
def test_net(
    net,
    image,
    text_threshold,
    link_threshold,
    low_text,
    cuda,
    poly,
    canvas_size=1280,
    mag_ratio=1.5,
):
    """Run one text-detection forward pass and post-process the output.

    The image is aspect-ratio-resized (bounded by ``canvas_size``, magnified
    by ``mag_ratio``), normalised and fed through ``net``.  Channels 0 and 1
    of the network output are treated as the text-region score map and the
    link score map; ``getDetBoxes`` turns them into boxes and (when ``poly``
    is set) polygons, whose coordinates are scaled back to the original
    image resolution.

    Returns:
        (boxes, polys, [text-score heatmap image, link-score heatmap image])
    """
    # resize
    img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(
        image, canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=mag_ratio
    )
    # Inverse ratio maps heatmap coordinates back to the original image.
    ratio_h = ratio_w = 1 / target_ratio
    # preprocessing
    x = imgproc.normalizeMeanVariance(img_resized)
    x = torch.from_numpy(x).permute(2, 0, 1)  # [h, w, c] to [c, h, w]
    x = Variable(x.unsqueeze(0))  # [c, h, w] to [b, c, h, w]
    if cuda:
        x = x.cuda()
    # forward pass (inference only, so no autograd bookkeeping)
    with torch.no_grad():
        y, feature = net(x)
    # make score and link map
    score_text = y[0, :, :, 0].cpu().data.numpy().astype(np.float32)
    score_link = y[0, :, :, 1].cpu().data.numpy().astype(np.float32)
    # NOTE: crop the maps to the valid heatmap area reported by the resize
    # helper (presumably excludes padding) -- TODO confirm.
    score_text = score_text[: size_heatmap[0], : size_heatmap[1]]
    score_link = score_link[: size_heatmap[0], : size_heatmap[1]]
    # Post-processing
    boxes, polys = getDetBoxes(
        score_text, score_link, text_threshold, link_threshold, low_text, poly
    )
    # coordinate adjustment
    boxes = adjustResultCoordinates(boxes, ratio_w, ratio_h)
    polys = adjustResultCoordinates(polys, ratio_w, ratio_h)
    # Fall back to the rectangular box where no polygon was produced.
    for k in range(len(polys)):
        if polys[k] is None:
            polys[k] = boxes[k]
    # render results (optional)
    score_text = score_text.copy()
    render_score_text = imgproc.cvt2HeatmapImg(score_text)
    render_score_link = imgproc.cvt2HeatmapImg(score_link)
    render_img = [render_score_text, render_score_link]
    # ret_score_text = imgproc.cvt2HeatmapImg(render_img)
    return boxes, polys, render_img
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
import matplotlib.pyplot as plt
import numpy as np

# Demo: 2-D histogram of 100 random points in [0, 4) x [0, 4),
# rendered as a 3-D bar chart.
figure = plt.figure()
axes = figure.add_subplot(111, projection='3d')

samples_x, samples_y = np.random.rand(2, 100) * 4
counts, x_edges, y_edges = np.histogram2d(samples_x, samples_y, bins=4)
n_bars = (len(x_edges) - 1) * (len(y_edges) - 1)

# Anchor each bar a quarter-unit inside its bin edge so the 0.5-wide bars
# sit centred in their bins.
bar_x, bar_y = np.meshgrid(x_edges[:-1] + 0.25, y_edges[:-1] + 0.25)
bar_x = bar_x.flatten()
bar_y = bar_y.flatten()
bar_z = np.zeros(n_bars)

width = 0.5 * np.ones_like(bar_z)
depth = width.copy()
height = counts.flatten()

axes.bar3d(bar_x, bar_y, bar_z, width, depth, height, color='b', zsort='average')
plt.show()
from jaxns.nested_sampling import NestedSampler
from jaxns.prior_transforms import PriorChain, MVNDiagPrior, UniformPrior, GaussianProcessKernelPrior, HalfLaplacePrior,MVNPrior
from jaxns.plotting import plot_cornerplot, plot_diagnostics
from jaxns.gaussian_process.kernels import RBF
from jax.scipy.linalg import solve_triangular
from jax import random, jit
from jax import numpy as jnp
import pylab as plt
def main():
    """Demo: nested sampling of a two-component (compact + extended) sky
    model with Gaussian-process priors over the spectral indices.

    NOTE(review): the likelihood below references ``images`` and ``freqs``
    which are never defined (see the inline comment), so running this as-is
    raises NameError -- the data loading still needs to be wired in.
    """
    def log_normal(x, mean, uncert):
        # Log-density of an isotropic Gaussian with scalar std `uncert`.
        dx = x - mean
        dx = dx / uncert
        return -0.5 * x.size * jnp.log(2. * jnp.pi) - x.size*jnp.log(uncert) - 0.5 * dx @ dx
    N = 100
    X = jnp.linspace(-2., 2., N)[:, None]
    true_alpha, true_sigma, true_l, true_uncert = 1., 1., 0.2, 0.25
    data_mu = jnp.zeros((N, ))
    prior_cov = RBF()(X, X, true_l, true_sigma)
    # Draw a GP sample and add observation noise to fake a data vector.
    Y = jnp.linalg.cholesky(prior_cov) @ random.normal(random.PRNGKey(0), shape=(N, )) + data_mu
    Y_obs = Y + true_uncert * random.normal(random.PRNGKey(1), shape=(N, ))
    def predict_f(sigma, K, uncert, **kwargs):
        # GP posterior mean (zero prior mean); marginalised by the sampler.
        data_cov = jnp.square(uncert) * jnp.eye(X.shape[0])
        mu = jnp.zeros_like(Y_obs)
        return mu + K @ jnp.linalg.solve(K + data_cov, Y_obs)
    def predict_fvar(sigma, K, uncert, **kwargs):
        # GP posterior marginal variance; marginalised by the sampler.
        data_cov = jnp.square(uncert) * jnp.eye(X.shape[0])
        mu = jnp.zeros_like(Y_obs)
        return jnp.diag(K - K @ jnp.linalg.solve(K + data_cov, K))
    ###
    # define the prior chain
    # Here we assume each image is represented by pixels.
    # Alternatively, you could choose regions arranged non-uniformly over the image.
    image_shape = (128, 128)
    npix = image_shape[0] * image_shape[1]
    I150 = jnp.ones(image_shape)
    alpha_cw_gp_sigma = HalfLaplacePrior('alpha_cw_gp_sigma', 1.)
    alpha_mw_gp_sigma = HalfLaplacePrior('alpha_mw_gp_sigma', 1.)
    l_cw = UniformPrior('l_cw', 0., 0.5)#degrees
    l_mw = UniformPrior('l_mw', 0.5, 2.)#degrees
    K_cw = GaussianProcessKernelPrior('K_cw',RBF(), X, l_cw, alpha_cw_gp_sigma)
    K_mw = GaussianProcessKernelPrior('K_mw',RBF(), X, l_mw, alpha_mw_gp_sigma)
    alpha_cw = MVNPrior('alpha_cw', -1.5, K_cw)
    alpha_mw = MVNPrior('alpha_mw', -2.5, K_mw)
    S_cw_150 = UniformPrior('S150_cw', 0., I150)
    S_mw_150 = UniformPrior('S150_mw', 0., I150)
    uncert = HalfLaplacePrior('uncert', 1.)
    def log_likelihood(uncert, alpha_cw, alpha_mw, S_cw_150, S_mw_150):
        # Power-law extrapolation of both components from the 150 MHz flux.
        log_prob = 0
        for img, freq in zip(images, freqs): # <- need to define these
            I_total = S_mw_150 * (freq/150e6) ** (alpha_mw) + S_cw_150 * (freq/150e6) ** (alpha_cw)
            log_prob += log_normal(img, I_total, uncert)
        return log_prob
    prior_chain = PriorChain()\
        .push(alpha_cw).push(S_cw_150)\
        .push(alpha_mw).push(S_mw_150)\
        .push(uncert)
    print(prior_chain)
    ns = NestedSampler(log_likelihood, prior_chain, sampler_name='ellipsoid', predict_f=predict_f, predict_fvar=predict_fvar)
    def run_with_n(n):
        # NOTE(review): 'stoachastic_uncertainty' must match the jaxns
        # keyword spelling for the installed version -- confirm.
        @jit
        def run():
            return ns(key=random.PRNGKey(0),
                      num_live_points=n,
                      max_samples=1e3,
                      collect_samples=True,
                      termination_frac=0.01,
                      stoachastic_uncertainty=True)
        results = run()
        return results
    # for n in [200]:
    #     results = run_with_n(n)
    #     plt.scatter(n, results.logZ)
    #     plt.errorbar(n, results.logZ, yerr=results.logZerr)
    #     plt.title("Kernel: {}".format(rational_quadratic.__name__))
    #     plt.ylabel('log Z')
    #     plt.show()
    #
    #     plt.scatter(X[:, 0], Y_obs, label='data')
    #     plt.plot(X[:, 0], Y, label='underlying')
    #     plt.plot(X[:,0], results.marginalised['predict_f'], label='marginalised')
    #     plt.plot(X[:,0], results.marginalised['predict_f'] + jnp.sqrt(results.marginalised['predict_fvar']), ls='dotted', c='black')
    #     plt.plot(X[:,0], results.marginalised['predict_f'] - jnp.sqrt(results.marginalised['predict_fvar']), ls='dotted', c='black')
    #     plt.title("Kernel: {}".format(rational_quadratic.__name__))
    #     plt.legend()
    #     plt.show()
    #
    #     plot_diagnostics(results)
    #     plot_cornerplot(results)
    # return results.logZ, results.logZerr
if __name__ == '__main__':
    main()
from tqdm import tqdm
import numpy as np
from dataclasses import dataclass
from typing import Dict, List, Tuple, Union
import ipdb
import collections
import random
import torch
from copy import deepcopy
from torch.nn.utils.rnn import pad_sequence
from transformers.tokenization_utils_base import BatchEncoding
def _sample_by_model_predict_prob(prob_tensor, labels):
    """Turn per-vocabulary model-prediction probabilities into per-position
    masking probabilities: tokens the model already predicts well are masked
    with lower probability (1 - p).

    Returns:
        (1 - gathered probabilities, mean gathered probability as float)
    """
    vocab_probs = prob_tensor.clone()
    per_row = vocab_probs.expand(labels.shape[0], vocab_probs.shape[0]).to(labels.device)
    gathered = per_row.gather(1, labels)
    mean_predict_prob = float(torch.mean(gathered))
    return 1 - gathered, mean_predict_prob
def _sample_by_tfidf(idf_tensor, labels):
    """Per-position masking probability proportional to the token's tf-idf.

    tf is the *inverse* in-sequence count (rarer-in-sequence tokens score
    higher), idf comes from ``idf_tensor`` (one value per vocabulary id).
    """
    freq_rows = []
    for seq in labels:
        counts = torch.zeros_like(idf_tensor)  # vocab-sized, float
        seq_counts = torch.bincount(seq)  # length = max id in seq + 1
        counts[:len(seq_counts)] = seq_counts
        freq_rows.append(counts)
    tf = torch.stack(freq_rows)
    # 1/0 -> inf for absent tokens, but gather() below only reads ids that
    # occur in the sequence, whose counts are >= 1.
    tf_idf = (1 / tf) * idf_tensor
    return tf_idf.gather(1, labels)
def _tf_idf_decay_func(tf_idf_init_prob, step_i, decay=0.998):
    """Exponentially decay the tf-idf warm-up probability over training
    steps; step 0 returns the initial probability unchanged."""
    return tf_idf_init_prob if step_i == 0 else tf_idf_init_prob * decay ** step_i
@dataclass
class BpeDataCollatorForLanguageModeling:
    """
    Data collator used for language modeling.
    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for masked language modeling

    Supports two pre-training tasks ('mlm' and 'clm') and several masking
    strategies selected via ``mask_type``: 'normal', 'posterior_prob',
    'tf_idf', 'posterior_prob_with_tf_idf_warmup', 'lowest_prob' and the
    'part_prob*' mixtures.
    """
    # use_time_embed = False
    def __init__(self,
                 tokenizer,
                 use_time_embed,
                 mlm_probability,
                 pretrain_task,
                 clm_sample_n,
                 use_random_mlm_probability=None,
                 mlm_prob_min=None,
                 mlm_prob_max=None,
                 mask_type='normal',
                 mask_softmax_t=0.5,
                 masker_recorder=None,
                 tf_idf_warmup_decay=None,
                 is_record_mask_ratio=False,
                 return_timestamps=True,
                 softmax_t_decay_mode=None,
                 part_prob_percent=None,
                 part_prob_range=None
                 ):
        """Store the collation/masking configuration.

        Notable arguments:
          - mlm_probability: target fraction of tokens to mask for MLM.
          - pretrain_task: 'mlm' or 'clm'; selects the collation path.
          - clm_sample_n: number of prefix samples drawn per sequence (CLM).
          - mask_type: masking strategy; see class docstring.
          - masker_recorder: external recorder holding the model-probability
            / idf tensors and bookkeeping counters; required by the
            probability-based mask types.
        """
        self.tokenizer = tokenizer
        self.use_time_embed = use_time_embed
        self.mlm_probability = mlm_probability
        self.pretrain_task = pretrain_task
        self.clm_sample_n = clm_sample_n
        self.CLM_MIN_LEN = 32  # shortest prefix length sampled for CLM
        self.use_random_mlm_probability = use_random_mlm_probability
        self.mlm_prob_min = mlm_prob_min
        self.mlm_prob_max = mlm_prob_max
        self.mask_type = mask_type
        self.masker_recorder = masker_recorder
        self._mask_softmax_t = mask_softmax_t
        self.tf_idf_warmup_decay = tf_idf_warmup_decay
        self.is_record_mask_ratio = is_record_mask_ratio
        self.return_timestamps = return_timestamps
        self.softmax_t_decay_mode = softmax_t_decay_mode
        self.softmax_t_range = (0.0001, 0.8)  # (t_min, t_max) for decay
        self.total_training_step = 0
        self.part_prob_percent = part_prob_percent
        self.part_prob_range = part_prob_range
        if self.mask_type == 'part_prob_linear_increase':
            pmin, pmax = self.part_prob_range
            assert pmin < pmax
            # Start at the lower bound; adjust_part_prob_percent() raises it.
            self.part_prob_percent = pmin
        if self.use_random_mlm_probability is not None:
            assert self.mlm_prob_min
            assert self.mlm_prob_max
    @property
    def mask_softmax_t(self):
        # Floor the temperature so divisions by it stay finite.
        return max(self._mask_softmax_t, 1e-10)
    def _linear_decay_t(self, t_min, t_max, total_step, step):
        """Linear interpolation from t_max (step 0) down to t_min (at
        total_step)."""
        return (t_min - t_max) / total_step * step + t_max
    def adjust_part_prob_percent(self, step_now):
        """Linearly increase the share of batches using posterior-prob
        masking (only for mask_type 'part_prob_linear_increase')."""
        if self.mask_type == 'part_prob_linear_increase':
            p_min, p_max = self.part_prob_range
            self.part_prob_percent = p_max - self._linear_decay_t(p_min, p_max, self.total_training_step, step_now)
    def adjust_mask_softmax_t(self, step_now):
        """Anneal the masking softmax temperature according to
        ``softmax_t_decay_mode`` ('linear', 'exponential_concave',
        'exponential_convex' or 'by_prob')."""
        t_min, t_max = self.softmax_t_range
        if self.softmax_t_decay_mode in {'linear'}:
            self._mask_softmax_t = self._linear_decay_t(t_min, t_max, self.total_training_step, step_now)
        # concave decay curve
        elif self.softmax_t_decay_mode == 'exponential_concave':
            tau = 0.2
            linear_t = self._linear_decay_t(t_min, t_max, self.total_training_step, step_now)
            self._mask_softmax_t = - np.exp(-linear_t / tau) + t_max
        # convex decay curve
        elif self.softmax_t_decay_mode == 'exponential_convex':
            tau = 0.2
            linear_t = self._linear_decay_t(t_min, t_max, self.total_training_step, step_now)
            self._mask_softmax_t = np.exp(-(1 - linear_t) / tau)
        elif self.softmax_t_decay_mode == 'by_prob':
            # Tie the temperature to the recorder's mean model probability.
            self._mask_softmax_t = self.masker_recorder.mean_prob_tensor
            # print(f"Set mask softmax t to {self._mask_softmax_t}")
        else:
            return None
    def __call__(
            self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
    ) -> Dict[str, torch.Tensor]:
        """Collate ``examples`` into a model-ready batch for the configured
        pre-training task ('mlm' or 'clm')."""
        if isinstance(examples[0], (dict, BatchEncoding)):
            examples = [e["input_ids"] for e in examples]
        batch_ids, batch_timestamps = self._tensorize_batch(examples)
        if self.pretrain_task == 'mlm':
            inputs, labels, batch_timestamps, attention_mask = self.mlm_mask_tokens(batch_ids, batch_timestamps)
        elif self.pretrain_task == 'clm':
            inputs, labels, batch_timestamps, attention_mask = self.clm_mask_tokens(batch_ids, batch_timestamps)
        else:
            raise NotImplementedError
        if self.return_timestamps:
            return {"input_ids": inputs,
                    "labels": labels,
                    'timestamps': batch_timestamps,
                    'attention_mask': attention_mask}
        else:
            return {"input_ids": inputs,
                    "labels": labels,
                    'attention_mask': attention_mask}
    def _tensorize_batch(
            self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
    ):
        """Stack per-example (token-ids, timestamps) pairs.

        Returns (stacked id tensor, list of per-example timestamps).
        """
        batch_ids = []
        batch_timestamps = []
        for cn_char_subword, timestamp_subword in examples:
            batch_ids.append(cn_char_subword)
            batch_timestamps.append(timestamp_subword)
        batch_ids = torch.stack(batch_ids)
        return batch_ids, batch_timestamps
    #
    # # In order to accept both lists of lists and lists of Tensors
    # if isinstance(examples[0], (list, tuple)):
    #     examples = [torch.tensor(e, dtype=torch.long) for e in examples]
    # length_of_first = examples[0].size(0)
    # are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
    # if are_tensors_same_length:
    #     return torch.stack(examples, dim=0)
    # else:
    #     if self.tokenizer._pad_token is None:
    #         raise ValueError(
    #             "You are attempting to pad samples but the tokenizer you are using"
    #             f" ({self.tokenizer.__class__.__name__}) does not have one."
    #         )
    #     return pad_sequence(examples, batch_first=True, padding_value=self.tokenizer.pad_token_id)
    def _compute_pad_len(self, labels):
        """Return a (batch, 1) tensor with the number of *non-pad* tokens
        per sequence (the name 'pad_lens' is historical)."""
        pad_lens = []
        for label in labels:
            non_pad_length = len(label[label != self.tokenizer.pad_token_id])
            pad_lens.append(non_pad_length)
        pad_lens = torch.tensor(pad_lens).unsqueeze(1)
        return pad_lens
    def _handle_prob_overshoot(self,
                               probability_matrix,
                               pad_lens,
                               overshoot_threshold=1.0):
        """Redistribute probability mass from entries above
        ``overshoot_threshold`` onto the rest of the sequence (in place).

        Returns True when any entry overshot, else False.
        """
        is_exist_overshoot_indices = bool((probability_matrix > overshoot_threshold).any())
        if is_exist_overshoot_indices:
            for seq_i, seq_prob in enumerate(probability_matrix):
                gt_1_mask = seq_prob > overshoot_threshold
                if bool(gt_1_mask.any()):
                    overshoot_value = int(seq_prob[gt_1_mask])
                    distribute_value = float((overshoot_value - overshoot_threshold)) / float(pad_lens[seq_i] - 1)
                    seq_prob[~gt_1_mask] = seq_prob[~gt_1_mask] + distribute_value
                    seq_prob[gt_1_mask] = overshoot_value
            return True
        else:
            return False
    def mlm_mask_tokens(self, inputs: torch.Tensor, batch_timestamps, verbose=False):
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.

        Which positions get masked is decided by ``self.mask_type``; the
        80/10/10 replacement split is then applied to the selected
        positions.  Returns (inputs, labels, batch_timestamps,
        attention_mask).
        """
        if self.use_random_mlm_probability:
            mlm_probability = random.uniform(self.mlm_prob_min, self.mlm_prob_max)
        else:
            mlm_probability = self.mlm_probability
        self.mlm_probability = mlm_probability
        assert 0 < mlm_probability < 1
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
            )
        labels = inputs.clone()
        if self.mask_type.startswith('part_prob'):
            # Mix strategies: with probability part_prob_percent use the
            # posterior-probability masking, otherwise uniform masking.
            assert self.part_prob_percent is not None
            random_value = random.random()
            if random_value < self.part_prob_percent:
                mask_type = 'posterior_prob'
            else:
                mask_type = 'normal'
            # print(
            #     f"random_value: {random_value}, set mask_type to {mask_type}, part_prob_value: {self.part_prob_percent}")
        else:
            mask_type = self.mask_type
        if mask_type in {'posterior_prob', 'tf_idf', 'posterior_prob_with_tf_idf_warmup'}:
            if mask_type == 'posterior_prob':
                probability_matrix, avg_model_predict_prob = _sample_by_model_predict_prob(
                    self.masker_recorder.prob_tensor, labels)
                print(f"Model avg predict prob: {avg_model_predict_prob}, t: {self.mask_softmax_t}")
            elif mask_type == 'tf_idf':
                probability_matrix = _sample_by_tfidf(self.masker_recorder.idf_tensor, labels)
            elif mask_type == 'posterior_prob_with_tf_idf_warmup':
                # Early in training prefer tf-idf masking; the preference
                # decays exponentially toward posterior-prob masking.
                tf_idf_prob = _tf_idf_decay_func(1.0,
                                                 self.masker_recorder.train_step,
                                                 decay=self.tf_idf_warmup_decay)
                self.masker_recorder.tf_idf_warm_up_probs.append(tf_idf_prob)
                random_prob = random.random()
                if verbose:
                    print(f"[Warm up by tfidf] step i: {self.masker_recorder.train_step},"
                          f" tf_idf_prob: {tf_idf_prob},"
                          f" random_prob: {random_prob}")
                if random_prob < tf_idf_prob:
                    probability_matrix = _sample_by_tfidf(self.masker_recorder.idf_tensor, labels)
                else:
                    probability_matrix, avg_model_predict_prob = _sample_by_model_predict_prob(
                        self.masker_recorder.prob_tensor, labels)
            else:
                raise NotImplementedError
            # temp compute the freq of tokens in each sample
            pad_token_indices = torch.where(labels == self.tokenizer.pad_token_id)
            # TODO: computing the non-pad lengths this way is clumsy, but no
            # approach without the for-loop has been found yet.
            pad_lens = self._compute_pad_len(labels)
            # -inf at pad positions -> softmax assigns them zero probability.
            probability_matrix[pad_token_indices] = float('-inf')
            probability_matrix = torch.softmax(probability_matrix / self.mask_softmax_t, dim=1)
            # Scale so each sequence masks ~mlm_probability of its tokens.
            probability_matrix = probability_matrix * mlm_probability * pad_lens
            # is_overshoot = self._handle_prob_overshoot(probability_matrix,
            #                                            pad_lens,
            #                                            overshoot_threshold=1.0)
            is_overshoot = False
            if is_overshoot:
                self.masker_recorder.overshoot_count += 1
            # Clamp into [0, 1] so bernoulli() accepts the matrix.
            probability_matrix[probability_matrix >= 1.0] = 1.0
            probability_matrix[probability_matrix <= 0.0] = 0.0
            # ----------------------------------------------------------------------------------------------------------
            # Print for debug
            # ----------------------------------------------------------------------------------------------------------
            if verbose:
                non_pad_token_indices = torch.where(labels != self.tokenizer.pad_token_id)
                print(f"[Probability Matrix] min-{torch.min(probability_matrix[non_pad_token_indices])},"
                      f"max-{torch.max(probability_matrix[non_pad_token_indices])},"
                      f"avg-{torch.mean(probability_matrix[non_pad_token_indices])},"
                      f"softmax_t: {self.mask_softmax_t}")
                print_masked_indices = torch.bernoulli(probability_matrix).bool()
                for pad_len_i, pad_len in enumerate(pad_lens):
                    print(
                        f"[Mask ratio-{pad_len_i}]: "
                        f"{collections.Counter(print_masked_indices[pad_len_i].tolist())[True] / int(pad_len)}")
            # ----------------------------------------------------------------------------------------------------------
            # NOTE(review): debugging trap left in; bernoulli() only fails on
            # invalid probabilities (e.g. NaN) -- consider raising instead.
            try:
                masked_indices = torch.bernoulli(probability_matrix).bool()
            except:
                ipdb.set_trace()
        elif mask_type == 'lowest_prob':
            # # ----------------------------------------------------------------------------------------------------------
            # # OLD version
            # # ----------------------------------------------------------------------------------------------------------
            # RANDOM_RATIO = 0.0
            # dynamic_mask_predict_prob = self.masker_recorder.prob_tensor.clone()
            # # device = dynamic_mask_predict_prob.to(dynamic_mask_predict_prob.device)
            # probability_matrix = torch.zeros(labels.shape)
            # seq_len = probability_matrix.shape[1]
            # pad_start_indices = []
            # for label_i, label in enumerate(labels):
            #     padding_indices = torch.where(label == self.tokenizer.pad_token_id)[0]
            #     if padding_indices.shape[0] == 0:
            #         pad_start_index = len(label)
            #     else:
            #         pad_start_index = int(padding_indices[0])
            #     pad_start_indices.append(pad_start_index)
            #     probability_matrix[label_i] = dynamic_mask_predict_prob[label]
            #     probability_matrix[label_i][padding_indices] = float('inf')
            #
            #     # label_prob = dynamic_mask_predict_prob[label]
            #     # label_prob[padding_indices] = float('inf')
            #     # ipdb.set_trace()
            #     # label_prob = (1 - RANDOM_RATIO) * label_prob + RANDOM_RATIO * torch.rand((len(label_prob, ))).to(
            #     #     label_prob.device)
            #     # top_percent_label_indices = torch.argsort(label_prob)[:int(len(label_prob) * self.mlm_probability)]
            #     # masked_index = torch.zeros_like(label_prob, dtype=int)
            #     # masked_index[top_percent_label_indices] =
            #     # masked_index = masked_index.bool()
            #     # masked_indices.append(masked_index)
            # # masked_indices = torch.stack(masked_indices)  # batch_size x max_seq_length
            #
            # probability_matrix = (1 - RANDOM_RATIO) * probability_matrix + RANDOM_RATIO * torch.rand_like(
            #     probability_matrix)
            # top_percent_label_indices = torch.argsort(probability_matrix)[:, :int(seq_len * self.mlm_probability)]
            # masked_indices = torch.zeros_like(probability_matrix, dtype=int)
            # for masked_index_i, masked_index in enumerate(masked_indices):
            #     top_percent_label_index = top_percent_label_indices[masked_index_i]
            #     top_percent_label_index = top_percent_label_index[
            #         top_percent_label_index < pad_start_indices[masked_index_i]]
            #     masked_index[top_percent_label_index] = 1
            # masked_indices = masked_indices.bool()
            # # ----------------------------------------------------------------------------------------------------------
            # ----------------------------------------------------------------------------------------------------------
            # NEW version
            # ----------------------------------------------------------------------------------------------------------
            # Deterministically mask the tokens the model predicts worst;
            # RANDOM_RATIO adds a tiny jitter to break ties.
            RANDOM_RATIO = 1e-6  # 1e-6
            dynamic_mask_predict_prob = self.masker_recorder.prob_tensor.clone()
            batch_mask_predict_prob = dynamic_mask_predict_prob.expand(labels.shape[0],
                                                                       dynamic_mask_predict_prob.shape[0]).to(
                labels.device)
            probability_matrix = batch_mask_predict_prob.gather(1, labels)
            # seq_len = probability_matrix.shape[1]
            pad_token_indices = torch.where(labels == self.tokenizer.pad_token_id)
            # +inf at pad positions so argsort never selects them.
            probability_matrix[pad_token_indices] = float('inf')  # batch_size x max_seq_len
            probability_matrix = (1 - RANDOM_RATIO) * probability_matrix + RANDOM_RATIO * torch.rand_like(
                probability_matrix)
            pad_lens = self._compute_pad_len(labels)
            top_percent_label_indices = []
            argsort_probability_matrix = torch.argsort(probability_matrix)
            for pad_len_i, pad_len in enumerate(pad_lens):
                top_percent_label_indices.append(
                    argsort_probability_matrix[pad_len_i][:int(pad_len * self.mlm_probability)])
            # top_percent_label_indices = torch.argsort(probability_matrix)[:, :int(seq_len * self.mlm_probability)]
            temp_indices = torch.cat([torch.full(x.shape, i) for i, x in enumerate(top_percent_label_indices)]).long()
            top_percent_label_fancy_index = (temp_indices, torch.cat(top_percent_label_indices))
            masked_indices = torch.zeros_like(probability_matrix, dtype=int)
            masked_indices[top_percent_label_fancy_index] = 1
            masked_indices[pad_token_indices] = 0
            masked_indices = masked_indices.bool()
            # ----------------------------------------------------------------------------------------------------------
            # # ----------------------------------------------------------------------------------------------------------
            # # Compute softmax version & compare
            # # ----------------------------------------------------------------------------------------------------------
            # mask_softmax_t = 0.00001
            # probability_matrix_softmax = batch_mask_predict_prob.gather(1, labels)
            # probability_matrix_softmax = torch.softmax(probability_matrix_softmax / mask_softmax_t, dim=1)
            # probability_matrix_softmax = probability_matrix_softmax * mlm_probability * pad_lens
            # masked_indices_softmax = torch.bernoulli(probability_matrix_softmax).bool()
            #
            # for pad_len_i, pad_len in enumerate(pad_lens):
            #     print("-" * 78)
            #     print(
            #         f"[lowest_prob][Mask ratio-{pad_len_i}]: "
            #         f"{collections.Counter(masked_indices[pad_len_i].tolist())[True] / int(pad_len)}")
            #     print(
            #         f"[Softmax][Mask Mask-{pad_len_i}]: "
            #         f"{collections.Counter(masked_indices_softmax[pad_len_i].tolist())[True] / int(pad_len)}")
            # # ----------------------------------------------------------------------------------------------------------
        elif mask_type == 'normal':
            # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
            probability_matrix = torch.full(labels.shape, mlm_probability)
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
            if self.tokenizer._pad_token is not None:
                padding_mask = labels.eq(self.tokenizer.pad_token_id)
                probability_matrix.masked_fill_(padding_mask, value=0.0)
            pad_lens = None
            masked_indices = torch.bernoulli(probability_matrix).bool()  # batch_size x max_seq_length
        else:
            raise NotImplementedError
        if self.masker_recorder is not None:
            if self.masker_recorder.record_snapshot:
                self.masker_recorder.step_mask_probabilities.extend(
                    probability_matrix[probability_matrix > 0.0].tolist())
            # The guard below guarantees avg_model_predict_prob exists: it
            # is only True when the posterior-prob branch just ran.
            if self.mask_type == 'part_prob' and mask_type == 'posterior_prob':
                keep_N = 16
                record_point_N = 200  # For debug
                max_sample_per_step = 32
                if self.masker_recorder.train_step % max(int(self.masker_recorder.total_steps / record_point_N),
                                                         10) == 0:
                    train_step_counts = collections.Counter(
                        self.masker_recorder.step_sample_mask_distributions['train_step'])
                    current_step_count = train_step_counts[self.masker_recorder.train_step]
                    if current_step_count >= max_sample_per_step:
                        pass
                    else:
                        for label, mask_prob in zip(labels[:keep_N], probability_matrix[:keep_N]):
                            label = label[label != 1]
                            mask_prob = mask_prob[:len(label)]
                            self.masker_recorder.step_sample_mask_distributions['train_step'].append(
                                self.masker_recorder.train_step)
                            self.masker_recorder.step_sample_mask_distributions['tokens'].append(
                                tuple(label.tolist()))
                            self.masker_recorder.step_sample_mask_distributions['mask_prob'].append(
                                tuple(mask_prob.tolist()))
                            self.masker_recorder.step_sample_mask_distributions['softmax_t'].append(
                                self.mask_softmax_t)
                            self.masker_recorder.step_sample_mask_distributions['avg_model_prob'].append(
                                max(avg_model_predict_prob, 1e-10))
            # # save label maskes
            # self.masker_recorder
            if self.is_record_mask_ratio:
                if pad_lens is None:
                    pad_lens = self._compute_pad_len(labels)
                for masked_index_i, masked_index in enumerate(masked_indices):
                    mask_ratio = collections.Counter(masked_index.tolist())[True] / int(pad_lens[masked_index_i])
                    self.masker_recorder.mask_ratios.append(mask_ratio)
        labels[~masked_indices] = -100  # We only compute loss on masked tokens
        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% of the time, we replace masked input tokens with random word
        # NOTE(review): random ids are drawn from range(tokenizer.max_len);
        # the vocabulary size (len(self.tokenizer)) looks intended -- confirm.
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(self.tokenizer.max_len, labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]
        # Compute Attention Mask
        # ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
        attention_mask = torch.ones_like(inputs)
        attention_mask[inputs.eq(self.tokenizer.pad_token_id)] = 0
        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels, batch_timestamps, attention_mask
    def clm_mask_tokens(self, inputs: torch.Tensor, batch_timestamps):
        """
        Build causal-LM training samples: for each sequence longer than
        CLM_MIN_LEN, draw ``clm_sample_n`` random prefix lengths; each
        sample keeps its prefix, pads the rest, and the token right after
        the kept prefix becomes the single prediction target.

        Parameters
        ----------
        inputs: tensor, shape: batch_size x max_seq_len
             example:
                 tensor([[15086,  8773, 10116,  ...,     1,     1,     1],
                         [13689,  1683,  1613,  ...,     1,     1,     1]])
        batch_timestamps: List

        Returns
        -------
        (new_inputs, new_labels, batch_timestamps, attention_masks)
        """
        inputs_clone = inputs.clone()
        input_lens = [len(x[x != 1]) for x in inputs]
        new_inputs = []
        new_labels = []
        attention_masks = []
        for i, input_len in enumerate(input_lens):
            if input_len <= self.CLM_MIN_LEN:
                continue
            clm_samples = inputs_clone[i].repeat(self.clm_sample_n, 1)
            sample_pad_mask = torch.ones(self.clm_sample_n, inputs_clone.shape[1]).bool()
            sample_pad_mask_view = sample_pad_mask.view(-1)
            # The minimum is 1/4 of the current sample length, or 32.
            clm_sample_len = random.sample(range(self.CLM_MIN_LEN, input_len), self.clm_sample_n)
            unmask_view_indices = []
            # NOTE(review): this inner loop reuses ``i``, shadowing the outer
            # index; harmless today (enumerate reassigns it) but fragile.
            for i, x in enumerate(clm_sample_len):
                unmask_view_indices.extend(list(range(i * sample_pad_mask.shape[1], i * sample_pad_mask.shape[1] + x)))
            sample_pad_mask_view[unmask_view_indices] = False
            clm_samples[sample_pad_mask] = self.tokenizer.pad_token_id
            # set labels: only position (prefix_len - 1) keeps its token.
            cls_labels = clm_samples.clone()
            cls_label_mask = torch.zeros_like(cls_labels).bool()
            cls_label_mask = ~torch.scatter(cls_label_mask, 1, (torch.tensor(clm_sample_len) - 1).unsqueeze(1), True)
            cls_labels[cls_label_mask] = -100
            # The target position itself is padded out of the input.
            clm_samples[~cls_label_mask] = self.tokenizer.pad_token_id
            # set attentions: attend to the kept prefix only.
            attention_pad_mask = sample_pad_mask.clone()
            attention_pad_mask = ~torch.scatter(attention_pad_mask, 1, (torch.tensor(clm_sample_len) - 1).unsqueeze(1),
                                                True)
            attention_pad_mask = attention_pad_mask.long()
            # some assertions
            temp_assert_index = len(clm_samples[0][clm_samples[0] != 1])
            assert cls_labels[0][temp_assert_index] != -100
            assert clm_samples[0][temp_assert_index] == self.tokenizer.pad_token_id
            assert clm_samples[0][temp_assert_index - 1] != self.tokenizer.pad_token_id, ipdb.set_trace()
            assert attention_pad_mask[0][temp_assert_index] == 0  # Mask for label position
            assert attention_pad_mask[0][temp_assert_index - 1] == 1  # Unmask for previous position
            new_inputs.append(clm_samples)
            new_labels.append(cls_labels)
            attention_masks.append(attention_pad_mask)
        new_inputs = torch.cat(new_inputs)
        new_labels = torch.cat(new_labels)
        attention_masks = torch.cat(attention_masks)
        return new_inputs, new_labels, batch_timestamps, attention_masks
from cupy import _util
# expose cache handles to this module
from cupy.fft._cache import get_plan_cache # NOQA
from cupy.fft._cache import clear_plan_cache # NOQA
from cupy.fft._cache import get_plan_cache_size # NOQA
from cupy.fft._cache import set_plan_cache_size # NOQA
from cupy.fft._cache import get_plan_cache_max_memsize # NOQA
from cupy.fft._cache import set_plan_cache_max_memsize # NOQA
from cupy.fft._cache import show_plan_cache_info # NOQA
# on Linux, expose callback handles to this module
import sys as _sys
if _sys.platform.startswith('linux'):
    from cupy.fft._callback import get_current_callback_manager  # NOQA
    from cupy.fft._callback import set_cufft_callbacks  # NOQA
else:
    # Fallback stubs for non-Linux platforms, where cuFFT callbacks are not
    # available: the getter reports "no manager", and the context-manager
    # class fails loudly on instantiation.
    def get_current_callback_manager(*args, **kwargs):
        # No callback manager can exist outside Linux.
        return None

    class set_cufft_callbacks:
        # Mirrors the name of the Linux context-manager class; any attempt to
        # use it raises immediately.
        def __init__(self, *args, **kwargs):
            raise RuntimeError('cuFFT callback is only available on Linux')

# Module-level FFT configuration flags.
enable_nd_planning = True  # allow multi-dimensional cuFFT plans
use_multi_gpus = False  # presumably enables multi-GPU transforms (see `set_cufft_gpus`) — confirm against FFT routines
_devices = None  # tuple of GPU ids, set by `set_cufft_gpus`
def set_cufft_gpus(gpus):
    '''Set the GPUs to be used in multi-GPU FFT.

    Args:
        gpus (int or list of int): The number of GPUs or a list of GPUs
            to be used. For the former case, the first ``gpus`` GPUs
            will be used.

    Raises:
        ValueError: If ``gpus`` is neither an int nor a list, or if fewer
            than two GPUs are selected.

    .. warning::
        This API is currently experimental and may be changed in the future
        version.

    .. seealso:: `Multiple GPU cuFFT Transforms`_

    .. _Multiple GPU cuFFT Transforms:
        https://docs.nvidia.com/cuda/cufft/index.html#multiple-GPU-cufft-transforms
    '''
    _util.experimental('cupy.fft.config.set_cufft_gpus')
    global _devices

    if isinstance(gpus, int):
        # An int selects the first ``gpus`` devices.
        devs = list(range(gpus))
    elif isinstance(gpus, list):
        devs = gpus
    else:
        raise ValueError("gpus must be an int or a list of int.")
    if len(devs) <= 1:
        raise ValueError("Must use at least 2 GPUs.")

    # make it hashable (so it can serve as a plan-cache key)
    _devices = tuple(devs)
import numpy as np
import mdtraj as md
import pytest
from scattering.utils.io import get_fn
from scattering.utils.run import run_total_vhf, run_partial_vhf
@pytest.mark.parametrize("step", [1, 2])
def test_run_total_vhf(step):
    """Check output shapes and large-r normalization of the total Van Hove function."""
    trj = md.load(get_fn("spce.xtc"), top=get_fn("spce.gro"))
    chunk_length = 4
    n_chunks = 5
    r, t, g_r_t = run_total_vhf(
        trj, step=step, chunk_length=chunk_length, n_chunks=n_chunks
    )
    # Use integer division: `chunk_length / step` is a float, which only
    # compares equal to the integer lengths by coincidence and misbehaves
    # for non-divisible step values.
    n_frames = chunk_length // step
    assert len(t) == n_frames
    assert len(r) == 200
    assert np.shape(g_r_t) == (n_frames, 200)

    # Check normalization to ~1
    assert 0.95 < np.mean(g_r_t[:, -10:]) < 1.05
@pytest.mark.parametrize("step", [1, 2])
def test_run_partial_vhf(step):
    """Check output shapes and large-r normalization of the O-O partial Van Hove function."""
    trj = md.load(get_fn("spce.xtc"), top=get_fn("spce.gro"))
    chunk_length = 4
    n_chunks = 5
    combo = ["O", "O"]
    r, t, g_r_t = run_partial_vhf(
        trj,
        step=step,
        selection1=f"element {combo[0]}",
        selection2=f"element {combo[1]}",
        chunk_length=chunk_length,
        n_chunks=n_chunks,
    )
    # Integer division instead of float division (see test_run_total_vhf).
    n_frames = chunk_length // step
    assert len(t) == n_frames
    assert len(r) == 200
    assert np.shape(g_r_t) == (n_frames, 200)

    # Check normalization to ~1
    assert 0.95 < np.mean(g_r_t[:, -10:]) < 1.05
import tensorflow as tf
from tensorflow import keras
print(tf.VERSION)
print(tf.keras.__version__)
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import preprocess_input
import numpy as np
import argparse
import matplotlib.pyplot as plt
import json
# Command-line interface: class-index map, input image, and trained model.
parser = argparse.ArgumentParser()
parser.add_argument('--class_index', default='index_to_class.json', type = str, help = 'index map')
parser.add_argument('--input', default='image.jpg', type = str, help = 'input image')
parser.add_argument('--model', default='model.h5', type = str, help = 'model in h5 format')
args = parser.parse_args()
print(args.class_index)
print(args.input)
print(args.model)

# Load the JSON mapping from class index (string key) to class label.
index_to_class = {}
with open(args.class_index) as json_file:
    index_to_class = json.load(json_file)
print(index_to_class)

# Load the trained Keras model from HDF5.
model = keras.models.load_model(args.model)

# Load and preprocess the image to InceptionV3 input size (299x299).
img = image.load_img(args.input, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
# NOTE(review): preprocess_input rescales pixels to [-1, 1]; the plt.imshow
# call below therefore displays a clipped/distorted image — plotting `img`
# instead would be faithful. Left unchanged here.
x = preprocess_input(x)

preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
# Top-3 class indices by descending probability, mapped to class labels.
top = preds[0].argsort()[-3:][::-1]
top = list(map(lambda x: index_to_class[str(x)], top))

plt.figure(figsize=(10,10))
# plt.xticks([])
# plt.yticks([])
plt.imshow(x[0])
# plt.imshow(img, cmap=plt.cm.binary)
plt.colorbar()
plt.grid(False)
plt.xlabel(top)
plt.show()

print('Predicted:', top)
# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
"""Utility functions module."""
import cvxpy as cp
import datetime
import logging
import numpy as np
import os
import pandas as pd
import plotly.graph_objects as go
import plotly.io as pio
import psychrolib
import pvlib
import re
import scipy.sparse
import subprocess
import sys
import time
import typing
import cobmo.config
# Module-level logger for this utilities module.
logger = cobmo.config.get_logger(__name__)

# Instantiate dictionary for execution time logging.
# - Maps label -> start timestamp in seconds; `log_time` pops the entry when
#   the matching end call is made.
log_times = dict()
class OptimizationProblem(object):
    """Optimization problem object for use with CVXPY.

    Collects a list of constraints and an objective expression, which are
    compiled into a `cp.Problem` when `solve()` is called.
    """

    constraints: list
    objective: cp.Expression
    cvxpy_problem: cp.Problem

    def __init__(self):
        self.constraints = []
        self.objective = cp.Constant(value=0.0)

    def solve(
            self,
            keep_problem=False
    ):
        """Compile (unless re-using an existing problem) and solve the optimization problem.

        Keyword Arguments:
            keep_problem (bool): If True and a CVXPY problem object exists from a previous
                solve, re-use it instead of re-compiling objective / constraints. Default: False.

        Raises:
            AssertionError: If the solver does not terminate with an optimal solution.
        """

        # Instantiate CVXPY problem object, unless an existing one should be re-used.
        if hasattr(self, 'cvxpy_problem') and keep_problem:
            pass
        else:
            self.cvxpy_problem = cp.Problem(cp.Minimize(self.objective), self.constraints)

        # Solve optimization problem.
        self.cvxpy_problem.solve(
            solver=(
                cobmo.config.config['optimization']['solver_name'].upper()
                if cobmo.config.config['optimization']['solver_name'] is not None
                else None
            ),
            verbose=cobmo.config.config['optimization']['show_solver_output'],
            **cobmo.config.solver_parameters
        )

        # Check that the solver exited with an optimal solution.
        # - Explicit check instead of a bare `assert`, which would be silently
        #   stripped under `python -O`.
        # - AssertionError is kept for backward compatibility with callers that
        #   catch it.
        if self.cvxpy_problem.status != cp.OPTIMAL:
            logger.error(f"Solver termination status: {self.cvxpy_problem.status}")
            raise AssertionError(f"Solver termination status: {self.cvxpy_problem.status}")
class MatrixConstructor(object):
    """Accumulator for building large sparse matrices entry by entry.

    Holds coordinate-format (COO-style) entry lists for a matrix defined over
    given index (row) and column key sets. Entries are contributed via item
    assignment; duplicates at the same location are summed on conversion.

    - ``matrix[row_key, column_key] = value`` and
      ``matrix[row_key, column_key] += value`` both APPEND an entry; existing
      values are never overwritten (true value setting is omitted for
      performance).
    - Item access always yields 0.0 (true value getting is omitted for
      performance); this is what makes ``+=`` work.
    - Convert the accumulated entries with ``to_scipy_csr()``,
      ``to_dataframe_sparse()`` or ``to_dataframe_dense()``.

    Parameters:
        index (pd.Index): Index (row) key set.
        columns (pd.Index): Columns key set.

    Attributes:
        index (pd.Index): Index (row) key set.
        columns (pd.Index): Columns key set.
        data_index (list): Integer row locations of the contributed entries.
        data_columns (list): Integer column locations of the contributed entries.
        data_values (list): Values of the contributed entries.
    """

    index: pd.Index
    columns: pd.Index
    data_index: list
    data_columns: list
    data_values: list

    def __init__(
            self,
            index: typing.Union[pd.Index],
            columns: typing.Union[pd.Index]
    ):
        self.index = index
        self.columns = columns
        self.data_index = []
        self.data_columns = []
        self.data_values = []

    def __getitem__(
            self,
            key: typing.Tuple[any, any]
    ) -> float:
        # Constant zero enables the in-place addition idiom ``matrix[i, j] += v``.
        return 0.0

    def __setitem__(
            self,
            key: typing.Tuple[any, any],
            value: any
    ):
        # Only (row_key, column_key) tuples are accepted.
        if len(key) != 2:
            raise ValueError(f"Cannot use key with {len(key)} items. Only key with 2 items is valid.")

        # Record the entry; integer locations come from the index / column sets.
        # Note that this appends (sums up on conversion) rather than overwrites.
        row_key, column_key = key
        self.data_index.append(self.index.get_loc(row_key))
        self.data_columns.append(self.columns.get_loc(column_key))
        self.data_values.append(value)

    def to_scipy_csr(self) -> scipy.sparse.csr_matrix:
        """Obtain Scipy sparse CSR matrix."""
        coordinates = (self.data_index, self.data_columns)
        shape = (len(self.index), len(self.columns))
        return scipy.sparse.csr_matrix((self.data_values, coordinates), shape=shape)

    def to_dataframe_sparse(self) -> pd.DataFrame:
        """Obtain Pandas dataframe in sparse format.

        - Reference for Pandas sparse dataframes: <https://pandas.pydata.org/pandas-docs/stable/user_guide/sparse.html>
        """
        coordinates = (self.data_index, self.data_columns)
        shape = (len(self.index), len(self.columns))
        sparse_matrix = scipy.sparse.coo_matrix((self.data_values, coordinates), shape=shape)
        return pd.DataFrame.sparse.from_spmatrix(sparse_matrix, index=self.index, columns=self.columns)

    def to_dataframe_dense(self) -> pd.DataFrame:
        """Obtain Pandas dataframe in dense format."""
        return self.to_dataframe_sparse().sparse.to_dense()
def log_time(
        label: str,
        log_level: str = 'debug',
        logger_object: logging.Logger = logger
):
    """Log start / end message and time duration for given label.

    - When called with given label for the first time, will log start message.
    - When called subsequently with the same / previously used label, will log end message and time duration since
      logging the start message.
    - Start message: "Starting ``label``."
    - End message: "Completed ``label`` in ``duration`` seconds."

    Arguments:
        label (str): Label for the start / end message.

    Keyword Arguments:
        log_level (str): Log level to which the start / end messages are output. Choices: 'debug', 'info'.
            Default: 'debug'.
        logger_object (logging.Logger): Logger object to which the start / end messages are output. Default:
            ``utils.logger``.

    Raises:
        ValueError: If ``log_level`` is not one of the supported choices.
    """
    time_now = time.time()

    # Resolve the log output function for the requested level.
    # - Bound methods are used directly; wrapping them in lambdas is redundant.
    if log_level == 'debug':
        logger_handle = logger_object.debug
    elif log_level == 'info':
        logger_handle = logger_object.info
    else:
        raise ValueError(f"Invalid log level: '{log_level}'")

    # Membership test directly on the dict (not `.keys()`), per idiom.
    if label in log_times:
        # Second call for this label: log end message with duration, drop entry.
        logger_handle(f"Completed {label} in {(time_now - log_times.pop(label)):.6f} seconds.")
    else:
        # First call for this label: remember start time, log start message.
        log_times[label] = time_now
        logger_handle(f"Starting {label}.")
def calculate_absolute_humidity_humid_air(
        temperature,  # In °C.
        relative_humidity  # In percent.
):
    """Return the absolute humidity (humidity ratio) of humid air in kg(water)/kg(air).

    Assumes standard atmospheric pressure.
    """
    return psychrolib.GetHumRatioFromRelHum(
        TDryBulb=temperature,  # In °C.
        RelHum=relative_humidity / 100.0,  # In [0,1].
        Pressure=101325.0  # In Pa.
    )
def calculate_enthalpy_humid_air(
        temperature,  # In °C.
        absolute_humidity  # In kg(water)/kg(air).
):
    """Return the specific enthalpy of humid air in J/kg."""
    return psychrolib.GetMoistAirEnthalpy(
        TDryBulb=temperature,  # In °C.
        HumRatio=absolute_humidity  # In kg(water)/kg(air).
    )
def calculate_dew_point_enthalpy_humid_air(
        temperature,  # In °C.
        relative_humidity  # In percent.
):
    """Return the enthalpy of humid air at its dew point temperature in J/kg."""
    # Dew point temperature for the given ambient state.
    dew_point_temperature = psychrolib.GetTDewPointFromRelHum(
        TDryBulb=temperature,  # In °C.
        RelHum=relative_humidity / 100.0  # In [0,1].
    )
    # Humidity ratio of the ambient state (unchanged at the dew point).
    humidity_ratio = calculate_absolute_humidity_humid_air(
        temperature,  # In °C.
        relative_humidity  # In percent.
    )
    return psychrolib.GetMoistAirEnthalpy(
        TDryBulb=dew_point_temperature,  # In °C.
        HumRatio=humidity_ratio  # In kg(water)/kg(air).
    )
def calculate_irradiation_surfaces(
        database_connection,
        weather_type='singapore_nus',
        irradiation_model='dirint'
):
    """Calculates irradiation for surfaces oriented towards east, south, west & north.

    - Operates on the database: Updates according columns in `weather_timeseries`.
    - Takes irradiation_horizontal as measured global horizontal irradiation (ghi).
    - Based on pvlib-python toolbox: https://github.com/pvlib/pvlib-python

    Arguments:
        database_connection: Database connection for the scenario data.

    Keyword Arguments:
        weather_type (str): Row selector for `weather_types` / `weather_timeseries`.
        irradiation_model (str): GHI decomposition model: 'disc', 'erbs' or 'dirint'.
    """

    # Load weather data from database
    # NOTE(review): queries are built via `str.format`; acceptable for internal
    # `weather_type` values, but parameterized queries would be safer.
    weather_types = pd.read_sql(
        """
        select * from weather_types
        where weather_type='{}'
        """.format(weather_type),
        database_connection
    )
    weather_timeseries = pd.read_sql(
        """
        select * from weather_timeseries
        where weather_type='{}'
        """.format(weather_type),
        database_connection
    )

    # Set time zone (required for pvlib solar position calculations).
    weather_timeseries.index = pd.to_datetime(weather_timeseries['time'])
    weather_timeseries.index = weather_timeseries.index.tz_localize(weather_types['time_zone'][0])

    # Extract global horizontal irradiation (ghi) from weather data.
    irradiation_ghi = weather_timeseries['irradiation_horizontal']

    # Calculate solarposition (zenith, azimuth).
    solarposition = pvlib.solarposition.get_solarposition(
        time=weather_timeseries.index,
        latitude=weather_types['latitude'][0],
        longitude=weather_types['longitude'][0]
    )

    # Calculate direct normal irradiation (dni) from global horizontal irradiation (ghi).
    # - If `irradiation_model` matches none of the branches below, this series
    #   stays all-NaN and is zeroed out by the NaN replacement further down.
    irradiation_dni = pd.Series(index=weather_timeseries.index)
    if irradiation_model == 'disc':
        # ... via DISC model.
        irradiation_disc = pvlib.irradiance.disc(
            ghi=irradiation_ghi,
            solar_zenith=solarposition['zenith'],
            datetime_or_doy=weather_timeseries.index
        )
        irradiation_dni = irradiation_disc['dni']
    elif irradiation_model == 'erbs':
        # ... via ERBS model.
        irradiation_erbs = pvlib.irradiance.erbs(
            ghi=irradiation_ghi,
            zenith=solarposition['zenith'],
            datetime_or_doy=weather_timeseries.index
        )
        irradiation_dni = irradiation_erbs['dni']
    elif irradiation_model == 'dirint':
        # ... via DIRINT model.
        # - Dew point temperature is derived from ambient temperature / humidity.
        irradiation_dirint = pvlib.irradiance.dirint(
            ghi=irradiation_ghi,
            solar_zenith=solarposition['zenith'],
            times=weather_timeseries.index,
            temp_dew=np.vectorize(psychrolib.GetTDewPointFromHumRatio)(  # In °C.
                TDryBulb=weather_timeseries['ambient_air_temperature'].values,  # In °C.
                HumRatio=weather_timeseries['ambient_air_absolute_humidity'].values,  # In kg(water)/kg(air).
                Pressure=101325  # In Pa.
            )
        )
        irradiation_dni = irradiation_dirint

    # Replace NaNs (NaN means no irradiation).
    irradiation_dni.loc[irradiation_dni.isna()] = 0.0

    # Calculate diffuse horizontal irradiation (dhi) via closure: dhi = ghi - dni * cos(zenith).
    irradiation_dhi = pd.Series(
        irradiation_ghi
        - irradiation_dni
        * pvlib.tools.cosd(solarposition['zenith']),
    )

    # Define surface orientations (azimuth convention: north = 0°, clockwise).
    surface_orientations = pd.DataFrame(
        data=[0.0, 90.0, 180.0, 270.0],
        index=['north', 'east', 'south', 'west'],
        columns=['surface_azimuth']
    )

    # Calculate irradiation onto each vertical (tilt 90°) surface.
    for index, row in surface_orientations.iterrows():
        irradiation_surface = pvlib.irradiance.get_total_irradiance(
            surface_tilt=90.0,
            surface_azimuth=row['surface_azimuth'],
            solar_zenith=solarposition['zenith'],
            solar_azimuth=solarposition['azimuth'],
            dni=irradiation_dni,
            ghi=irradiation_ghi,
            dhi=irradiation_dhi,
            surface_type='urban',
            model='isotropic'
        )
        weather_timeseries.loc[:, 'irradiation_' + index] = irradiation_surface['poa_global']

    # Update weather_timeseries in database (delete old rows, then append updated rows).
    database_connection.cursor().execute(
        """
        delete from weather_timeseries
        where weather_type='{}'
        """.format(weather_type),
    )
    weather_timeseries.to_sql(
        'weather_timeseries',
        database_connection,
        if_exists='append',
        index=False
    )
def calculate_sky_temperature(
        database_connection,
        weather_type='singapore_nus'
):
    """ Calculates sky temperatures from ambient air temperature for tropical weather.

    - The sky temperature is the ambient air temperature decreased by the
      `temperature_difference_sky_ambient` value configured in `weather_types`
      (e.g. 11 K, according to ISO 52016-1, Table B.19).
    - Operates on the database: Adds / updates the `sky_temperature` column in
      `weather_timeseries`.
    """
    # Load weather data.
    weather_types = pd.read_sql(
        """
        select * from weather_types
        where weather_type='{}'
        """.format(weather_type),
        database_connection
    )
    weather_timeseries = pd.read_sql(
        """
        select * from weather_timeseries
        where weather_type='{}'
        """.format(weather_type),
        database_connection
    )
    weather_timeseries.index = pd.to_datetime(weather_timeseries['time'])

    # Get temperature difference between sky and ambient.
    temperature_difference = weather_types['temperature_difference_sky_ambient'][0]

    # Calculate sky temperature.
    weather_timeseries.loc[:, 'sky_temperature'] = \
        weather_timeseries.loc[:, 'ambient_air_temperature'] - temperature_difference

    # Update weather_timeseries in database (delete old rows, then append updated rows).
    database_connection.cursor().execute(
        """
        delete from weather_timeseries
        where weather_type='{}'
        """.format(weather_type),
    )
    weather_timeseries.to_sql('weather_timeseries', database_connection, if_exists='append', index=False)
def calculate_error(
        expected_timeseries=pd.DataFrame(),
        predicted_timeseries=pd.DataFrame()
):
    """Computes the error between expected and predicted timeseries dataframes.

    - Error is defined as `predicted - expected`, aligned to the index /
      columns of `expected_timeseries`.
    - Note: This function doesn't check if the data format is valid.

    Returns:
        error_summary (pd.DataFrame): 'mean_absolute_error' and
            'root_mean_squared_error' per column.
        error_timeseries (pd.DataFrame): Timeseries of prediction errors.
    """

    # Calculate error timeseries.
    # - Vectorized dataframe subtraction instead of a Python loop over rows.
    # - `.loc` raises KeyError for missing labels, matching the row-loop behavior.
    error_timeseries = (
        predicted_timeseries.loc[expected_timeseries.index, expected_timeseries.columns]
        - expected_timeseries
    )

    # Calculate error summary.
    # - Column-wise reductions replace the former `DataFrame.iteritems()` loop,
    #   which was removed in pandas 2.0.
    error_summary = pd.DataFrame(
        0.0,
        index=pd.Index(['mean_absolute_error', 'root_mean_squared_error'], name='error_type'),
        columns=expected_timeseries.columns
    )
    error_summary.loc['mean_absolute_error', :] = error_timeseries.abs().mean()
    error_summary.loc['root_mean_squared_error', :] = (error_timeseries ** 2).mean() ** 0.5

    return (
        error_summary,
        error_timeseries
    )
def calculate_tank_diameter_height(
        volume,
        aspect_ratio
):
    """Calculates diameter and height of storage tank based on volume and aspect ratio.

    The tank is modeled as a cylinder with `aspect_ratio = height / diameter`.

    Returns:
        (diameter, height) tuple.
    """
    # Invert V = pi/4 * d^2 * h with h = aspect_ratio * d.
    diameter = ((4.0 * volume) / (np.pi * aspect_ratio)) ** (1.0 / 3.0)
    height = aspect_ratio * diameter
    return (
        diameter,
        height
    )
def calculate_discounted_payback_time(
        lifetime,
        investment_cost,
        operation_cost,
        operation_cost_baseline,
        interest_rate=0.06
):
    """Calculate simple / discounted payback time in years.

    Arguments:
        lifetime: Expected lifetime of the investment in years.
        investment_cost: Upfront investment cost.
        operation_cost: Total operation cost over the lifetime.
        operation_cost_baseline: Total baseline operation cost over the lifetime.

    Keyword Arguments:
        interest_rate (float): Annual interest rate for discounting. Default: 0.06.

    Returns:
        simple_payback_time (int or None): Payback time in years without discounting;
            `None` if there are no savings or payback is not reached within lifetime.
        discounted_payback_time (int or None): Payback time in years with discounting;
            `None` if there are no savings or payback is not reached within lifetime.
    """

    # Calculate annual cost savings.
    operation_cost_savings_annual = (operation_cost_baseline - operation_cost) / lifetime

    if (operation_cost_savings_annual <= 0.0) or (investment_cost == 0.0):
        # Return `None` if no savings observed.
        simple_payback_time = None
        discounted_payback_time = None
    else:
        # Calculate simple payback time.
        simple_payback_time = int(np.ceil(investment_cost / operation_cost_savings_annual))
        if simple_payback_time >= lifetime:
            # If simple payback time is greater than lifetime, return None.
            simple_payback_time = None

        # Calculate discounted payback time in years.
        year = 0
        annual_discounted_savings = np.zeros(int(np.ceil(lifetime)) + 1)
        cumulative_discounted_savings = np.zeros(int(np.ceil(lifetime)) + 1)
        while cumulative_discounted_savings[year] < investment_cost:
            year += 1
            discount_factor = (1.0 + interest_rate) ** (-year)
            annual_discounted_savings[year] = operation_cost_savings_annual * discount_factor
            cumulative_discounted_savings[year] = (
                cumulative_discounted_savings[year - 1] + annual_discounted_savings[year]
            )
            # Discontinue calculations if payback is not reached within lifetime, return None.
            # - `break` is required here: without it, the loop condition would be
            #   re-evaluated with `year = None` and fail with an indexing error.
            if year >= lifetime:
                year = None
                break
        discounted_payback_time = year

    return (
        simple_payback_time,
        discounted_payback_time,
    )
def get_timestamp(
        time: datetime.datetime = None
) -> str:
    """Return a formatted timestamp string ('%Y-%m-%d_%H-%M-%S'), e.g. for saving results.

    Uses the current local time when no `time` is given.
    """
    moment = datetime.datetime.now() if time is None else time
    return moment.strftime('%Y-%m-%d_%H-%M-%S')
def get_results_path(
        base_name: str,
        scenario_name: str = None
) -> str:
    """Generate results path, which is a new subfolder in the results directory. The subfolder name is
    assembled of the given base name, scenario name and current timestamp. The new subfolder is
    created on disk along with this.

    - Non-alphanumeric characters are removed from `base_name` and `scenario_name`.
    - If a script file path or `__file__` is passed as `base_name`, the base file name without extension
      will be taken as base name.

    Raises:
        FileExistsError: If the results subfolder already exists, e.g. when two runs start
            within the same second.
    """

    # Preprocess results path name components, including removing non-alphanumeric characters.
    base_name = re.sub(r'\W+', '', os.path.basename(os.path.splitext(base_name)[0])) + '_'
    scenario_name = '' if scenario_name is None else re.sub(r'\W+', '', scenario_name) + '_'
    # Call the module-level function directly. The previous self-reference via
    # `cobmo.utils.get_timestamp()` relied on `cobmo.utils` being resolvable as
    # a package attribute, although this module only imports `cobmo.config`.
    timestamp = get_timestamp()

    # Obtain results path.
    results_path = os.path.join(cobmo.config.config['paths']['results'], f'{base_name}{scenario_name}{timestamp}')

    # Instantiate results directory.
    os.mkdir(results_path)

    return results_path
def get_alphanumeric_string(
        string: str
):
    """Create lowercase alphanumeric string from given string, replacing non-alphanumeric characters with underscore."""
    underscored = re.sub(r'\W+', '_', string)
    return underscored.strip('_').lower()
def launch(path):
    """Launch the file at given path with its associated application. If path is a directory, open in file explorer."""

    if not os.path.exists(path):
        # Log and return early. Previously the error was logged but execution
        # fell through and attempted to launch the missing path anyway, which
        # then failed with a platform-specific error.
        logger.error(f'Cannot launch file or directory that does not exist: {path}')
        return

    if sys.platform == 'win32':
        os.startfile(path)
    elif sys.platform == 'darwin':
        subprocess.Popen(['open', path], cwd="/", stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    else:
        subprocess.Popen(['xdg-open', path], cwd="/", stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def write_figure_plotly(
        figure: go.Figure,
        results_path: str,
        file_format=cobmo.config.config['plots']['file_format']
):
    """Utility function for writing / storing plotly figure to output file. File format can be given with
    `file_format` keyword argument, otherwise the default is obtained from config parameter `plots/file_format`.

    - `results_path` should be given as file name without file extension, because the file extension is appended
      automatically based on given `file_format`.
    - Valid file formats: 'png', 'jpg', 'jpeg', 'webp', 'svg', 'pdf', 'html', 'json'

    Raises:
        ValueError: If `file_format` is not one of the valid file formats.
    """

    if file_format in ['png', 'jpg', 'jpeg', 'webp', 'svg', 'pdf']:
        pio.write_image(figure, f"{results_path}.{file_format}")
    elif file_format in ['html']:
        pio.write_html(figure, f"{results_path}.{file_format}")
    elif file_format in ['json']:
        pio.write_json(figure, f"{results_path}.{file_format}")
    else:
        error_message = (
            f"Invalid `file_format` for `write_figure_plotly`: {file_format}"
            f" - Valid file formats: 'png', 'jpg', 'jpeg', 'webp', 'svg', 'pdf', 'html', 'json'"
        )
        logger.error(error_message)
        # Attach the message to the exception, so callers see the reason even
        # when log output is not visible. Previously `ValueError` was raised bare.
        raise ValueError(error_message)
"""Prepare CelebAHQ dataset"""
import os
import torch
import numpy as np
from PIL import Image
from .segbase import SegmentationDataset
class CelebaHQSegmentation(SegmentationDataset):
    """CelebA-HQ face-parsing segmentation dataset.

    Each stored image file is an aligned side-by-side pair: the photo in the
    left half and the color-coded segmentation mask in the right half (see
    `__getitem__`). Mask colors are converted to class indices via the color
    table defined in `__init__`.
    """
    # 15 classes: background plus 14 face parts (see color table in __init__).
    NUM_CLASS = 15

    def __init__(self, root='/home/mo/datasets/face_mask/', split='train', mode=None, transform=None, **kwargs):
        super(CelebaHQSegmentation, self).__init__(root, split, mode, transform, **kwargs)
        assert os.path.exists(self.root), "Please setup the dataset"
        # Collect all '.jpg' pair images from the split subfolder.
        self.images = _get_aligned_images(self.root + self.split)
        if len(self.images) == 0:
            raise RuntimeError("Found 0 images in subfolders of:" + root + "\n")
        # self.valid_classes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
        # Class-index -> RGB color mapping; row i is the mask color of class i.
        self._mapping = np.array([[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204],
                                  [0, 255, 255], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153],
                                  [0, 0, 204], [255, 153, 51], [255, 51, 153]]).astype('int32')
        # Same color table as plain lists, used for exact-match lookup in `_class_to_index`.
        self._mapping_list = [[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204],
                              [0, 255, 255], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153],
                              [0, 0, 204], [255, 153, 51], [255, 51, 153]]
        '''
        'skin' 'color': (204, 0, 0)
        'nose': 'color': (76, 153, 0)
        'glasses': 'color': (204, 204, 0)
        'l_eye': 'color': (51, 51, 255)
        'r_eye': 'color': (204, 0, 204)
        'brows': "_r_brow.png"], 'color': (0, 255, 255)
        'l_ear': 'color': (102, 51, 0)
        'r_ear': 'color': (255, 0, 0)
        'mouth': 'color': (102, 204, 0)
        'u_lip': 'color': (255, 255, 0)
        'l_lip': , 'color': (0, 0, 153)
        'hair': 'color': (0, 0, 204)
        'neck': 'color': (255, 153, 51)
        'misc': ["_hat.png", "_cloth.png", "ear_r.png"], 'color': (255, 51, 153)
        '''

    def _class_to_index(self, mask):
        """Convert an RGB mask array (H, W, 3) to a (H, W) array of class indices.

        NOTE(review): this is a per-pixel Python loop and very slow for large
        masks; a vectorized color lookup would be much faster — left unchanged here.
        """
        # assert the value
        # uniques = np.unique(mask, axis=0)
        # for row in uniques:
        #     values = np.unique(row, axis=0)
        #     for value in values:
        #         print(value)
        #         assert (value in self._mapping)
        index = []
        for row in mask:
            for item in row:
                try:
                    # Exact color match against the class color table.
                    i = self._mapping_list.index(item.tolist())
                    index.append(i)
                except ValueError:
                    # due to scaling some mask pixels are not retained?
                    # Fall back to the closest color in the table.
                    i = find_nearest_index(self._mapping_list, item)
                    index.append(i)
                    pass
        index = np.asarray(index).reshape((mask.shape[0], mask.shape[1]))
        return index

    def __getitem__(self, index):
        # The stored file is a side-by-side pair: left half photo, right half mask.
        AB = Image.open(self.images[index]).convert('RGB')
        w, h = AB.size
        w2 = int(w / 2)
        img = AB.crop((0, 0, w2, h))
        if self.mode == 'test':
            # Test mode has no usable mask; return image and filename only.
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        mask = AB.crop((w2, 0, w, h))
        # synchrosized transform
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            img, mask = self._img_transform(img), self._mask_transform(mask)
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)
        return img, mask, os.path.basename(self.images[index])

    def _mask_transform(self, mask):
        # Map mask colors to class indices and wrap as a LongTensor.
        target = self._class_to_index(np.array(mask).astype('int32'))
        return torch.LongTensor(np.array(target).astype('int32'))

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.images)

    @property
    def pred_offset(self):
        # Predictions map directly to class indices; no label offset applied.
        return 0
def _get_aligned_images(img_folder):
img_paths = []
for root, _, files in os.walk(img_folder):
for filename in files:
if filename.endswith('.jpg'):
imgpath = os.path.join(root, filename)
if os.path.isfile(imgpath):
img_paths.append(imgpath)
else:
print('cannot find the image:', imgpath)
print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
return img_paths
def find_nearest_index(array, value):
    """Return the index of the row in `array` with smallest mean absolute difference to `value`."""
    rows = np.asarray(array)
    distances = np.mean(np.abs(rows - value), axis=1)
    return distances.argmin()
# Smoke test: instantiating the dataset scans the default root for images.
if __name__ == '__main__':
    dataset = CelebaHQSegmentation()
"""
Test functions for the shoyu.py module
"""
import os
import pickle
import numpy as np
from ramannoodles import shoyu
# open spectra library
# - A context manager closes the file handle deterministically; the previous
#   `pickle.load(open(...))` leaked the handle.
with open('raman_spectra/shoyu_data_dict.p', 'rb') as _shoyu_file:
    SHOYU_DATA_DICT = pickle.load(_shoyu_file)
def test_download_cas():
    """
    Test function that confirms that the raman_spectra/ directory exists/was created,
    and confirms that the .jdx file was saved with the correct filename.
    """
    # CAS registry number for water
    cas_num = '7732-18-5'
    shoyu.download_cas(cas_num)
    assert os.path.isdir('raman_spectra/'), 'directory not found'
    assert os.path.isfile('raman_spectra/7732185_NIST_IR.jdx'), 'file not saved correctly'
    # Bad-input handling: an int CAS number must be rejected with a TypeError.
    try:
        shoyu.download_cas(7732185)
    except TypeError:
        print('An int was passed to the function, and it was handled well with a TypeError.')
    else:
        # Previously the test passed silently even when no exception was raised.
        raise AssertionError('shoyu.download_cas did not raise TypeError for int input')
def test_add_jdx():
    """
    Test function that confirms that custom labeling is successful when updating shoyu_data_dict.p,
    and that the y units are correctly converted to ABSORBANCE instead of the default TRANSMITTENCE.
    """
    # .jdx file containing water data
    filename = '7732185_NIST_IR.jdx'
    shoyu_data_dict = shoyu.add_jdx('raman_spectra/'+filename, label='Water_label_test')
    assert 'Water_label_test' in shoyu_data_dict, 'custom label not applied successfully'
    water = shoyu_data_dict['Water_label_test']
    assert water['yunits'] == 'ABSORBANCE', 'Incorrect y units stored'
    assert filename[-4:] == '.jdx', 'File type is not .jdx'
    # Bad-input handling: an int must be rejected with a TypeError.
    try:
        shoyu.download_cas(1)
    except TypeError:
        print('An int was passed to the function, and it was handled well with a TypeError.')
    else:
        # Previously the test passed silently even when no exception was raised.
        raise AssertionError('shoyu.download_cas did not raise TypeError for int input')
def test_initialize_standard_library():
    """
    Verify that initialize_standard_library() sets up the raman_spectra/
    directory, downloads the reference .jdx files, and generates shoyu_data_dict.p.
    """
    shoyu.initialize_standard_library()
    spectra_dir = 'raman_spectra/'
    assert os.path.isdir(spectra_dir), 'Directory not found'
    assert os.path.isfile(spectra_dir + '7732185_NIST_IR.jdx'), 'file not saved correctly'
    assert os.path.isfile(spectra_dir + 'shoyu_data_dict.p'), 'shoyu_data_dict.p not found'
def test_more_please():
    """
    Test function that confirms that the pentane .jdx file was downloaded correctly and was
    successfully added to shoyu_data_dict.p
    """
    # CAS registry number for pentane
    cas_num = '109-66-0'
    shoyu_data_dict = shoyu.more_please(cas_num)
    assert os.path.isfile('raman_spectra/109660_NIST_IR.jdx'), 'file not found'
    assert 'N-PENTANE' in shoyu_data_dict, 'N-PENTANE not successfully added to shoyu_data_dict'
    # Bad-input handling: an int CAS number must be rejected with a TypeError.
    try:
        shoyu.download_cas(109660)
    except TypeError:
        print('An int was passed to the function, and it was handled well with a TypeError.')
    else:
        # Previously the test passed silently even when no exception was raised.
        raise AssertionError('shoyu.download_cas did not raise TypeError for int input')
def test_clean_spectra():
    """
    Test function for shoyu.clean_spectra. It verifies that the output type is correct,
    that repeated data points were removed from the input data, and that bad input types
    are handled correctly.
    """
    compound = SHOYU_DATA_DICT['WATER']
    comp_data_clean = shoyu.clean_spectra(compound)
    assert isinstance(comp_data_clean, list), 'output type not a list'
    assert len(comp_data_clean) < len(compound['x']), 'repeat data points were not removed'
    # Bad-input handling: a list must be rejected with a TypeError.
    try:
        shoyu.clean_spectra(compound=[[1, 2, 3, 4], [0.2, 0.4, 1.0, 0.01]])
    except TypeError:
        print('A list was passed to the function, and it was handled well with a TypeError.')
    else:
        # Previously the test passed silently even when no exception was raised.
        raise AssertionError('shoyu.clean_spectra did not raise TypeError for list input')
def test_interpolate_spectra():
    """
    Test function for shoyu.interpolate_spectra. It verifies that the output type is correct,
    and that bad input types are handled correctly.
    """
    compound = SHOYU_DATA_DICT['WATER']
    comp_data_clean = shoyu.clean_spectra(compound)
    comp_data_int = shoyu.interpolate_spectra(comp_data_clean)
    assert isinstance(comp_data_int, list), 'Output type not correct, a list is expected'
    # Bad-input handling: a list of ints must be rejected with a TypeError.
    try:
        shoyu.interpolate_spectra([1, 2, 3, 4])
    except TypeError:
        print('A list of ints was passed to the function, and was handled well with a TypeError.')
    else:
        # Previously the test passed silently even when no exception was raised.
        raise AssertionError('shoyu.interpolate_spectra did not raise TypeError for list of ints')
def test_sum_spectra():
    """
    Test function for shoyu.sum_spectra. It checks to confirm that the output data lengths match,
    that the output types are correct, and that bad input types are handled well.
    """
    compound1 = SHOYU_DATA_DICT['WATER']
    compound2 = SHOYU_DATA_DICT['CARBON MONOXIDE']
    comp1_data_clean = shoyu.clean_spectra(compound1)
    comp2_data_clean = shoyu.clean_spectra(compound2)
    comp1_data_int = shoyu.interpolate_spectra(comp1_data_clean)
    comp2_data_int = shoyu.interpolate_spectra(comp2_data_clean)
    x_combined, y_combined = shoyu.sum_spectra(comp1_data_int, comp2_data_int)
    assert len(x_combined) == len(y_combined), 'Output data lengths do not match'
    assert isinstance(x_combined, np.ndarray), 'x_combined type is not a numpy.ndarray.'
    assert isinstance(y_combined, np.ndarray), 'y_combined type is not a numpy.ndarray.'
    # Bad-input handling: floats must be rejected with a TypeError in either position.
    try:
        shoyu.sum_spectra(1.2, comp2_data_int)
    except TypeError:
        print('A float was passed to the function, and it was handled well with a TypeError.')
    else:
        # Previously the test passed silently even when no exception was raised.
        raise AssertionError('shoyu.sum_spectra did not raise TypeError for float first argument')
    try:
        shoyu.sum_spectra(comp1_data_int, 66.6)
    except TypeError:
        print('A float was passed to the function, and it was handled well with a TypeError.')
    else:
        raise AssertionError('shoyu.sum_spectra did not raise TypeError for float second argument')
def test_combine_spectra():
    """
    Test function that confirms that the two compounds from shoyu_data_dict.p were combined
    successfully, that the output data has the correct shape, and that the output range is
    within the overall range of the two individual compounds.
    """
    compound_1 = SHOYU_DATA_DICT['WATER']
    compound_2 = SHOYU_DATA_DICT['CARBON MONOXIDE']
    data = shoyu.combine_spectra(compound_1, compound_2)
    assert len(data[0]) == len(data[1]), 'lengths of x and y data do not match'
    assert len(data) == 2, 'shape of output data different than expected'
    ranges = [max(compound_1['x']), min(compound_1['x']),
              max(compound_2['x']), min(compound_2['x'])]
    assert min(ranges) <= min(data[0]), """
    output data contains values below the minimum range of either compound"""
    assert max(ranges) >= max(data[0]), """
    output data contains values above the maximum range of either compound"""
    # Bad-input handling: lists must be rejected with a TypeError in either position.
    try:
        shoyu.combine_spectra([1, 2, 3, 4], compound_2)
    except TypeError:
        print('A list was passed to the function, and it was handled well with a TypeError.')
    else:
        # Previously the test passed silently even when no exception was raised.
        raise AssertionError('shoyu.combine_spectra did not raise TypeError for list first argument')
    try:
        shoyu.combine_spectra(compound_1, [1, 2, 3, 4])
    except TypeError:
        print('A list was passed to the function, and it was handled well with a TypeError.')
    else:
        raise AssertionError('shoyu.combine_spectra did not raise TypeError for list second argument')
import numpy as np
import pandas as pd
from pandas.io.parsers import read_csv
from BOAmodel import *
from collections import defaultdict
""" parameters """
# The following parameters are recommended to change depending on the size and complexity of the data
N = 2000 # number of rules to be used in SA_patternbased and also the output of generate_rules
Niteration = 500 # number of iterations in each chain
Nchain = 2 # number of chains in the simulated annealing search algorithm
supp = 5 # 5% is a generally good number. The higher this supp, the 'larger' a pattern is
maxlen = 3 # maxmum length of a pattern
# \rho = alpha/(alpha+beta). Make sure \rho is close to one when choosing alpha and beta.
alpha_1 = 500 # alpha_+
beta_1 = 1 # beta_+
alpha_2 = 500 # alpha_-
beta_2 = 1 # beta_-
""" input file """
# notice that in the example, X is already binary coded.
# Data has to be binary coded and the column name shd have the form: attributename_attributevalue
filepathX = 'tictactoe_X.txt' # input file X
filepathY = 'tictactoe_Y.txt' # input file Y
df = read_csv(filepathX,header=0,sep=" ")
Y = np.loadtxt(open(filepathY,"rb"),delimiter=" ")
lenY = len(Y)
train_index = sample(xrange(lenY),int(0.70*lenY))
test_index = [i for i in xrange(lenY) if i not in train_index]
model = BOA(df.iloc[train_index],Y[train_index])
model.generate_rules(supp,maxlen,N)
model.set_parameters(alpha_1,beta_1,alpha_2,beta_2,None,None)
rules = model.fit(Niteration,Nchain,print_message=True)
# test
Yhat = predict(rules,df.iloc[test_index])
TP,FP,TN,FN = getConfusion(Yhat,Y[test_index])
tpr = float(TP)/(TP+FN)
fpr = float(FP)/(FP+TN)
print 'TP = {}, FP = {}, TN = {}, FN = {} \n accuracy = {}, tpr = {}, fpr = {}'.format(TP,FP,TN,FN, float(TP+TN)/(TP+TN+FP+FN),tpr,fpr) | |
import sys
import os
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src"))
)
import torch
import torch.nn.functional as F
import numpy as np
import imageio
import util
import warnings
from data import get_split_dataset
from render import NeRFRenderer
from model import make_model
from scipy.interpolate import CubicSpline
from tqdm import tqdm
import matplotlib.pylab as plt
import trimesh
from dotmap import DotMap
def extra_args(parser):
    """Register this script's extra command-line options on *parser*.

    Mutates the parser in place and returns it so the result can be
    passed straight to the surrounding arg-parsing helper.
    """
    parser.add_argument("--subset", "-S", type=int, default=0,
                        help="Subset in data to use")
    parser.add_argument("--split", type=str, default="train",
                        help="Split of data to use train | val | test")
    parser.add_argument("--source", "-P", type=str, default="0 1 2 3 4",
                        help="Source view(s) in image, in increasing order. -1 to do random")
    parser.add_argument("--num_views", type=int, default=400,
                        help="Number of video frames (rotated views)")
    parser.add_argument("--elevation", type=float, default=10.0,
                        help="Elevation angle (negative is above)")
    parser.add_argument("--scale", type=float, default=1.0,
                        help="Video scale relative to input size")
    parser.add_argument("--radius", type=float, default=8.0,
                        help="Distance of camera from origin, default is average of z_far, z_near of dataset (only for non-DTU)")
    parser.add_argument("--root", type=str,
                        default="/home/htxue/data/mit/pixel-nerf/")
    parser.add_argument("--voxel_num", type=int, default=100)
    parser.add_argument("--cluster_method", type=str, default='tsne')
    parser.add_argument("--fps", type=int, default=30, help="FPS of video")
    return parser
# ---- Argument parsing and device/dataset setup ----
args, conf = util.args.parse_args(extra_args, default_conf="conf/default_mv.conf")
args.resume = True
print(args)
device = util.get_cuda(args.gpu_id[0])
dset = get_split_dataset(
    args.dataset_format, args.datadir, want_split=args.split, training=False
)
# Load a single data instance (one scene) from the chosen subset.
data = dset[args.subset]
data_path = data["path"]
print("Data instance loaded:", data_path)
images = data["images"]  # (NV, 3, H, W)
poses = data["poses"]  # (NV, 4, 4)
focal = data["focal"]
if isinstance(focal, float):
    # Dataset implementations are not consistent about
    # returning float or scalar tensor in case of fx=fy
    focal = torch.tensor(focal, dtype=torch.float32)
    focal = focal[None]
c = data.get("c")  # optional principal point; may be absent
if c is not None:
    c = c.to(device=device).unsqueeze(0)
NV, _, H, W = images.shape
focal = focal.to(device=device)
# Parse the space-separated source-view indices ("-1" alone means random).
source = torch.tensor(list(map(int, args.source.split())), dtype=torch.long)
NS = len(source)
random_source = NS == 1 and source[0] == -1
assert not (source >= NV).any()
print("H, W:", H, W)
if random_source:
    src_view = torch.randint(0, NV, (1,))
else:
    src_view = source
# Optional resolution scaling; warn if the scale is not exactly integral.
if args.scale != 1.0:
    Ht = int(H * args.scale)
    Wt = int(W * args.scale)
    if abs(Ht / args.scale - H) > 1e-10 or abs(Wt / args.scale - W) > 1e-10:
        warnings.warn(
            "Inexact scaling, please check {} times ({}, {}) is integral".format(
                args.scale, H, W
            )
        )
    H, W = Ht, Wt
# ---- Model construction and source-view encoding ----
net = make_model(conf["model"], using_intermediate_feature=True).to(device=device)
net.load_weights(args)
print('src views', src_view)
net.encode(
    images[src_view].unsqueeze(0),
    poses[src_view].unsqueeze(0).to(device=device),
    focal,
    c=c,
)
feature_list = []


def inter_feature_hook(module, input, output):
    # Forward-hook callback: collects intermediate layer outputs into the
    # module-level feature_list. NOTE(review): not registered anywhere in
    # this script — presumably kept for ad-hoc inspection.
    feature_list.append(output.data)


renderer = NeRFRenderer.from_conf(
    conf["renderer"], lindisp=dset.lindisp, eval_batch_size=args.ray_batch_size,
).to(device=device)
render_par = renderer.bind_parallel(net, args.gpu_id, simple_output=True).eval()
# Get the distance from camera to origin
z_near = dset.z_near
z_far = dset.z_far
# ---- Build the (N+1)^3 query grid; bounds depend on the scene type ----
N = args.voxel_num
print(args.name)
if "pour" in args.name:
    ty = np.linspace(1, 9, N + 1)
    tx = np.linspace(-4, 4, N + 1)
    tz = np.linspace(-3, 5, N + 1)
if "shake" in args.name:
    ty = np.linspace(0, 3, N + 1)
    tx = np.linspace(-1.5, 1.5, N + 1)
    tz = np.linspace(-1.5, 1.5, N + 1)
    # ty = np.linspace(1, 9, N + 1)
    # tx = np.linspace(-4, 4, N + 1)
    # tz = np.linspace(-3, 5, N + 1)
query_pts = np.stack(np.meshgrid(tx, ty, tz), -1).astype(np.float32)
print(query_pts.shape)
sh = query_pts.shape
flat = query_pts.reshape([-1, 3])
flat = torch.from_numpy(flat).to(args.gpu_id[0])
# Query the network chunk-by-chunk; zero view directions are used so the
# output does not depend on viewing angle.
fn = lambda i0, i1: net(flat[None, i0:i1, :], viewdirs=torch.zeros(flat[i0:i1].shape).to(args.gpu_id[0]))
# fn = lambda i0, i1: net(flat[None, i0:i1, :], viewdirs=None)
chunk = 1024
# sigma_list = []
# feature_list = []
#
# for i in tqdm(range(0, flat.shape[0], chunk)):
#     feature, out = fn(i, i + chunk)
#     feature_list.append(feature[0].detach().cpu().numpy())
#     sigma_list.append(out[0].detach().cpu().numpy())
# NOTE(review): fn() is called twice per chunk (once for features, once for
# raw outputs), doubling network evaluations versus the commented loop above.
feature = np.concatenate([fn(i, i + chunk)[0][0].detach().cpu().numpy() for i in tqdm(range(0, flat.shape[0], chunk))], 0)
raw = np.concatenate([fn(i, i + chunk)[1][0].detach().cpu().numpy() for i in tqdm(range(0, flat.shape[0], chunk))], 0)
# feature = np.concatenate(feature_list, 0)
# sigma = np.concatenate(sigma_list, 0)
sigma = np.reshape(raw, list(sh[:-1]) + [-1])  # N * N * N * 4
# sigma = sigma.view(-1, sigma.shape[-1])
# Density is the last channel of the raw output, clamped to >= 0.
sigma = np.maximum(sigma[..., -1], 0.)
# ---- Reduce per-point features to 3 channels (a pseudo-RGB) ----
print("calculating cluster information, using {} to get the decomposed representation".format(args.cluster_method))
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
if args.cluster_method == 'tsne':
    # t-SNE runs only on points with positive density, then each of the 3
    # embedded dimensions is min-max normalised to [0, 1].
    tsne = TSNE(n_components=3)
    sigma_flatten = sigma.flatten()
    valid = sigma_flatten > 0
    feature_tsne = tsne.fit_transform(feature[valid])
    feature = feature_tsne
    for i in range(3):
        feature[:, i] = (feature[:, i] - feature[:, i].min()) / (feature[:, i].max() - feature[:, i].min())
elif args.cluster_method == 'pca':
    # PCA is applied to all points (not only valid ones), then normalised.
    pca = PCA(n_components=3)
    feature_pca = pca.fit_transform(feature)
    feature = feature_pca
    for i in range(3):
        feature[:, i] = (feature[:, i] - feature[:, i].min()) / (feature[:, i].max() - feature[:, i].min())
elif args.cluster_method == 'mean':
    # Grayscale: channel-mean replicated into 3 identical channels.
    feature_r = feature.mean(-1)
    feature_r = np.expand_dims(feature_r, -1).repeat(3, -1)
    print(feature_r.shape)
    feature = (feature_r - feature_r.min()) / (feature_r.max() - feature_r.min())
elif args.cluster_method == 'vertrgb':
    # Use the network's first three raw output channels directly as RGB.
    feature = np.maximum(raw[:, :3], 0.)
# feature_tsne = feature_tsne.reshape(list(sh[:-1]) + [-1])  # N * N * N * 3
# ---- Save and visualise ----
saving_path = os.path.join(args.root, 'experimental', 'mesh_color', 'water_pour_S{}_resolution{}_feature{}/'.format(
    args.subset, args.voxel_num, args.cluster_method))
if not os.path.exists(saving_path):
    os.makedirs(saving_path)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# draw 3D plot
t = [i for i in range(N + 1)]
sigma_flatten = sigma.flatten()
valid = sigma_flatten > 0
np.save(saving_path + 'feature_all.npy', feature)
np.save(saving_path + 'sigmma_all.npy', sigma_flatten)
x, y, z = np.meshgrid(t, t, t)
x, y, z = x.flatten(), y.flatten(), z.flatten()
color = feature
fig = plt.figure()
ax3D = fig.add_subplot(111, projection='3d')
if args.cluster_method == 'tsne':
    ax3D.scatter(x[valid], z[valid], y[valid], s=10, c=color, marker='o')  # tsne is operated on filtered points
else:
    ax3D.scatter(x[valid], z[valid], y[valid], s=10, c=color[valid], marker='o')
ax3D.set_xlim3d(0, 100)
ax3D.set_ylim3d(0, 100)
ax3D.set_zlim3d(0, 100)
plt.show()
# Histogram of densities (log scale) for a quick sanity check.
plt.hist(np.maximum(0, sigma.ravel()), log=True)
plt.savefig(saving_path + 'hist.jpg')
# import mcubes
# threshold = 5
# print('fraction occupied', np.mean(sigma > threshold))
# vertices, triangles = mcubes.marching_cubes(sigma, threshold)
# print('done', vertices.shape, triangles.shape)
#
#
#
# n_vert = vertices.shape[0]
#
# vert_index = vertices[:, 0] * (N + 1) * (N + 1)+ vertices[:, 1] * (N+1) + vertices[:, 2]
# vert_index = vert_index.astype(np.int)
#
# vert_rgb = feature[vert_index]
#
#
#
# src_view_images = np.hstack(images[src_view])
# print(src_view_images.shape)
#
#
# imageio.imwrite(saving_path + 'src_view.png', (((src_view_images.transpose(1, 2, 0)+1)/2)*255).astype(np.uint8))
# np.save(saving_path + 'vertices.npy', vertices)
# np.save(saving_path + 'triangles.npy', triangles)
# np.save(saving_path + 'color.npy', vert_rgb)
# util.save_obj(vertices, triangles, saving_path + "model.obj", vert_rgb=vert_rgb)
#
# print("object saved!")
from config import TIMITConfig
from argparse import ArgumentParser
from multiprocessing import Pool
import os
from TIMIT.dataset import TIMITDataset
if TIMITConfig.training_type == 'H':
from TIMIT.lightning_model_h import LightningModel
else:
from TIMIT.lightning_model import LightningModel
from sklearn.metrics import mean_absolute_error, mean_squared_error, accuracy_score
import pytorch_lightning as pl
import torch
import torch.utils.data as data
from tqdm import tqdm
import pandas as pd
import numpy as np
if __name__ == "__main__":
parser = ArgumentParser(add_help=True)
parser.add_argument('--data_path', type=str, default=TIMITConfig.data_path)
parser.add_argument('--speaker_csv_path', type=str, default=TIMITConfig.speaker_csv_path)
parser.add_argument('--timit_wav_len', type=int, default=TIMITConfig.timit_wav_len)
parser.add_argument('--batch_size', type=int, default=TIMITConfig.batch_size)
parser.add_argument('--epochs', type=int, default=TIMITConfig.epochs)
parser.add_argument('--alpha', type=float, default=TIMITConfig.alpha)
parser.add_argument('--beta', type=float, default=TIMITConfig.beta)
parser.add_argument('--gamma', type=float, default=TIMITConfig.gamma)
parser.add_argument('--hidden_size', type=float, default=TIMITConfig.hidden_size)
parser.add_argument('--lr', type=float, default=TIMITConfig.lr)
parser.add_argument('--gpu', type=int, default=TIMITConfig.gpu)
parser.add_argument('--n_workers', type=int, default=TIMITConfig.n_workers)
parser.add_argument('--dev', type=str, default=False)
parser.add_argument('--model_checkpoint', type=str, default=TIMITConfig.model_checkpoint)
parser.add_argument('--noise_dataset_path', type=str, default=TIMITConfig.noise_dataset_path)
parser.add_argument('--model_type', type=str, default=TIMITConfig.model_type)
parser.add_argument('--training_type', type=str, default=TIMITConfig.training_type)
parser.add_argument('--data_type', type=str, default=TIMITConfig.data_type)
parser = pl.Trainer.add_argparse_args(parser)
hparams = parser.parse_args()
print(f'Testing Model on NISP Dataset\n#Cores = {hparams.n_workers}\t#GPU = {hparams.gpu}')
# Testing Dataset
test_set = TIMITDataset(
wav_folder = os.path.join(hparams.data_path, 'TEST'),
hparams = hparams,
is_train=False
)
csv_path = hparams.speaker_csv_path
df = pd.read_csv(csv_path)
h_mean = df['height'].mean()
h_std = df['height'].std()
a_mean = df['age'].mean()
a_std = df['age'].std()
#Testing the Model
if hparams.model_checkpoint:
if TIMITConfig.training_type == 'AHG':
model = LightningModel.load_from_checkpoint(hparams.model_checkpoint, HPARAMS=vars(hparams))
model.eval()
height_pred = []
height_true = []
age_pred = []
age_true = []
gender_pred = []
gender_true = []
# i = 0
for batch in tqdm(test_set):
x, y_h, y_a, y_g = batch
y_hat_h, y_hat_a, y_hat_g = model(x)
height_pred.append((y_hat_h*h_std+h_mean).item())
age_pred.append((y_hat_a*a_std+a_mean).item())
gender_pred.append(y_hat_g>0.5)
height_true.append((y_h*h_std+h_mean).item())
age_true.append(( y_a*a_std+a_mean).item())
gender_true.append(y_g)
# if i> 5: break
# i += 1
female_idx = np.where(np.array(gender_true) == 1)[0].reshape(-1).tolist()
male_idx = np.where(np.array(gender_true) == 0)[0].reshape(-1).tolist()
height_true = np.array(height_true)
height_pred = np.array(height_pred)
age_true = np.array(age_true)
age_pred = np.array(age_pred)
hmae = mean_absolute_error(height_true[male_idx], height_pred[male_idx])
hrmse = mean_squared_error(height_true[male_idx], height_pred[male_idx], squared=False)
amae = mean_absolute_error(age_true[male_idx], age_pred[male_idx])
armse = mean_squared_error(age_true[male_idx], age_pred[male_idx], squared=False)
print(hrmse, hmae, armse, amae)
hmae = mean_absolute_error(height_true[female_idx], height_pred[female_idx])
hrmse = mean_squared_error(height_true[female_idx], height_pred[female_idx], squared=False)
amae = mean_absolute_error(age_true[female_idx], age_pred[female_idx])
armse = mean_squared_error(age_true[female_idx], age_pred[female_idx], squared=False)
print(hrmse, hmae, armse, amae)
print(accuracy_score(gender_true, gender_pred))
else:
model = LightningModel.load_from_checkpoint(hparams.model_checkpoint, HPARAMS=vars(hparams))
model.eval()
height_pred = []
height_true = []
gender_true = []
for batch in tqdm(test_set):
x, y_h, y_a, y_g = batch
y_hat_h = model(x)
height_pred.append((y_hat_h*h_std+h_mean).item())
height_true.append((y_h*h_std+h_mean).item())
gender_true.append(y_g)
female_idx = np.where(np.array(gender_true) == 1)[0].reshape(-1).tolist()
male_idx = np.where(np.array(gender_true) == 0)[0].reshape(-1).tolist()
height_true = np.array(height_true)
height_pred = np.array(height_pred)
hmae = mean_absolute_error(height_true[male_idx], height_pred[male_idx])
hrmse = mean_squared_error(height_true[male_idx], height_pred[male_idx], squared=False)
print(hrmse, hmae)
hmae = mean_absolute_error(height_true[female_idx], height_pred[female_idx])
hrmse = mean_squared_error(height_true[female_idx], height_pred[female_idx], squared=False)
print(hrmse, hmae)
else:
print('Model chekpoint not found for Testing !!!') | |
"""
Generate data for the diffusion forward model.
Author:
Panagiotis Tsilifis
Date:
6/12/2014
"""
import numpy as np
import fipy as fp
import os
import matplotlib.pyplot as plt
# Make the source
# 101x101 unit-square grid; a Gaussian source of width rho centred at xs_1
# is switched on only while t < T.
nx = 101
ny = nx
dx = 1./101
dy = dx
rho = 0.05
q0 = 1. / (np.pi * rho ** 2)  # source amplitude, normalised to unit total strength
T = 0.3  # source shut-off time
mesh = fp.Grid2D(dx=dx, dy=dy, nx=nx, ny=ny)
xs_1 = np.array([0.91, 0.23])  # source centre
#xs_2 = np.array([0.89, 0.75])
time = fp.Variable()
sourceTerm_1 = fp.CellVariable(name = "Source term", mesh=mesh, value = 0.)
#sourceTerm_2 = fp.CellVariable(name = "Source term", mesh=mesh, value = 0.)
# Evaluate the Gaussian source at each cell centre; the (time() < T) factor
# zeroes the source after the shut-off time.
for i in range(sourceTerm_1().shape[0]):
    sourceTerm_1()[i] = q0 * np.exp( - ((mesh.cellCenters[0]()[i] - xs_1[0]) ** 2
        + (mesh.cellCenters[1]()[i] - xs_1[1]) ** 2 ) / (2 * rho **2)) * (time() < T)
    #sourceTerm_2()[i] = q0 * np.exp( - ((mesh.cellCenters[0]()[i] - xs_2[0]) ** 2
    #    + (mesh.cellCenters[1]()[i] - xs_2[1]) ** 2 ) / (2 * rho **2)) * (time() < T)
# The equation
eq = fp.TransientTerm() == fp.DiffusionTerm(coeff=1.) + sourceTerm_1# + sourceTerm_2
# The solution variable
phi = fp.CellVariable(name = "Concentration", mesh=mesh, value=0.)
#if __name__ == '__main__':
#    viewer = fp.Viewer(vars=phi, datamin=0., datamax=3.)
#    viewer.plot()
x = np.arange(0,101.)/101
y = x
data = []
dt = 0.005
steps = 60
# Time-step the diffusion equation; record and plot snapshots at steps
# 14/29/44/59 (i.e. t = 0.075, 0.15, 0.225, 0.3).
for step in range(steps):
    time.setValue(time() + dt)
    eq.solve(var=phi, dt=dt)
    # if __name__ == '__main__':
    #     viewer.plot()
    if step == 14 or step == 29 or step == 44 or step == 59:
        # Sample the concentration at two fixed cells (lower/upper centre —
        # indices 50 and 10150 of the flattened 101x101 field).
        dc = phi()[50]
        #dr = phi()[109]
        uc = phi()[10150]
        #ur = phi()[12099]
        #data = np.hstack([data, np.array([dl, dr, ul, ur])])
        data = np.hstack([data, np.array([dc, uc])])
        fig = plt.figure()
        plt.contourf(x, y, phi().reshape(101,101),200)
        plt.colorbar()
        # fig.suptitle('Concentration at t = ' + str(time()))
        plt.xlabel('x')
        plt.ylabel('y')
        #png_file = os.path.join('figures', 'concentration'+'t.png')
        #plt.savefig(png_file)
        plt.show()
#if __name__ == '__main__':
#    raw_input("Transient diffusion with source term. Press <return> to proceed")
#np.save('data_concentrations_upperlowercenters.npy',data)
"""
setup.py file for SWIG example
"""
from distutils.core import setup, Extension
import numpy
polyiou_module = Extension('_polyiou',
sources=['polyiou_wrap.cxx', 'polyiou.cpp'],
)
setup(name = 'polyiou',
version = '0.1',
author = "SWIG Docs",
description = """Simple swig example from docs""",
ext_modules = [polyiou_module],
py_modules = ["polyiou"],
) | |
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from mars.lib.sparse.core import issparse
import mars.lib.sparse as mls
from mars.tests.core import TestBase
DEBUG = True
# cs: coo solution
# ds: dense solution (converted from coo solution using instance method toarray())
# da: dense answer (how to obtain: 1. convert operand from coo to dense using toarray() ; 2. operate using numpy library
class Test(TestBase):
    """Unit tests for the N-dimensional COO sparse array (mls.COONDArray).

    Variable naming used throughout (extends the header comments above):
      c*  COO operands/results;  d*  dense mirrors built in setUp;
      cs/ds  coo/dense sum;  cd/dd  coo/dense difference;
      cp/dp  coo/dense product;  cq  quotient;  da  dense answer
      computed independently with plain numpy.
    """

    def setUp(self):
        # Two overlapping 10x11x12 COO arrays (c1, c2), plus c3 which is c1
        # with every index shifted by -1 so c1 and c3 share no nonzero cell.
        self.c1 = mls.COONDArray(indices=np.asarray([(9, 5, 4), (7, 6, 5), (2, 2, 4), (7, 9, 4), (2, 2, 6)]),
                                 data=np.asarray([3, 4, 5, 3, 1]),
                                 shape=np.asarray([10, 11, 12])
                                 )
        self.c2 = mls.COONDArray(indices=np.asarray([(9, 5, 4), (2, 2, 4), (7, 9, 4), (2, 2, 6), (8, 4, 9)]),
                                 data=np.asarray([3, 5, 3, 1, 2]),
                                 shape=np.asarray([10, 11, 12])
                                 )
        self.c3 = mls.COONDArray(indices=np.asarray([tuple(i - 1 for i in list(ind)) for ind in self.c1.indices]),
                                 data=np.asarray(self.c1.data),
                                 shape=np.asarray(self.c1.shape)
                                 )
        # self.s1 = sps.coo_matrix()
        # create dense numpy arrays with a similar shape and all zero values
        self.d1 = np.zeros(shape=self.c1.shape)
        self.d2 = np.zeros(shape=self.c2.shape)
        self.d3 = np.zeros(shape=self.c3.shape)
        # assign nnz val to the dense numpy array of each instance.
        # d stands for dense
        for i in range(len(self.c1.indices)):
            self.d1[tuple(self.c1.indices[i])] = self.c1.data[i]
        for i in range(len(self.c2.indices)):
            self.d2[tuple(self.c2.indices[i])] = self.c2.data[i]
        for i in range(len(self.c3.indices)):
            self.d3[tuple(self.c3.indices[i])] = self.c3.data[i]

    def testCooCreation(self):
        """Constructing a COONDArray from another one preserves sparseness."""
        # self.assert(mls.issparse(self.c1))
        # type assertion only. REQUIRE: parameter assertion as well
        s = mls.COONDArray(self.c1)
        assert (isinstance(s, mls.COONDArray))
        assert (isinstance(s, mls.SparseNDArray))
        assert (mls.issparse(s))
        assert (s.issparse())
        # assert(mls.issparse(self.c2))
        # update to new numpy ndarray

    def testToArray(self):
        """toarray() densifies to exactly the hand-built dense mirrors."""
        # if issparse(a):
        #     a = a.toarray()
        #     # hand-tune <b> && compare <b> with <a>
        # else:
        #     raise ValueError("input array is not sparse")
        nparr1 = self.c1.toarray()
        nparr2 = self.c2.toarray()
        np.testing.assert_allclose(nparr1, self.d1)
        np.testing.assert_allclose(nparr2, self.d2)

    def assertArrayEqual(self, a, b):
        # Helper: densify either argument if sparse, then compare.
        # NOTE(review): on success this returns the result of
        # np.testing.assert_equal (None), on mismatch it returns False —
        # so the return value is not a usable truth test; verify callers.
        if issparse(a):
            a = a.toarray()
        else:
            a = np.asarray(a)
        if issparse(b):
            b = b.toarray()
        else:
            b = np.asarray(b)
        try:
            return np.testing.assert_equal(a, b)
        except AssertionError:
            return False

    def testCooAddition(self):
        """__add__ against sparse, dense, scalar and invalid operands."""
        # CASE0: SPARSE + SPARSE
        # cs: coo sum
        # cs = self.c1.__add__(self.c2)
        cs = self.c1 + self.c2
        # ds: dense sum; coo sum.todense()
        ds = cs.toarray()
        # da: dense answer
        da = self.d1 + self.d2
        np.testing.assert_allclose(ds, da)
        # dense_result = self.d1 + self.d2
        # CASE1: SPARSE + DENSE
        ds = self.c1 + self.d2
        # dense answer
        da = self.d1 + self.d2
        np.testing.assert_allclose(ds, da)
        const_val = 3
        # CASE2: SPARSE + CONSTANT, increment_all = False
        # const_val = 3
        # cs = self.c1 + const_val
        # ds = cs.toarray()
        # # dense answer
        # da = np.zeros(shape=self.c1.shape)
        # for i, v in zip(self.c1.indices, self.c1.values):
        #     da[i] = v + const_val
        # np.testing.assert_allclose(ds, da)
        # CASE3: SPARSE + CONSTANT, increment_all = True
        # da = self.d1 + const_val * np.ones(shape=self.c1.shape)
        cs = self.c1 + const_val
        # ds = cs.toarray()
        # NOTE that output type is changed to numpy ndarray from COONDArray given the nature of increment_all.
        # WILL improve usage of memory by adding a new attribute, offset
        ds = cs
        da = np.ones(shape=self.c1.shape) * const_val
        for i, v in zip(self.c1.indices, self.c1.data):
            da[i] += v
        np.testing.assert_allclose(ds, da)
        # CASE4: TypeError <- SPARSE + INCORRECT INPUT
        with self.assertRaises(TypeError):
            cs = self.c1 + [1, 2, 3]
        # self.assertEqual(cs, None)
        # equivalent to:
        # self.assertRaises(TypeError, mls.COONDArray.__add__, self.c1, [1, 2, 3])

    # see testCooAddition for references of variable naming
    def testCooSubtraction(self):
        """__sub__ against sparse, dense, scalar and invalid operands."""
        # CASE0: SPARSE <- SPARSE - SPARSE
        cd = self.c1 - self.c2
        dd = cd.toarray()
        da = self.d1 - self.d2
        np.testing.assert_allclose(dd, da)
        # CASE1: DENSE <- SPARSE - DENSE
        # dense difference
        dd = self.c1 - self.d2
        # dense answer
        da = self.d1 - self.d2
        np.testing.assert_allclose(dd, da)
        const_val = 3
        # CASE2: DENSE <- SPARSE + CONSANT, increment_all = True
        cd = self.c1.__sub__(other=const_val)
        ds = cd
        da = np.ones(shape=self.c1.shape) * const_val * -1
        for i, v in zip(self.c1.indices, self.c1.data):
            da[i] += v
        np.testing.assert_allclose(ds, da)
        # CASE4: TypeError <- SPARSE + INCORRECT INPUT
        with self.assertRaises(TypeError):
            _ = self.c1 - [1, 2, 3]  # noqa: F841

    def testCooCopy(self):
        """copy() yields an equal array."""
        # coo 1 copy
        c1c = self.c1.copy()
        # dense 1 copy
        d1c = c1c.toarray()
        np.testing.assert_allclose(d1c, self.d1)

    def testCooTranspose(self):
        # ct: coo transpose.
        # ('ct' denotes what is transposed in the coo form.)
        # dt: dense transpose.
        # ('dt' denotes what is transposed in the dense form. )
        # da: dense answer.
        # ('da' denotes the correct answer for the transpose operation)
        # CASE: Axes is None
        ct = self.c1.transpose()
        dt = ct.toarray()
        da = self.d1.transpose()
        np.testing.assert_allclose(dt, da)
        # CASE: Axes is a tuple of length two
        # A 2-tuple means "swap these two axes" (here axes 0 and 2).
        ct = self.c1.transpose((0, 2))
        dt = ct.toarray()
        da = self.c1.toarray()
        da = np.transpose(da, (2, 1, 0))  # the order of axis after tranposition.
        # INCORRECT: da = self.c1.toarray().transpose((1, 0))
        np.testing.assert_allclose(dt, da)

    def testCooMul(self):
        """Element-wise __mul__ against sparse, dense, scalar and invalid operands."""
        # CASE: SPARSE * SPARSE
        # coo product
        cp = self.c1 * self.c2
        # dense product
        dp = cp.toarray()
        # dense answer
        da = np.multiply(self.d1, self.d2)
        np.testing.assert_allclose(dp, da)
        # CASE: SPARSE <- SPARSE * SPARSE, no matching index
        cp = self.c1 * self.c3
        dp = cp.toarray()
        da = np.multiply(self.d1, self.c3.toarray())
        np.testing.assert_allclose(dp, da)
        # CASE: SPARSE * DENSE
        cp = self.c1 * self.d2
        dp = cp.toarray()
        # dense answer
        da = np.multiply(self.d1, self.d2)
        np.testing.assert_allclose(dp, da)
        # CASE: SPARSE * CONSTANT
        multiplier = 3
        cp = self.c1 * multiplier
        dp = cp.toarray()
        da = np.zeros(shape=self.c1.shape)
        # print("multiplier: ")
        for i, v in zip(self.c1.indices, self.c1.data):
            # print(tuple(i))
            # print(da[tuple(i)])
            da[tuple(i)] = v * multiplier
            # print("i: ", i)
            # print("v: ", v)
            # print(da[i])
        # print(dp[np.nonzero(dp)])
        # print(da[np.nonzero(da)])
        np.testing.assert_allclose(dp, da)
        # CASE: SPARSE * CONSTANT, CONSTANT = 0
        cp = self.c1 * 0
        dp = cp.toarray()
        da = np.zeros(self.c1.shape)
        np.testing.assert_allclose(dp, da)
        # CASE: SPARSE * CONSTANT, CONSTANT = 1
        cp = self.c1 * 1
        dp = cp.toarray()
        da = self.d1
        np.testing.assert_allclose(dp, da)
        # CASE: Sparse * Incorrect Input
        with self.assertRaises(TypeError):
            # cp = self.c1 * {'a': 1, 'b': 2, 'c': 3}
            cp = self.c1 * [1, 2, 3]
        # self.assertRaises(TypeError, mls.COONDArray.__mul__, self.c1, [1, 2, 3])

    def testCooDiv(self):
        """Element-wise __truediv__ (zero-cell divisions are left to numpy semantics)."""
        # CASE: SPARSE / SPARSE
        # 'ca' denotes the divided in the coo form.
        # 'cx' denoted the divisor in the coo form.
        # 'cq' denoted the quotient in the coo form
        # cq <- ca / cx
        # coo a; coo x; dense a; dense x
        ca = self.c1
        cx = self.c2
        da = self.d1
        dx = self.d2
        cq = ca / cx
        # cq = cq.toarray()
        with np.errstate(divide='ignore'):
            answer = np.true_divide(da, dx)
        # answer[np.isnan(answer)] = 0
        np.testing.assert_allclose(cq, answer)
        # print(cq[np.nonzero(cq==answer)])
        # print(answer[np.nonzero(cq == answer)])
        # print(answer)
        # return
        # CASE: SPARSE / SPARSE, no matching index
        cq = (self.c1 / self.c3)
        answer = self.d1 / self.c3.toarray()
        # answer[np.isnan(answer)] = 0
        np.testing.assert_allclose(cq, answer)
        # CASE: SPARSE / DENSE
        ca = self.c1
        cx = self.c2
        da = self.d1
        dx = self.d2
        result = ca / dx
        # result = result.toarray()
        # with np.errstate(divide='ignore'):
        with np.errstate(divide='ignore', invalid='ignore'):
            answer = np.true_divide(da, dx)
        # answer[np.isnan(answer)] = 0
        np.testing.assert_allclose(result, answer)
        # CASE: SPARSE / CONSTANT:
        ca = self.c1
        cx = self.c2
        da = self.d1
        const_val = 3
        dx = np.ones(shape=self.d2.shape) * const_val
        result = ca / const_val
        result = result.toarray()
        with np.errstate(divide='ignore'):
            answer = np.true_divide(da, dx)
        # answer[np.isnan(answer)] = 0
        # print(result[np.nonzero(result)])
        # print(answer[np.nonzero(answer)])
        np.testing.assert_allclose(result, answer)
        # CASE: SPARSE / CONSTANT, CONSTANT = 0
        with self.assertRaises(TypeError):
            result = self.c1 / 0
        # CASE: SPARSE / CONSTANT, CONSTANT = 1
        result = (self.c1 / 1).toarray()
        answer = self.d1
        np.testing.assert_allclose(result, answer)
        # CASE: SPARSE / INCORRECT TYPE
        with self.assertRaises(TypeError):
            result = self.c1 / [1, 2, 3]

    ################################################
    #        supplement uncovered lines:           #
    ################################################
    def testRaw(self):
        """raw exposes the (indices, data, shape) triple unchanged."""
        raw_c1 = self.c1.raw
        np.testing.assert_allclose(raw_c1[0], self.c1.indices)
        np.testing.assert_allclose(raw_c1[1], self.c1.data)
        np.testing.assert_allclose(raw_c1[2], self.c1.shape)
"""
author: Junxian Ye
time: 12/22/2016
link: https://github.com/un-knight/coursera-machine-learning-algorithm
"""
import numpy as np
import pandas as pd
import sklearn.svm
import seaborn as sns
from matplotlib import pyplot as plt
from func import tools
def gaussian_kernel(x1, x2, sigma=1.0):
    """Gaussian (RBF) kernel between two vectors.

    Computes k(x1, x2) = exp(-||x1 - x2||^2 / (2 * sigma^2)).
    """
    sq_dist = np.sum((x1 - x2) ** 2)
    return np.exp(-sq_dist / (2.0 * sigma ** 2))
def main():
    """
    # test gaussian kernel
    x1 = np.array([1, 2, 1])
    x2 = np.array([0, 4, -1])
    print(gaussian_kernel(x1, x2, 2.0))
    """
    # Fit an RBF-kernel SVM to ex6data2.mat and visualise the decision
    # boundary via predicted class probabilities.
    data = tools.read_data_from_mat('./data/ex6data2.mat')
    # Visulize ex6data2.mat
    sns.set(style="white", palette=sns.diverging_palette(240, 10, n=2))
    sns.lmplot('X1', 'X2', hue='y', data=data, size=10, fit_reg=False,
               scatter_kws={'s': 30})
    plt.title('Example Datast 2')
    # plt.show()
    # Large C, large gamma: tight fit to this non-linearly-separable set.
    svc = sklearn.svm.SVC(C=100.0, kernel='rbf', gamma=10, probability=True)
    svc.fit(data[['X1', 'X2']], data['y'])
    print('svc score: ', svc.score(data[['X1', 'X2']], data['y']))
    # Probability of the first class, used as a continuous colour map.
    predict_prob = svc.predict_proba(data[['X1', 'X2']])[:, 0]
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.scatter(data['X1'], data['X2'], s=30, c=predict_prob, cmap='Reds')
    ax.set_title('SVM(Gaussian Kernel) Decision Boundary(Example Dataset 2)')
    plt.show()


if __name__ == '__main__':
    main()
from __future__ import division
import math
import numpy as np
import unittest
from chainer import testing
from chainercv.utils import tile_images
@testing.parameterize(*testing.product({
    'fill': [128, (104, 117, 123), np.random.uniform(255, size=(3, 1, 1))],
    'pad': [0, 1, 2, 3]
}))
class TestTileImages(unittest.TestCase):
    # Parameterised over fill value shapes (scalar, per-channel tuple,
    # per-channel array) and padding widths.

    def test_tile_images(self):
        """Check the tile at grid position (row 1, col 1) equals its source image."""
        B = np.random.randint(10, 20)
        n_col = np.random.randint(2, 5)
        H = 30
        W = 40
        imgs = np.random.uniform(255, size=(B, 3, H, W))
        tile = tile_images(imgs, n_col, self.pad, fill=self.fill)
        n_row = int(math.ceil(B / n_col))
        self.assertTrue(n_col >= 1 and n_row >= 1)
        # Pixel offset of the (1, 1) tile: one image plus one full pad and
        # half the leading pad.
        start_y_11 = H + self.pad + self.pad // 2
        start_x_11 = W + self.pad + self.pad // 2
        tile_11 = tile[:,
                       start_y_11:start_y_11 + H,
                       start_x_11:start_x_11 + W]
        # Flattened index of grid cell (1, 1) is n_col * 1 + 1,
        # written here as (n_col - 1) + 2.
        np.testing.assert_equal(tile_11, imgs[(n_col - 1) + 2])


testing.run_module(__name__, __file__)
import numpy as np
import time
class mpc_controller():
    """MPPI-style model-predictive controller.

    Rolls out `num_simulated_paths` action sequences of length `horizon`
    through a learned dynamics model, scores them with `cost_fn`, and
    updates the mean action sequence with exponentially-weighted noise
    (information-theoretic MPC / MPPI update).
    """

    def __init__(self,
                 env,
                 dyn_model,
                 horizon=20,
                 cost_fn=None,
                 num_simulated_paths=1000,):
        self.env = env
        self.dyn_model = dyn_model          # learned dynamics: predict(state, action) -> state delta
        self.horizon = horizon              # planning horizon (timesteps)
        self.cost_fn = cost_fn              # trajectory cost callback
        self.num_simulated_paths = num_simulated_paths
        #self.num_simulated_paths = 1000
        self.curract = None
        #self.alpha = 0.005 ## step size
        self.sigma = 0.5  ## normal dist variance
        self.coef_lambda = 1                # MPPI temperature
        self.es_gen = 10
        self.cost_std = []

    def init_mppi(self, state):
        """Initialise the mean action sequence (currently to all zeros).

        A batch of random actions is sampled and scored, but the chosen
        action is multiplied by 0.0, so only the sequence shape matters here.
        """
        #step_ini = 0.1
        # NOTE(review): state[0][3]/state[0][4] and state[:, 8]/state[:, 9]
        # below assume a specific state layout — confirm against the env.
        theta_l = state[0][3]
        phi_l = state[0][4]
        state = np.repeat(state.reshape([1, -1]), self.num_simulated_paths, axis=0)
        action_iter = np.random.uniform(-1.732, 1.732, (self.num_simulated_paths, 2))
        delta = self.dyn_model.predict(state, action_iter)
        cost = self.cost_fn(state[:, :], action_iter, delta[:, :], theta_l, phi_l, self.horizon)
        act = np.argmin(cost)
        action = action_iter[act] * 0.0  # zeroed: mean sequence starts at 0
        # np.repeat lays the mean out as [a0]*horizon then [a1]*horizon
        # (component-blocked, not interleaved).
        mean_vector_ini = np.repeat(action, self.horizon)
        self.mean_max = np.repeat(np.array([2, 2]), self.horizon)  # per-entry upper clamp
        self.mean_min = np.repeat(np.array([-2, -2]), self.horizon)  # per-entry lower clamp
        self.mean_vec = mean_vector_ini

    def get_ac_mppi(self, state):
        """Run one MPPI update from `state` and return the first action (scaled)."""
        theta_l = state[0][3]
        phi_l = state[0][4]
        # Perturb the mean sequence with Gaussian noise for every sampled path.
        Normal_dist = self.sigma * np.random.randn(self.num_simulated_paths, 2 * self.horizon)
        action = self.mean_vec + Normal_dist
        cost = np.zeros([self.num_simulated_paths], dtype=np.float32)
        state = np.repeat(state.reshape([1, -1]), self.num_simulated_paths, axis=0)
        d_theta_l = state[:, 8]
        d_phi_l = state[:, 9]
        for i in range(self.horizon):
            # NOTE(review): action[:, i:i + 2] advances by 1 (overlapping
            # pairs) while mean_vec is component-blocked via np.repeat —
            # layouts look inconsistent; confirm whether [:, 2*i:2*i+2] or an
            # interleaved mean layout was intended.
            action_iter = action[:, i:i + 2]
            # delta is the difference in states per timestamp
            delta = self.dyn_model.predict(state, action_iter)
            d_theta_l = d_theta_l + delta[:, 5]
            d_phi_l = d_phi_l + delta[:, 6]
            # 200.0 scaling between rate and angle channels — source of the
            # constant not visible here; confirm against the dynamics model.
            delta[:, 3] = d_theta_l / 200.0
            delta[:, 4] = d_phi_l / 200.0
            cost = cost + self.cost_fn(state, action_iter, delta, theta_l, phi_l, self.horizon)
            dr = delta[:, 0] * 200000
            state = np.hstack((delta[:, 0:5] + state[:, 0:5], dr.reshape([-1, 1]), delta[:, 1:5] * 200.0))
        #print(np.std(cost))
        print('traj std:{:.10f}'.format(np.std(cost)))
        # Normalise costs, then compute MPPI softmin weights.
        cost = (cost - np.min(cost)) / np.std(cost)
        weights = np.exp(- (cost / self.coef_lambda))
        weights = weights / np.sum(weights)
        # Weighted average of the noise moves the mean sequence.
        mean_diff = np.sum(np.multiply(weights, Normal_dist.T), axis=1)
        self.mean_vec = self.mean_vec + mean_diff
        self.mean_vec = np.minimum(self.mean_vec, self.mean_max)
        self.mean_vec = np.maximum(self.mean_vec, self.mean_min)
        # First action of the mean sequence, scaled by 0.5773 (~ 1/sqrt(3)).
        action = self.mean_vec[0:2] * 0.5773
        return action  #, min_cost
# -*- coding: utf-8 -*-
# Author: Jiajun Ren <jiajunren0522@gmail.com>
import os
import numpy as np
import pytest
from renormalizer.spectra import SpectraOneWayPropZeroT, SpectraTwoWayPropZeroT, SpectraExact
from renormalizer.spectra.tests import cur_dir
from renormalizer.tests import parameter
from renormalizer.utils import Quantity
@pytest.mark.parametrize(
    "algorithm, mol_list, std_fname, rtol",
    (
        # [1, parameter.hybrid_mol_list,"hybrid_ZTabs.npy",1e-2],
        # [1, parameter.mol_list,"ZeroTabs_2svd.npy",1e-2],
        [2, parameter.hybrid_mol_list, "hybrid_ZTabs.npy", 1e-3],
        [2, parameter.mol_list, "ZeroTabs_2svd.npy", 1e-2],
    ),
)
def test_hybrid_abs(algorithm, mol_list, std_fname, rtol):
    """Zero-T absorption autocorrelation matches the stored reference.

    algorithm 1 selects one-way propagation, anything else two-way
    (only algorithm 2 cases are currently enabled in the parametrize).
    """
    np.random.seed(0)
    # print "data", value
    if algorithm == 1:
        SpectraZeroT = SpectraOneWayPropZeroT
    else:
        SpectraZeroT = SpectraTwoWayPropZeroT
    zero_t_corr = SpectraZeroT(mol_list, "abs", offset=Quantity(2.28614053, "ev"))
    zero_t_corr.info_interval = 30
    nsteps = 100
    dt = 30.0
    zero_t_corr.evolve(dt, nsteps)
    # Compare against the stored reference trace shipped with the tests.
    with open(os.path.join(cur_dir, std_fname), "rb") as f:
        std = np.load(f)
    assert np.allclose(zero_t_corr.autocorr[:nsteps], std[:nsteps], rtol=rtol)
    # from matplotlib import pyplot as plt
    # plt.plot(zero_t_corr.autocorr)
    # plt.plot(std)
    # plt.show()
@pytest.mark.parametrize(
    "algorithm, mol_list, std_fname, rtol",
    (
        [1, parameter.hybrid_mol_list, "hybrid_ZTemi_prop.npy", 1e-3],
        [1, parameter.mol_list, "ZeroExactEmi.npy", 1e-2],
        [2, parameter.hybrid_mol_list, "hybrid_ZTemi_prop.npy", 1e-3],
        [2, parameter.mol_list, "ZeroExactEmi.npy", 1e-2],
    ),
)
def test_hybrid_emi(algorithm, mol_list, std_fname, rtol):
    """Zero-T emission autocorrelation against stored reference data."""
    np.random.seed(0)
    propagator = SpectraOneWayPropZeroT if algorithm == 1 else SpectraTwoWayPropZeroT
    corr = propagator(mol_list, "emi")
    corr.info_interval = 100
    nsteps = 1000
    dt = 30.0
    corr.evolve(dt, nsteps)
    with open(os.path.join(cur_dir, std_fname), "rb") as fh:
        reference = np.load(fh)
    assert np.allclose(corr.autocorr[:nsteps], reference[:nsteps], rtol=rtol)
def test_Exact_Spectra_hybrid_TDDMRG_TDH():
    """Exact zero-T emission spectrum of the hybrid model vs. reference."""
    exact_emi = SpectraExact(parameter.hybrid_mol_list, spectratype="emi")
    exact_emi.info_interval = 100
    nsteps = 3000
    dt = 30.0
    exact_emi.evolve(dt, nsteps)
    with open(os.path.join(cur_dir, "hybrid_ZTemi_exact.npy"), "rb") as fh:
        reference = np.load(fh)
    assert np.allclose(exact_emi.autocorr[:nsteps], reference[:nsteps], rtol=1e-3)
@pytest.mark.parametrize("algorithm", ("pure", "hybrid"))
def test_1mol_Exact_Spectra_hybrid_TDDMRG_TDH(algorithm):
    """Single-molecule exact absorption spectrum, pure vs. hybrid method."""
    nmols = 1
    J = np.zeros([1, 1])
    if algorithm == "pure":
        mol_list = parameter.custom_mol_list(J, nmols=nmols)
    elif algorithm == "hybrid":
        mol_list = parameter.custom_mol_list(J, hartrees=[True, False], nmols=nmols)
    else:
        assert False
    first_mol = mol_list[0]
    # Shift the spectrum so the 0-0 transition sits at zero energy.
    E_offset = -(first_mol.elocalex + first_mol.reorganization_energy)
    exact_abs = SpectraExact(mol_list, spectratype="abs", offset=Quantity(E_offset))
    exact_abs.info_interval = 100
    nsteps = 1000
    dt = 30.0
    exact_abs.evolve(dt, nsteps)
    with open(os.path.join(cur_dir, "1mol_ZTabs.npy"), "rb") as fh:
        reference = np.load(fh)
    assert np.allclose(exact_abs.autocorr[:nsteps], reference[:nsteps], rtol=1e-3)
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
import pdb
from mmd_comp import MultipleKernelMaximumMeanDiscrepancy, JointMultipleKernelMaximumMeanDiscrepancy
from kernels import GaussianKernel
def Entropy(input_):
    """Per-sample Shannon entropy of a batch of probability rows.

    Args:
        input_: (batch, classes) tensor of (softmax) probabilities.

    Returns:
        (batch,) tensor, -sum_c p_c * log(p_c + 1e-5) per row.
    """
    eps = 1e-5
    return torch.sum(-input_ * torch.log(input_ + eps), dim=1)
def virtual_loss(model,feat,lambd=0.1,T=0.05,k=2,eta=0.05,conf=False):
    """Adversarial entropy loss on virtually-perturbed classifier weights.

    For k steps, the classifier weights `model.fc.weight` are virtually
    ascended along the gradient of the (negative) prediction entropy, without
    touching the real model parameters; the returned loss is the scaled
    negative entropy of predictions under the final perturbed weights.

    NOTE(review): parameters `T` and `conf` are unused in this body.
    NOTE(review): `sum = False` shadows the builtin and is never read.
    NOTE(review): the `loss` accumulated inside the loop is discarded -- it
    is overwritten by the final assignment before `return`. Confirm whether
    the accumulation was meant to contribute.

    Args:
        model: module exposing `fc` (a linear layer) -- weights are read only.
        feat: (batch, dim) feature tensor fed to the virtual classifier.
        lambd: scale of the returned loss.
        eta: virtual gradient-ascent step size on the weights.
        k: number of virtual perturbation steps.

    Returns:
        Scalar tensor: -lambd * mean negative entropy of softmax outputs
        under the perturbed weights (gradient flows into `feat`).
    """
    w_temp = model.fc.weight
    #feat = F.normalize(feat)
    sum = False
    loss = 0
    for i in range(k):
        model.zero_grad()
        w_temp.requires_grad_()
        # Virtual logits/probabilities; feat is detached so only w_temp
        # receives gradient from this inner objective.
        out_t1 = torch.mm(feat.detach(), w_temp.t())
        out_t1 = F.softmax(out_t1)
        size_t = out_t1.size(0)
        # Mean negative entropy of the virtual predictions.
        loss_d = torch.sum(torch.sum(out_t1 * (torch.log(out_t1 + 1e-5)), 1))/size_t
        loss -= loss_d
        loss_d.backward(retain_graph=True)
        #pdb.set_trace()
        # Gradient-descent step on the negative entropy == entropy ascent.
        w_delta = -w_temp.grad * eta#-F.normalize(w_temp.grad) * torch.norm(w_temp,dim=1).view(w_temp.size(0),1)*eta
        w_temp_delta = w_delta #+ F.normalize(w_temp)*torch.sum(torch.mm(w_temp, w_delta.t()),1).view(-1,1)
        w_temp = w_temp + w_temp_delta
        # Re-wrap so the next iteration's backward does not extend the
        # perturbation history.
        w_temp = Variable(w_temp)
        w_temp.requires_grad_()
    # Final loss: entropy term under the perturbed weights, with gradient
    # flowing through `feat` (not detached here).
    out_d = F.softmax(torch.mm(feat, w_temp.t()))
    loss = -lambd * torch.sum(torch.sum(out_d * (torch.log(out_d + 1e-5)), 1))/size_t
    return loss
def grl_hook(coeff):
    """Build a gradient-reversal backward hook.

    The returned hook negates the incoming gradient and scales it by
    ``coeff`` (gradient reversal layer, via Tensor.register_hook).
    """
    def _reverse(grad):
        return -coeff * grad.clone()
    return _reverse
def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
    """Conditional Domain Adversarial Network loss.

    Args:
        input_list: [features, softmax_outputs]; the first half of the batch
            is treated as source, the second half as target.
        ad_net: domain discriminator producing probabilities in [0, 1].
        entropy: optional per-sample entropy tensor; enables entropy-aware
            example weighting (CDAN+E).
        coeff: gradient-reversal coefficient applied to `entropy`'s gradient.
        random_layer: optional random multilinear map; if None, the full
            outer product of outputs and features is fed to `ad_net`.

    Returns:
        Scalar BCE domain loss (weighted when `entropy` is given).
    """
    softmax_output = input_list[1].detach()
    feature = input_list[0]
    if random_layer is None:
        # Multilinear conditioning: outer product output (x) feature.
        op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
        ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
    else:
        # Randomized multilinear map keeps the input dimension fixed.
        random_out = random_layer.forward([feature, softmax_output])
        ad_out = ad_net(random_out.view(-1, random_out.size(1)))
    # Domain labels: first half source (1), second half target (0).
    batch_size = softmax_output.size(0) // 2
    dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
    if entropy is not None:
        # Reverse the gradient flowing back through the entropy branch.
        entropy.register_hook(grl_hook(coeff))
        # Certainty weight: low-entropy (confident) samples get weight near 2.
        entropy = 1.0+torch.exp(-entropy)
        source_mask = torch.ones_like(entropy)
        source_mask[feature.size(0)//2:] = 0
        source_weight = entropy*source_mask
        target_mask = torch.ones_like(entropy)
        target_mask[0:feature.size(0)//2] = 0
        target_weight = entropy*target_mask
        # Normalize each domain's weights separately, then combine.
        weight = source_weight / torch.sum(source_weight).detach().item() + \
                 target_weight / torch.sum(target_weight).detach().item()
        return torch.sum(weight.view(-1, 1) * nn.BCELoss(reduce=False)(ad_out, dc_target)) / torch.sum(weight).detach().item()
    else:
        return nn.BCELoss()(ad_out, dc_target)
def DANN(features, ad_net):
    """Plain DANN adversarial loss.

    BCE between the domain discriminator's output on `features` and domain
    labels where the first half of the batch is source (1) and the second
    half is target (0).
    """
    domain_pred = ad_net(features)
    half = domain_pred.size(0) // 2
    labels = [[1]] * half + [[0]] * half
    dc_target = torch.from_numpy(np.array(labels)).float().cuda()
    return nn.BCELoss()(domain_pred, dc_target)
def DAN(features_s, features_t):
    """Deep Adaptation Network loss: multi-kernel MMD between source and
    target features, using Gaussian kernels with alpha = 2^-3 .. 2^1."""
    kernel_bank = [GaussianKernel(alpha=2 ** k) for k in range(-3, 2)]
    mkmmd_loss = MultipleKernelMaximumMeanDiscrepancy(
        kernels=kernel_bank,
        linear=True,  # original spelled this `not False`
        quadratic_program=False,
    )
    return mkmmd_loss(features_s, features_t)
def JAN(features_s, features_t, output_s, output_t):
    """Joint Adaptation Network loss: joint multi-kernel MMD over
    (feature, softmax-output) pairs for source and target batches."""
    feature_kernels = [GaussianKernel(alpha=2 ** k) for k in range(-3, 2)]
    output_kernels = (GaussianKernel(sigma=0.92, track_running_stats=False),)
    jmmd_loss = JointMultipleKernelMaximumMeanDiscrepancy(
        kernels=(feature_kernels, output_kernels),
        linear=False,
        thetas=None,
    ).cuda()
    source_pair = (features_s, F.softmax(output_s, dim=1))
    target_pair = (features_t, F.softmax(output_t, dim=1))
    return jmmd_loss(source_pair, target_pair)
"""Unittests for the functions in svid_location, using the true data from 2020-02-11."""
import unittest
import numpy.testing as npt
import pandas.testing as pt
from itertools import product
import numpy as np
import math
from scipy.special import expit
from gnssmapper.algo.FPL import FourParamLogisticRegression
class TestFPL(unittest.TestCase):
    """Unit tests for FourParamLogisticRegression (params [a, b, c, d])."""
    def setUp(self) -> None:
        # Fresh model with known parameters and a small batch size so batch
        # updates trigger every third observation.
        self.fpl = FourParamLogisticRegression()
        self.fpl.param = np.array([0.95, 1, 1, 0.05])
        self.fpl.batch_size=3
    def test_four_param_sigmoid(self) -> None:
        # Expected form (see neg_log_likelihood below):
        # d + (a - d) * expit(b * (x - c)) with a=0.95, b=1, c=1, d=0.05.
        self.assertAlmostEqual(self.fpl._four_param_sigmoid(1),0.5)
        self.assertAlmostEqual(self.fpl._four_param_sigmoid(1e6),0.95)
        self.assertAlmostEqual(self.fpl._four_param_sigmoid(-1e6),0.05)
        self.assertAlmostEqual(self.fpl._four_param_sigmoid(2),0.05+0.9/(1+math.exp(-1)))
    def test_batch_update(self) -> None:
        # Two observations at x=1 with label 0; the hand-derived parameter
        # deltas below are scaled by the per-parameter learning rates.
        xhat=np.array((1,1))
        ytrue=np.array((0,0))
        # y_fac=2
        # delta_a = 1
        # delta_b = 0
        # delta_c = -0.45
        # delta_d = 1
        a,b,c,d =0.95 -self.fpl.lr[0], 1, 1+0.45*self.fpl.lr[2], 0.05 -self.fpl.lr[3]
        self.fpl._batch_update(xhat,ytrue)
        self.assertAlmostEqual(self.fpl.param[0],a)
        self.assertAlmostEqual(self.fpl.param[1],b)
        self.assertAlmostEqual(self.fpl.param[2],c)
        self.assertAlmostEqual(self.fpl.param[3],d)
    def test_fit_online(self) -> None:
        X=np.arange(10)
        Y = np.array([1] * 10)
        p=self.fpl.fit_online(X,Y)
        self.assertEqual(len(p),10)
        self.assertEqual(len(p[9]),4)
        npt.assert_almost_equal(p[9],self.fpl.param)
        # With batch_size=3, parameters may only change right after each full
        # batch (indices 2, 5, 8); everywhere else they must stay constant.
        updates = max(max([abs(i-j) for i,j in zip(p[n],p[n+1])] for n in range(9) if n not in {2,5,8,9}))
        self.assertAlmostEqual(updates,0)
    def test_fit_offline(self) -> None:
        X=np.arange(0,100)
        Y=np.array([1,0]*50)
        np.random.shuffle(Y)
        self.fpl.fit_offline(X,Y)
        theta=self.fpl.param
        # Negative log-likelihood of the 4-parameter logistic model.
        def neg_log_likelihood(theta, X, y):
            m = X.shape[0]
            yhat = theta[3] + (theta[0] - theta[3]) *expit(theta[1] * (X - theta[2]) )
            return -(1 / m) * np.sum(y*np.log(yhat) + (1 - y)*np.log(1 - yhat))
        min_ = neg_log_likelihood(theta,X,Y)
        # The fitted parameters should beat every point of a coarse grid
        # search over the (a, b, c, d) parameter box.
        A = np.arange(0.5+1e-3,1 - 1e-3,0.1)
        B = np.arange(1e-2,5,1)
        C =np.arange(1e-3,20,1)
        D =np.arange(0+1e-3,0.5-1e-3,0.1)
        likelihoods = [neg_log_likelihood([a,b,c,d],X,Y) for a,b,c,d in product(A,B,C,D) ]
        self.assertLessEqual(min_,min(likelihoods))
    def test_prob(self) -> None:
        # NaN inputs map to probability 0; x=1 sits at the sigmoid midpoint.
        X=np.array([np.nan,1])
        npt.assert_almost_equal(self.fpl.prob(X),np.array([0,0.5]))
    def test_pred(self) -> None:
        # Predictions threshold the probabilities: NaN -> 0, just-above-mid -> 1.
        X=np.array([np.nan,1.001])
        npt.assert_almost_equal(self.fpl.predict(X),np.array([0,1]))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import json
SUMMARY_LOG_SAVE_PATH = ""
DENSENET_MODEL_PREDICT_RESULT_FILE = ""
RESNET_MODEL_PREDICT_RESULT_FILE = ""
XCEPTION_MODEL_PREDICT_RESULT_FILE = ""
def VariableSummaries(var):
    """Attach TensorBoard summaries (mean/stddev/max/min/histogram) to `var`."""
    # Record statistics of the tensor.
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
def feed_dict(train, batch_size=256):
    """Build a feed dict for a training or evaluation step.

    NOTE(review): `mnist`, `x`, and `y_` are not defined at module scope --
    `x`/`y_` are locals of main() and `mnist` is never created, so calling
    this raises NameError as written. The TODO below (translated from the
    original Chinese) confirms this is unfinished.
    """
    # TODO: implement selection of the label data.
    if train:
        xs, ys = mnist.train.next_batch(batch_size)
    else:
        xs, ys = mnist.test.images, mnist.test.labels
    return {x: xs, y_: ys}
def main(unused_argv):
    """Build, train and evaluate a small SE-style fusion network that
    combines per-class scores from several base models (TF1 graph mode).

    Fix: the SE re-weighting now multiplies by the reshaped `scale` tensor
    ([None, 1, net_num], broadcastable against x). The original multiplied
    by `fc2` ([None, net_num]) directly, which left `scale` dead and cannot
    broadcast against x ([None, class_num, net_num]).
    """
    # Data parameters.
    net_num = 3
    img_num = 50000
    class_num = 80
    epoch = 10
    batch_size = 256
    learning_rate = 0.001
    # Load the training data.
    # TODO: read from json; implement the feed_dict function.
    # NOTE(review): the trailing dim (5) is supposed to equal net_num (3),
    # per the original comment -- confirm before feeding real data.
    train_data = np.arange(1000*80*5, dtype=np.float32).reshape(1000, 80, 5)
    train_labels = np.arange(1000, dtype=np.int32).reshape(1000)
    eval_data = np.arange(200*80*5, dtype=np.float32).reshape(200, 80, 5)
    eval_labels = np.arange(200, dtype=np.int32).reshape(200)
    sess = tf.InteractiveSession()
    # Input placeholders.
    with tf.name_scope('input'):
        x = tf.placeholder(tf.float32, [None, class_num, net_num], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, class_num], name='y-input')
    '''SE模块'''
    # Global pooling over the class dimension.
    global_max_pooling = tf.reduce_max(x, 1, name='global_max_pooling')
    VariableSummaries(global_max_pooling)
    # First fully-connected layer.
    with tf.name_scope('fc1'):
        fc1 = tf.layers.dense(inputs=global_max_pooling, units=net_num, activation=tf.nn.relu, name='fc1')
        VariableSummaries(fc1)
    # Second fully-connected layer: sigmoid gate per base network.
    with tf.name_scope('fc2'):
        fc2 = tf.layers.dense(inputs=fc1, units=net_num, activation=tf.nn.sigmoid, name='fc2')
        VariableSummaries(fc2)
    # Re-weight the per-network scores by the learned gates.
    with tf.name_scope('scale'):
        scale = tf.reshape(fc2, [-1, 1, net_num])
        # BUG FIX: use the broadcast-compatible `scale`; the original used
        # `fc2` here, leaving `scale` unused.
        scaled_x = x * scale
        VariableSummaries(scaled_x)
    with tf.name_scope('cov1'):
        sum_x = tf.layers.conv1d(inputs=scaled_x, filters=1, strides=1, kernel_size=1)
    # Drop the singleton channel dimension.
    y = tf.squeeze(sum_x)
    VariableSummaries(y)
    # Cross-entropy loss.
    with tf.name_scope('cross_entropy'):
        diff = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
        with tf.name_scope('total'):
            cross_entropy = tf.reduce_mean(diff)
    tf.summary.scalar('cross_entropy', cross_entropy)
    # Training op.
    with tf.name_scope('train'):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(
            cross_entropy)
    # Accuracy evaluation.
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
    # TensorBoard writers.
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(SUMMARY_LOG_SAVE_PATH + '/train', sess.graph)
    test_writer = tf.summary.FileWriter(SUMMARY_LOG_SAVE_PATH + '/test')
    tf.global_variables_initializer().run()
    # Training / evaluation loop.
    for i in range(img_num//batch_size*epoch):
        if i % 20 == 0:  # Record summaries and test-set accuracy
            summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
            test_writer.add_summary(summary, i)
            print('Accuracy at step %s: %s' % (i, acc))
        else:  # Record train set summaries, and train
            if i % 40 == 39:  # Record execution stats
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                summary, _ = sess.run([merged, train_step],
                                      feed_dict=feed_dict(True, batch_size=batch_size),
                                      options=run_options,
                                      run_metadata=run_metadata)
                train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
                train_writer.add_summary(summary, i)
                print('Adding run metadata for', i)
            else:  # Record a summary
                summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True, batch_size=batch_size))
                train_writer.add_summary(summary, i)
    train_writer.close()
    test_writer.close()
# Standard TF1 entry point: parses flags and invokes main().
if __name__ == "__main__":
    tf.app.run(main=main)
import numpy as np
import time
from .gdtwcpp import solve
from .signal import signal
from .utils import process_function
class GDTW:
    """General Dynamic Time Warping solver.

    Iteratively refines a continuous warping function phi(t) between signals
    x and y: the search space is discretized into an N x M grid (Tau), a
    loss matrix D is pre-computed, and a C++ dynamic-programming solver
    finds the best path; the grid is then shrunk around the solution by a
    factor `eta` and the process repeats until convergence or `max_iters`.

    Fixes vs. the original: the removed NumPy alias `np.int` (gone since
    NumPy 1.24) is replaced by the builtin `int`, and `set_params` no longer
    uses a mutable default argument.
    """
    def __init__(self):
        # generic input vars
        self.x = None
        self.x_a = None
        self.x_f = None
        self.y = None
        self.y_a = None
        self.y_f = None
        # params and loss, regularizer functionals
        self.t = None # t we integrate over
        self.lambda_cum = 1
        self.lambda_inst = .1
        self.Loss = "L2"
        self.R_cum = "L2"
        self.R_inst = "L2"
        self.loss_f = None
        # search space size
        self.N = None
        self.N_default = 300
        self.M = None
        self.M_max = 300
        self.eta = .15
        # slope constraints and boundary conditions
        self.s_min = 1e-8
        self.s_max = 1e8
        self.s_beta = 0
        self.BC_start_stop = True
        # termination conditions
        self.max_iters = 10
        self.epsilon_abs = 1e-1
        self.epsilon_rel = 1e-2
        # for inspecting each iteration
        self.callback = False
        # misc.
        self.verbose = 0
        self.uid = None
        # private
        self.vectorized_Loss= False
        self.D = None
        self.iteration = 0
        self.time_solve = None
        self.f_tau_ = None
    def allocate(self):
        """Allocate the search grid and solution arrays (requires N and M)."""
        # graph
        self.Tau = np.zeros((self.N,self.M),dtype=np.double)
        self.u = None
        self.l = None
        self.u_orig = None
        self.l_orig = None
        # solution
        self.tau = np.zeros( self.N, dtype=np.double) # phi is continuous
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        self.path = np.zeros( self.N, dtype=int) # path is discrete
        self.f_tau = np.double(0.0)
        self.phi = lambda t_: np.interp(t_, self.t, self.tau)
        return self
    def compute_taus(self):
        """Build (or shrink) the per-timestep search band [l, u] and fill Tau."""
        # initial u and ls
        if self.iteration == 0:
            self.u = np.min([self.s_beta + self.s_max*self.t, self.s_beta + 1-self.s_min*(1-self.t) ],axis=0).astype(np.double)
            self.l = np.max([ self.s_min*self.t, -self.s_beta + 1-self.s_max*(1-self.t) ],axis=0).astype(np.double)
            # restrict domain of phi to domain of t, since x and y may not be defined outside [t_min, t_max]
            self.u = np.min([self.u,np.repeat(1,self.N)],axis=0)
            self.l = np.max([self.l,np.repeat(0,self.N)],axis=0)
            self.u_orig = self.u.copy()
            self.l_orig = self.l.copy()
        # update u and l by factor eta, keep within bounds of original l and u
        else:
            tau_range = self.eta * (self.u-self.l)/np.double(2.)
            self.u = np.min([self.tau+tau_range,self.u_orig],axis=0)
            self.l = np.max([self.tau-tau_range,self.l_orig],axis=0)
        # compute taus for given an u and l
        a = np.stack((self.l,self.u-self.l),axis=1)
        b = np.vstack((np.ones(self.M),np.arange(self.M).astype(np.double)/np.double(self.M-1)))
        self.Tau = np.dot(a,b)
        # sanity check: this should decrease as u[i] shrinks towards phi[i]
        # print( self.u[int(self.N/2)] - self.l[int(self.N/2)] )
        return self
    def compute_dist_matrix(self):
        """Pre-compute the loss matrix D[i,j] = Loss(x(Tau[i,j]) - y(t[i]))."""
        # The pre-computed distance matrix must satisfy: D[i,j] = Loss( x(Tau[i,j]) - y(t[i]) )
        # Note: scipy.spatial.distance.cdist won't work since t is a vector and tau is a matrix.
        if self.verbose > 0: time_start = time.time()
        # We'll compute x(tau) and assign infinitity at undefined points.
        X = self.x_f(self.Tau)
        X[np.isnan(X)] = np.inf
        # We repeat y(t) so that it's the same shape of x(tau).
        Y = np.tile(self.y_f(self.t).reshape((self.N,1)),(1,self.M))
        # We apply the processed loss function.
        self.D = (X-Y)**2
        # Finally, we'll report the time it took to do all of this.
        if self.verbose > 0: print(f"Pre-computed loss: {time.time() - time_start :03.4f} sec")
        return self
    # --------------------------------------------------------------------------------------------
    # Solver
    def run(self):
        """Validate parameters, allocate buffers and run the full iteration."""
        self.check_params()
        self.allocate()
        self.offer_suggestions()
        self.iterate()
        return self
    def solve(self):
        """Invoke the C++ DP solver on the current grid; fills tau/path/f_tau."""
        time_start = time.time()
        i = solve(
            self.t,
            self.Tau,
            self.D,
            self.R_cum,
            self.R_inst,
            np.double(self.lambda_cum),
            np.double(self.lambda_inst),
            np.double(self.s_min),
            np.double(self.s_max),
            self.BC_start_stop,
            self.verbose,
            self.tau,
            self.path,
            self.f_tau
        )
        if i == -1: raise ValueError("C++ code failed.")
        self.time_solve = time.time() - time_start
        return self
    def iterate(self):
        """Refinement loop: rebuild grid, solve, and stop when f(tau) settles."""
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        for self.iteration in np.arange(self.max_iters).astype(int):
            # compute graph and solve
            self.compute_taus()
            self.compute_dist_matrix()
            self.solve()
            # optional methods
            if self.verbose > 1: self.print_iteration()
            if self.callback: self.callback(self)
            # early termination
            if self.iteration > 0 and self.f_tau_ != np.inf:
                if np.abs(self.f_tau-self.f_tau_) <= self.epsilon_abs+self.epsilon_rel*np.abs(self.f_tau_):
                    if self.verbose > 2: print("Stopping criterion met.")
                    break
            self.f_tau_ = self.f_tau.copy()
        return self
    def print_iteration(self):
        """Print a one-line progress report for the current iteration."""
        if self.iteration==0: print(f'\titeration{" "*4}solver{" "*4}f(phi)')
        print(f'\t{self.iteration:3}/{self.max_iters:3}{" "*6}{self.time_solve:03.4f}{" "*3}{self.f_tau:8.6f}')
        return self
    def serialize(self):
        """Return a dict snapshot of the inputs, solution and parameters."""
        result = {
            "t" : self.t,
            "tau" : self.tau,
            "phi" : self.phi,
            "y" : self.y_a,
            "x" : self.x_a,
            "x_hat" : self.x_f(self.tau),
            "f_tau" : self.f_tau.copy(),
            "params" : self.param_list
        }
        return result
    # --------------------------------------------------------------------------------------------
    # Helper methods for checking inputs
    def set_params(self, params=None):
        """Store user parameters on the instance (x/y excluded from param_list)."""
        # Avoid the mutable-default-argument pitfall of the original
        # signature `params={}`; passing no argument behaves identically.
        if params is None:
            params = {}
        self.param_list = {k:v for k,v in params.items() if k not in ["x", "y"]}
        self.__dict__.update(params)
        return self
    def check_params(self):
        """Reconcile t, N and M; parse the x/y signals; build the loss fn."""
        # If time is given as a sequence,
        if isinstance(self.t, np.ndarray) or isinstance(self.t, list):
            # we'll ensure it's a numpy array
            self.t = np.array(self.t, dtype=np.double)
            # and then check if it's multidimensional.
            if self.t.ndim > 1:
                # If so we'll throw an error.
                raise ValueError(f"Time is multi-dimensional; we can only accept a 1-D sequence.")
            # If we're not given an N, we'll use the length of time as our N
            if not isinstance(self.N, int): # works for both int and np.int
                self.N = self.t.shape[0]
                # and alert the user.
                if self.verbose > 1:
                    print(f"Setting N={self.N} == len(t).")
            # Otherwise, we'll check to see if our given N and t agree.
            else:
                # If they agree, that's great,
                if self.N == self.t.shape[0]:
                    pass
                # otherwise, we'll need to choose one.
                else:
                    # If t is irregularly sampled (10 decimal place precision),
                    if np.unique(np.around(np.diff(self.t),10)).shape[0] > 1:
                        # we'll want to use that t to integrate over,
                        self.N = self.t.shape[0]
                        # and alert the user of this choice.
                        if self.verbose > 1:
                            print(f"Over-riding your choice of N: N = {self.N} == len(t). Since t is irregularly sampled, we'll want to integrate over that vector.")
                    # If N is bigger than t,
                    elif self.N > self.t.shape[0]:
                        # we'll use the smaller value,
                        self.N = self.t.shape[0]
                        # and alert the user of this choice.
                        if self.verbose > 1:
                            print(f"Over-riding your choice of N: N = {self.N} == len(t). Since N is greater than the length of t.")
                    # Otherwise,
                    else:
                        # we'll default to the value given by N and rebuild t,
                        self.t = np.linspace(0, 1, num=self.N, dtype=np.double)
                        # and alert the user of this choice.
                        if self.verbose > 1:
                            print(f"You've set both t and N, but they don't agree: i.e. len(t) > N. We're keeping your choice of N = {self.N}.")
        # Otherwise, if N is given and t is not, we'll construct a sequence based on N.
        elif isinstance(self.N, int):
            self.t = np.linspace(0, 1, num=self.N, dtype=np.double)
        # It could be the case that both t and N are not given,
        else:
            # which will be indicated here,
            if self.t is None and self.N is None:
                if isinstance(self.x, np.ndarray) or isinstance(self.x, list):
                    self.N = np.array(self.x).shape[0]
                elif isinstance(self.y, np.ndarray) or isinstance(self.y, list):
                    self.N = np.array(self.y).shape[0]
                else:
                    # We'll use the default value of N
                    self.N = self.N_default
                self.t = np.linspace(0, 1, num=self.N, dtype=np.double)
                # and alert the user.
                if self.verbose > 1:
                    print(f"Setting N={self.N} since neither vector t or integer N is set.")
            # If we end up here, then the only explanation is that t is incorrect.
            else:
                raise ValueError(f"Time t is set incorrectly. It must be a 1-D sequence.")
        # If M is not given or is unreasonably large,
        if not isinstance(self.M, int) or self.M >= self.N:
            # we'll set it to the smaller of either the default size or a little over half of N,
            # (np.int was removed in NumPy 1.24; the builtin int is equivalent)
            self.M = np.min((self.N*.55, self.M_max)).astype(int)
            # and alert the user of this choice.
            if self.verbose > 1:
                print(f"Setting M={self.M}")
        # We'll also ensure M is odd so that there's a center point (aka. j_center in our C++ code).
        self.M = self.M if self.M % 2 == 1 else self.M + 1
        # We'll alert the user of these final parameters.
        if self.verbose > 0:
            print(f"M={self.M}, N={self.N}")
        # Signals are given as generic inputs x and y. We'll parse these here.
        self.x_a, self.x_f = signal(self.x, "x", self.N)
        self.y_a, self.y_f = signal(self.y, "y", self.N)
        # Finally, we'll process our loss function.
        self.loss_f = process_function(self.Loss)
        return self
    def offer_suggestions(self):
        """Print (verbose-only) hints about M sizing, beta and signal scaling."""
        if self.verbose > 0:
            M_suggested = np.min((self.N*.55, self.M_max))
            if self.M < M_suggested:
                print(f"Suggestion: M is too small. Increasing M from {self.M} to ~{M_suggested} may offer better results.")
            if self.M > (self.N):
                print(f"Suggestion: M is too big. Decreasing M from {self.M} to ~{M_suggested} will be faster.")
            if (self.s_beta != 0 and (not callable(self.x) or not callable(self.y))):
                print(f"Suggestion:\n x(t) and y(t) are defined over time domain [{0},{1}], but since you've set beta={self.s_beta}, this method will search over tau with a range of [{0-self.s_beta},{1+self.s_beta}].\n The problem is that you've provided an array instead of a function for x(t) or y(t).\n This method doesn't perform prediction, and so it won't impute values for x(tau) or y(tau) where tau < min(t) or tau > max(t).\n Please make sure you use a function to define x and y instead of an array, or you'll have some spurrious results for tau outside the range of t.")
            range_x = [np.round(np.nanmin(self.x_f(self.t)),1), np.round(np.nanmax(self.x_f(self.t)),1)]
            range_y = [np.round(np.nanmin(self.y_f(self.t)),1), np.round(np.nanmax(self.y_f(self.t)),1)]
            if (range_x[0] != -1.0 or range_x[1] != 1.0 or range_y[0] != -1.0 or range_y[1] != 1.0):
                print(f"Suggestion: x(t) and y(t) do not have a range [-1,1] (they have range(x)={range_x} and range(y)={range_y}.")
                print(f"You may want to set scale_signals=True so the Loss function doesn't dominate the regularizers in the objective function.")
        return self
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 14 14:34:24 2022
@author: Manuel Huber
"""
import os.path
import multiprocessing
from multiprocessing import Process, Manager
import ee
import geemap
import numpy as np
Map = geemap.Map()
import matplotlib.pyplot as plt
from colour import Color
#from osgeo import gdal
import pandas as pd
import time
import os, glob
import progressbar
from osgeo import gdal
#########################################################################
def get_geotiff_gee(dataset,world,name, path, scale_x, name_save, tile_size,number_cover_type):
    """Export per-country GeoTIFF tiles of percent cover for one land-cover class.

    For every country in the continent/region `name`, tiles the country at
    `tile_size`, computes per-pixel percent cover of `number_cover_type` at
    resolution `scale_x`, and exports each tile via geemap. Failed exports
    are logged to Missing_Files.txt and retried in smaller 200 km patches.

    Args:
        dataset: ee.Image of categorical land-cover classes.
        world: ee.FeatureCollection with 'wld_rgn'/'country_na' properties.
        name: continent/region name to filter on.
        path: output directory for the exported GeoTIFFs.
        scale_x: output pixel size in meters.
        name_save: filename-safe region name used in output files.
        tile_size: tiling grid size in meters.
        number_cover_type: land-cover class value to extract (see legend).
    """
    sel_name = 'wld_rgn' #country_na'
    conti = world.filter(ee.Filter.eq(sel_name, name)) # Select the right continent boundaries of the input name
    sel_name = 'country_na'
    features_country = np.unique(conti.aggregate_array(sel_name).getInfo()) # All countries in the selected continents/area
    bar = progressbar.ProgressBar(maxval=len(features_country), \
        widgets=[progressbar.Bar('=', '[', ']'), ' ', '{}'.format(name), progressbar.Percentage()])
    bar.start()
    # Looping through all countries individually as there are limitations on the "coveringGrid" function, which needs to put into a list:
    for j in range(len(features_country)):
        bar.update(j+1)
        geometry = world.filter(ee.Filter.eq(sel_name, features_country[j]))
        ROI = geometry.geometry()
        data_pro = dataset.projection()
        features = ROI.coveringGrid(data_pro,tile_size) #Set the size of the tiling which will depend on the inital resolution set!
        geometries_new = features.toList(5000)
        for k in range(len(geometries_new.getInfo())):
            roi =ee.Feature(geometries_new.getInfo()[k]).geometry()
            ##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
            # Here the right feaure or layer is selected from the input dataset
            data = dataset.updateMask(dataset.eq(number_cover_type)).clip(roi)
            ##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
            data_pro = data.projection(); # Select projection of the image
            # Force the next reprojection to aggregate instead of resampling.
            new_area_count = data.reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro,None, scale_x)
            new_area_count_all = data.unmask().reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro, None ,scale_x)
            scaled_pixels =new_area_count.divide(new_area_count_all.divide(100)) # ((Sum of selected pixels)/Total_Count_Pixels)*100 To get percent
            rio_pixels = scaled_pixels.clip(roi)
            #Possibility to mask certain vaules etc.:
            #imgUnmasked = rio_pixels.gt(0) #.select('b1')
            #umasked_data = rio_pixels.updateMask(imgUnmasked)
            # Only export tiles that are not already on disk (resumable runs).
            if os.path.exists('{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k)) == False:
                geemap.ee_export_image(rio_pixels , filename='{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k), scale= scale_x, region = ROI)
                #print(name_save, features_country[j], k)
            #else:
            #    print('This file already exists: ',name_save,k,features_country[j])
            # If the export still did not materialize, log it for later review.
            if os.path.exists('{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k)) == False:
                file_object = open('{}Missing_Files.txt'.format(path), 'a')
                file_object.write('{}, {}, {}, '.format(name_save, features_country[j], k))
                file_object.write("\n")
                # Close the file
                file_object.close()
                print(name_save, features_country[j], k, 'Is still missing - Download process failed - Will be downloaded in smaller patches')
            # Backup download in case there is downloading issue with the set tilesize
            if os.path.exists('{}/Image_Exported_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k)) == False:
                features_2 = roi.coveringGrid(data_pro, 200000)
                geometries_new_2 = features_2.toList(5000)#.map(func_yhi)
                for p in range(len(geometries_new_2.getInfo())):
                    roi_2 =ee.Feature(geometries_new_2.getInfo()[p]).geometry()
                    rio_pixels_2 = rio_pixels.clip(roi_2)
                    geemap.ee_export_image(rio_pixels_2 , filename='{}/Image_Exported_Failed_Down_{}_{}_{}_{}_{}.tif'.format(path,scale_x,name_save,j,k,p), scale= scale_x, region = roi_2)
    bar.finish()
##################### Start the first the mining process in Google Earth Engine ##############################
"""
10 006400 Trees
20 ffbb22 Shrubland
30 ffff4c Grassland
40 f096ff Cropland
50 fa0000 Built-up
60 b4b4b4 Barren / sparse vegetation
70 f0f0f0 Snow and ice
80 0064c8 Open water
90 0096a0 Herbaceous wetland
95 00cf75 Mangroves
100 fae6a0 Moss and lichen
https://developers.google.com/earth-engine/datasets/catalog/ESA_WorldCover_v100#bands
"""
if __name__ == "__main__":
    ##### Input - user dependent ##########################
    name = 'ESA_WorldCover_Trees' # Select name under which data will be stored
    dataset = ee.ImageCollection("ESA/WorldCover/v100").first()
    number_cover_type = 10  # land-cover class to extract (10 = Trees, see legend above)
    path_save = '/data/River_Density/New_River_Composition_Different_Res/'
    folder_name = 'Test_Folder'
    if os.path.exists('{}{}'.format(path_save,folder_name)) == False:
        os.mkdir('{}{}'.format(path_save,folder_name))
    path ='{}{}/'.format(path_save,folder_name)
    scale_x= 25000 #In m ==> 25km
    tile_size = 500000
    number_of_processors = 4
    ######################################################
    world = ee.FeatureCollection("USDOS/LSIB_SIMPLE/2017") # Feature collection which gives boundaries for countries and continents
    sel_name = 'wld_rgn' # if interested for countries select 'country_na'
    europe = world# Here is also option to select individual countries or continents, e.g. filter(ee.Filter.eq('wld_rgn', 'Europe'))
    features_cont = np.array(['North America','Africa' , 'Australia', 'Caribbean' ,'Central America',
        'Central Asia' ,'E Asia', 'Europe' ,'Indian Ocean', 'N Asia' ,
        'Oceania', 'S Asia', 'S Atlantic' ,'SE Asia', 'SW Asia', 'South America'])
    # To avoid spaces an addtional list of names has been created:
    features_cont_name = np.array(['North_America','Africa' , 'Australia', 'Caribbean' ,'Central_America',
        'Central_Asia' ,'E_Asia', 'Europe' ,'Indian_Ocean', 'N_Asia' ,
        'Oceania', 'S_Asia', 'S_Atlantic' ,'SE_Asia', 'SW_Asia', 'South_America'])
    # Creating a list to split the processes to the provided cores (this case 5 processes in parallel)
    x = np.arange(len(features_cont))
    split = np.array_split(x, number_of_processors) # Here the number of processors can be selected
    print(split, len(split))
    # One worker process per region within each split; splits run sequentially.
    for s in range(len(split)):
        #for s in range(1):
        print('Split', s+1, 'out of ', len(split))
        area_sel = features_cont[split[s]]
        area_sel_name = features_cont_name[split[s]]
        manager = multiprocessing.Manager()
        print('entering the processing')
        df_all = manager.list()
        processes = []
        for j in range(len(area_sel)):
            name_save = area_sel_name[j]
            name_inp = area_sel[j]
            print(name_inp, 'is in the making')
            p = Process(target=get_geotiff_gee, args=(dataset,world,name_inp, path, scale_x, name_save,tile_size,number_cover_type,)) # Passing the list
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
    print('Finished first part. Now its time to look for the date line issue.')
    ####################### Downloading the areas along the date line separately to aviod feature cross over at -180,180!
    geometry_miss_1 = ee.Geometry.Polygon(
        [[[158.84159346653087, 73.96789885519699],
          [158.84159346653087, 52.15339248067615],
          [179.84745284153087, 52.15339248067615],
          [179.84745284153087, 73.96789885519699]]])
    geometry_miss_2 = ee.Geometry.Polygon(
        [[[-165.56270340846913, 73.72336873420824],
          [-165.56270340846913, 44.519635837378665],
          [-139.01973465846913, 44.519635837378665],
          [-139.01973465846913, 73.72336873420824]]])
    geometry_miss_all = [geometry_miss_1, geometry_miss_2]
    data_pro = dataset.projection()
    for i in range(len(geometry_miss_all)):
        ROI = ee.Feature(geometry_miss_all[i]).geometry()
        features = ROI.coveringGrid(data_pro, 1000000)
        geometries_new = features.toList(5000)#.map(func_yhi)
        list_images = []
        for k in range(len(geometries_new.getInfo())):
            roi =ee.Feature(geometries_new.getInfo()[k]).geometry()
            ##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
            data = dataset.updateMask(dataset.eq(number_cover_type)).clip(roi)
            ##########!!!!!!!!!!!!!!! Depending on dataset!!!!!!!!!!!!!!!!!!!!############
            data_pro = data.projection(); # Select projection of the image
            # Force the next reprojection to aggregate instead of resampling.
            new_area_count = data.reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro,None, scale_x)
            new_area_count_all = data.unmask().reduceResolution(**{'reducer': ee.Reducer.count(),'bestEffort': True, 'maxPixels':65536}).reproject(data_pro, None ,scale_x)
            scaled_pixels =new_area_count.divide(new_area_count_all.divide(100)) # ((Sum of selected pixels)/Total_Count_Pixels)*100 To get percent
            rio_pixels = scaled_pixels.clip(roi)
            if os.path.exists('{}Image_Date_Line_Missing_{}_{}_{}_{}.tif'.format(path,scale_x,i,k,len(geometries_new.getInfo()))) == False:
                geemap.ee_export_image(rio_pixels, filename='{}Image_Date_Line_Missing_{}_{}_{}_{}.tif'.format(path,scale_x,i,k,len(geometries_new.getInfo()) ), scale= scale_x, region = roi)
    print('All data is downloaded, its time to start creating some maps.')
    ######################### Merging and Reprojecting the data ###########################
    folder_name_2 = 'Reprojected_Files'
    if os.path.exists('{}{}'.format(path,folder_name_2)) == False:
        os.mkdir('{}{}'.format(path,folder_name_2))
    path_repro ='{}{}/'.format(path,folder_name_2)
    folder_name_3 = 'Final_Files'
    if os.path.exists('{}{}'.format(path,folder_name_3)) == False:
        os.mkdir('{}{}'.format(path,folder_name_3))
    path_final ='{}{}/'.format(path,folder_name_3)
    files_to_mosaic = glob.glob('{}/*.tif'.format(path))
    print(len(files_to_mosaic))
    files_string = " ".join(files_to_mosaic)
    # Reproject every exported tile to WGS84 lon/lat via gdalwarp.
    for i in range(len(files_to_mosaic)):
        # Possibility to set projection
        command ='gdalwarp {} {}Out_{}.tif -overwrite -t_srs "+proj=longlat +ellps=WGS84"'.format(files_to_mosaic[i], path_repro,i)
        print(os.popen(command).read())
    files_to_mosaic = np.array(glob.glob('{}*.tif'.format(path_repro)))
    long = np.array_split(range(len(files_to_mosaic)), 5) # This needs to be done because gdal has a limit of geotiff files which can be processed at the same time
    for f in range(len(long)):
        files_ib = files_to_mosaic[long[f].astype(int)]
        print(len(files_to_mosaic))
        files_string = " ".join(files_ib)
        command = "gdal_merge.py -o {}inbetween_{}.tif -of gtiff -n 0 ".format(path_repro,f) + files_string
        print(os.popen(command).read())
    # Merging the inbetween files together
    files_to_mosaic = glob.glob('{}inbetween*.tif'.format(path_repro))
    files_string = " ".join(files_to_mosaic)
    command = "gdal_merge.py -o {}{}_{}.tif -of gtiff -n 0 ".format(path_final,scale_x,name) + files_string
    print(os.popen(command).read())
    command = "gdal_translate -scale -of KMLSUPEROVERLAY {}{}_{}.tif {}{}_{}.kmz".format(path_final,scale_x,name,path_final,scale_x,name)
    print(os.popen(command).read())
import sys
import os
import time
from json_tricks.np import dump, load
from functools import reduce
import numpy as np
import tensorflow as tf
from sklearn.linear_model import LinearRegression
# from scipy.sparse import hstack, csr_matrix, csr
import pandas as pd
import edward as ed
from edward.models import Normal
if "../modules" not in sys.path:
sys.path.append("../modules")
from preprocess import *
from ScipyOptimizerInterface import ScipyOptimizerInterface
def run_sklearn_optim(optimizer, feed_dict, sess, loss, print_freq = 10):
    '''Run a scipy-backed optimizer op while periodically printing the loss.

    INPUTS:
        optimizer: optimizer op (exposes a ScipyOptimizerInterface-style .minimize)
        feed_dict: feed dict of input data
        sess: tf session
        loss: loss op
        print_freq: print the loss once every n callback invocations
    OUTPUTS:
        dict of info on optimization results'''
    iter_counter = [0]  # mutable cell so the callback closure can update it

    def _report(cur_loss):
        # Emit loss every `print_freq` iterations; flush so progress shows promptly.
        if iter_counter[0] % print_freq == 0:
            print(cur_loss)
            sys.stdout.flush()
        iter_counter[0] += 1

    return optimizer.minimize(sess, feed_dict = feed_dict, fetches = [loss],
                              loss_callback = _report)
def make_sparse_tensor(csr_mat):
    '''Convert a scipy sparse matrix (csr format) into an equivalent tf.SparseTensor.'''
    coo = csr_mat.tocoo()
    # Stack the row/col index vectors into an [nnz, 2] coordinate array.
    index_pairs = np.stack([coo.row, coo.col], axis = 1)
    float_vals = tf.to_float(coo.data)
    return tf.SparseTensor(indices = index_pairs, values = float_vals,
                           dense_shape = coo.shape)
def update_param_dict(defaults, new_vals):
    '''Return a copy of `defaults` with entries overridden by `new_vals`.

    INPUTS:
        defaults: dict of default parameter values
        new_vals: dict of overrides (keys must be a subset of defaults'), or None
    OUTPUTS:
        new dict with overrides applied; neither input dict is modified
    RAISES:
        ValueError: if new_vals contains a key not present in defaults
    '''
    merged = dict(defaults)  # copy so the caller's dict is not mutated in place
    if new_vals is not None:
        # explicit raise (not assert) so validation survives `python -O`
        unknown = set(new_vals) - set(merged)
        if unknown:
            raise ValueError('unknown parameter keys: {}'.format(sorted(unknown)))
        merged.update(new_vals)
    return merged
def merge_dicts(orig_dict, add_dict):
    '''Return a new dict combining orig_dict with the key-value pairs of add_dict.

    Entries in add_dict take precedence on key collisions; inputs are unmodified.'''
    merged = {**orig_dict, **add_dict}
    return(merged)
def SSMD(pop1, pop2):
    '''Calculate the strictly standardized mean difference (SSMD) between two samples.

    NaNs are ignored in both the means and the standard deviations.'''
    mean_diff = np.nanmean(pop1) - np.nanmean(pop2)
    pooled_sd = np.sqrt(np.nanstd(pop1) ** 2 + np.nanstd(pop2) ** 2)
    return mean_diff / pooled_sd
def make_eval_masks(LFC_mats, test_ind_sets, inverse = False):
    '''Build boolean masks selecting which entries of each LFC matrix to evaluate on.

    With inverse=False the mask is True everywhere except held-out test indices;
    with inverse=True it is True only on the held-out indices. NaN entries are
    always masked out.'''
    if test_ind_sets is None:
        # no held-out data: use an empty (read-only) index set per matrix
        test_ind_sets = [np.array([], dtype = int)] * len(LFC_mats)
    masks = []
    for data_mat, held_out in zip(LFC_mats, test_ind_sets):
        mask = np.ones_like(data_mat, dtype = bool)
        mask[held_out] = False
        if inverse:  # evaluating on the test set instead of the training set
            mask = ~mask
        mask[np.isnan(data_mat)] = False
        masks.append(mask)
    return masks
def compute_hairpins_per_gene_CL(LFC_mats, sparse_mat, unique_hp_seqs, unique_CLs, unique_genes):
    '''Estimate number of measured hairpin LFCs per gene/CL or seed/CL
    INPUTS:
        LFC_mats: list of hairpin LFC mats
        sparse_mat: sparse matrix mapping hairpins to genes/seeds
        unique_hp_seqs: ordered list of unique hairpins
        unique_CLs: ordered list of unique CLs
        unique_genes: ordered list of unique genes
    OUTPUTS:
        matrix with number used hairpins per gene/CL
    '''
    hp_lookup = {hp: idx for idx, hp in enumerate(unique_hp_seqs)}
    CL_lookup = {cl: idx for idx, cl in enumerate(unique_CLs)}
    counts = np.zeros((len(unique_genes), len(unique_CLs)))
    for data_mat in LFC_mats:
        row_inds = [hp_lookup[hp] for hp in data_mat.index.values]
        col_inds = [CL_lookup[cl] for cl in data_mat.columns.values]
        # count non-missing measurements, mapped from hairpins to genes/seeds
        batch_counts = sparse_mat[row_inds,:].transpose().dot(~np.isnan(data_mat))
        counts[:, col_inds] = counts[:, col_inds] + batch_counts
    return counts
def map_effects(scores, CL_inds, sparse_mat):
    '''Apply the hairpin mapping (gene/seed sparse matrix) to predicted score rows.'''
    selected_scores = tf.gather(scores, CL_inds)
    mapped = tf.sparse_tensor_dense_matmul(sparse_mat, selected_scores,
                                           adjoint_a = False, adjoint_b = True)
    return mapped
#******************* DEFINE DEMETER2 MODEl CLASS *************************#
class demeter:
'''Class implementing a DEMETER2 model'''
def default_reg_params(self):
params = {
'hairpin_l2_lambda': 0,
'hp_unpred_l2_lambda': 0,
'CL_l2_lambda': 0,
'gene_l2_lambda': 0, #L2 penalty on across-CL avg
'rel_gene_l2_lambda': 0, #L2 penalty on deviation from mean
'seed_l2_lambda': 0,
'rel_seed_l2_lambda': 0 #L2 penalty on deviation from mean
}
return(params)
def default_optim_params(self):
params = {'precision': 'double',
'maxiter': 2000,
'print_freq': 50,
'ftol': 1e-7}
return(params)
def __init__(self, LFC_mats, gene_matrix, seed_matrix, gene_sets, data_names = None,
reg_params = None, optim_params = None, test_inds = None, log_file = None):
'''
Create a demeter model instance
INPUTS:
LFC_mats: List of matrices [hairpins x CLs] of observed LFC values, one per batch/dataset
gene_matrix: [n_hairpins, n_genes] gene-target mapping, as a scipy csr sparse matrix.
seed_matrix: [n_hairpins, n_seeds] seed-target mapping, as a scipy csr sparse matrix.
gene_sets: dict with two entries 'pos' and 'neg'. Each are arrays of Gene names specifying positive and negative control gene sets respectively
data_names: dict of names for different entities (genes, CLs, hps)
reg_params: dict of regularization parameters.
Specify optional lambdas [hairpin_l2_lambda, CL_l2_lambda, gene_l2_lambda, seed_l2_lambda, rel_gene_l2_lambda, rel_seed_l2_lambda]
optim_params: dict of optimization parameters
test_inds: list of tuples specifying indices in the LFC data matrices to set aside for testing (set to None if not using xval)
log_file: path of log file
'''
self.min_hairpins_per = 2 #minimum number of hairpins per gene/seed to use for estimation of gene/seed effects
self.min_slope = 0.01 #minimum slope term (prevents them from getting set to 0 during optimization)
reg_params = update_param_dict(self.default_reg_params(), reg_params)
self.reg_params = reg_params
optim_params = update_param_dict(self.default_optim_params(), optim_params)
self.optim_params = optim_params
if data_names is not None:
self.data_names = data_names
self.log_file = log_file
if self.log_file is not None:
self._log_file = open(log_file, 'w')
else:
self._log_file = None
#init containers for storing stats across training iters
self.R2_vals = {'train': [], 'test': [], 'train_ms': [], 'test_ms': []} #store R2 evals in a dict
self.loss_evals = []
self.SSMD = {'train': [], 'test': []}
self.gene_sets = gene_sets
if self.optim_params['precision'] == 'double':
self.float = tf.float64
elif self.optim_params['precision'] == 'single':
self.float = tf.float32
else:
raise('invalid float type')
self.test_inds = test_inds
self.all_CL_names = get_CL_names(LFC_mats)
self.all_CL_batches = get_CL_batches(LFC_mats)
self.all_hp_seqs = get_hp_names(LFC_mats)
self.all_hp_batches = get_hp_batches(LFC_mats)
self.n_CLs = len(data_names['CLs'])
self.n_CL_batches = len(self.all_CL_names)
self.n_hp_batches = len(self.all_hp_seqs)
#BUILD GRAPH
self.g = tf.Graph()
self.sess = tf.Session(graph = self.g)
with self.g.as_default():
self.n_hairpins, self.n_genes = gene_matrix.shape
_, self.n_seeds = seed_matrix.shape
#calculate number of genes and seeds with data for each CL
self.n_used_hairpins_per_gene = compute_hairpins_per_gene_CL(
LFC_mats, gene_matrix, data_names['hps'], data_names['CLs'], data_names['genes'])
self.n_targeted_genes = np.sum(self.n_used_hairpins_per_gene >= self.min_hairpins_per, axis = 0)
self.n_used_hairpins_per_seed = compute_hairpins_per_gene_CL(
LFC_mats, seed_matrix, data_names['hps'], data_names['CLs'], data_names['seeds'])
self.n_targeted_seeds = np.sum(self.n_used_hairpins_per_seed >= self.min_hairpins_per, axis = 0)
#define parameter inits
init_params = {
'gene_score': tf.zeros([self.n_CLs, self.n_genes], self.float),
'seed_score': tf.zeros([self.n_CLs, self.n_seeds], self.float),
'gene_score_avgs': tf.zeros([1, self.n_genes], self.float),
'seed_score_avgs': tf.zeros([1, self.n_seeds], self.float),
'CL_offset': tf.zeros([self.n_CL_batches, 1], self.float),
'CL_slope': tf.ones([self.n_CL_batches, 1], self.float),
'gene_slope': tf.ones([self.n_CLs, 1], self.float),
'CL_noise_vars': tf.ones([self.n_CL_batches, 1], self.float),
'hairpin_offset': tf.zeros([self.n_hp_batches, 1], self.float),
'hairpin_unpred': tf.zeros([self.n_hairpins, 1], self.float),
'guide_Geff': tf.ones([self.n_hairpins, 1], self.float),
'guide_Seff': tf.ones([self.n_hairpins, 1], self.float)
}
self.obs = [tf.placeholder(self.float, dset.shape, name = "obs_" + str(ii)) \
for ii, dset in enumerate(LFC_mats)]
self.eval_mask = [tf.placeholder('bool', dset.shape, name = "eval_mask_" + str(ii)) \
for ii, dset in enumerate(LFC_mats)]
#Define variables
self.gene_score = tf.Variable(init_params['gene_score'], dtype = self.float, name = 'gene_score')
self.seed_score = tf.Variable(init_params['seed_score'], dtype = self.float, name = 'seed_score')
self.gene_score_avgs = tf.Variable(init_params['gene_score_avgs'], dtype = self.float, name = 'gene_score_avgs')
self.seed_score_avgs = tf.Variable(init_params['seed_score_avgs'], dtype = self.float, name = 'seed_score_avgs')
self.CL_offset = tf.Variable(init_params['CL_offset'], dtype = self.float, name = 'CL_offset')
self.CL_slope = tf.Variable(init_params['CL_slope'], dtype = self.float, name = 'CL_slope')
self.gene_slope = tf.Variable(init_params['gene_slope'], dtype = self.float, name = 'gene_slope')
self.hairpin_offset = tf.Variable(init_params['hairpin_offset'], dtype = self.float, name = 'hairpin_offset')
self.hairpin_unpred = tf.Variable(init_params['hairpin_unpred'], dtype = self.float, name = 'hairpin_offset')
self.guide_Geff = tf.Variable(init_params['guide_Geff'], dtype = self.float, name = 'guide_Geff')
self.guide_Seff = tf.Variable(init_params['guide_Seff'], dtype = self.float, name = 'guide_Seff')
self.CL_noise_vars = tf.Variable(init_params['CL_noise_vars'], dtype = self.float, name = 'noise_vars')
self.n_Geffs = self.n_hairpins
self.n_Seffs = self.n_hairpins
#maps from name to index value
self.hp_ind_map = {name: ind for ind, name in enumerate(data_names['hps'])}
self.CL_ind_map = {name: ind for ind, name in enumerate(data_names['CLs'])}
#make list of sparse gene and seed maps for each LFC dataset
gene_maps = [self.make_sparse_submap(gene_matrix, LFC_mat.index.values) for LFC_mat in LFC_mats]
seed_maps = [self.make_sparse_submap(seed_matrix, LFC_mat.index.values) for LFC_mat in LFC_mats]
#op that is the per-CL gene effect scaled by gene-KD slope (used for re-estimating gene slope)
self.ind_gene_effects = tf.multiply(self.gene_score_avgs + self.gene_score, self.gene_slope)
#package a dict of the model params
mod_params = {
'CL_noise_vars': self.CL_noise_vars,
'CL_offset': self.CL_offset,
'CL_slope': self.CL_slope,
'guide_Seff': self.guide_Seff,
'guide_Geff': self.guide_Geff,
'hairpin_offset': self.hairpin_offset,
'hairpin_unpred': self.hairpin_unpred,
'gene_slope': self.gene_slope,
'seed_score_avgs': self.seed_score_avgs,
'seed_score': self.seed_score,
'gene_score_avgs': self.gene_score_avgs,
'gene_score': self.gene_score}
#LOOP OVER DATASETS AND BUILD SUBGRAPH FOR EACH
dataset_nLLs = []
dataset_SS = []
self.shRNA_R2 = []
self.shRNA_nLL = []
self.shRNA_oSS = []
self.pred = []
hp_offset = 0
CL_offset = 0
for ii in range(len(self.obs)):
cur_pred = self.get_dataset_pred(
mod_params,
gene_maps[ii],
seed_maps[ii],
LFC_mats[ii].index.values,
LFC_mats[ii].columns.values,
hp_offset,
CL_offset)
cur_nLL, cur_SS = self.get_dataset_LL(
mod_params,
self.obs[ii],
cur_pred,
self.eval_mask[ii],
CL_offset)
cur_shRNA_R2, cur_shRNA_nLL, cur_shRNA_SS = self.get_shRNA_R2(
mod_params,
self.obs[ii],
cur_pred,
CL_offset)
self.shRNA_R2.append(cur_shRNA_R2)
self.shRNA_nLL.append(cur_shRNA_nLL)
self.shRNA_oSS.append(cur_shRNA_SS)
dataset_nLLs.append(cur_nLL)
dataset_SS.append(cur_SS)
self.pred.append(cur_pred)
hp_offset += LFC_mats[ii].shape[0]
CL_offset += LFC_mats[ii].shape[1]
self.nLL = tf.add_n(dataset_nLLs) #sum negative log-like across datasets
tot_SS = tf.add_n(dataset_SS) #sum squared error
#LOOP OVER DATASETS AND BUILD GENE-AVG SUBGRAPHS
dataset_avg_nLLs = []
hp_offset = 0
CL_offset = 0
for ii in range(len(self.obs)):
cur_pred = self.get_dataset_pred(
mod_params,
gene_maps[ii],
seed_maps[ii],
LFC_mats[ii].index.values,
LFC_mats[ii].columns.values,
hp_offset,
CL_offset,
just_avg_scores = True)
cur_nLL, cur_SS = self.get_dataset_LL(
mod_params,
self.obs[ii],
cur_pred,
self.eval_mask[ii],
CL_offset)
dataset_avg_nLLs.append(cur_nLL)
hp_offset += LFC_mats[ii].shape[0]
CL_offset += LFC_mats[ii].shape[1]
self.avg_effect_loss = tf.add_n(dataset_avg_nLLs)
#calc R2
self.R2 = 1 - self.nLL / tot_SS
#calc regularization penalty
self.CL_l2_lambda = tf.Variable(reg_params['CL_l2_lambda'], dtype = self.float)
self.hairpin_l2_lambda = tf.Variable(reg_params['hairpin_l2_lambda'], dtype = self.float)
self.hp_unpred_l2_lambda = tf.Variable(reg_params['hp_unpred_l2_lambda'], dtype = self.float)
self.rel_gene_l2_lambda = tf.Variable(reg_params['rel_gene_l2_lambda'], dtype = self.float)
self.rel_seed_l2_lambda = tf.Variable(reg_params['rel_seed_l2_lambda'], dtype = self.float)
self.gene_l2_lambda = tf.Variable(reg_params['gene_l2_lambda'], dtype = self.float)
self.seed_l2_lambda = tf.Variable(reg_params['seed_l2_lambda'], dtype = self.float)
self.pen = 0
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.CL_offset, 2)) * self.CL_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.hairpin_offset, 2)) * self.hairpin_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.hairpin_unpred, 2)) * self.hp_unpred_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.gene_score, 2)) * self.rel_gene_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.seed_score, 2)) * self.rel_seed_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.gene_score_avgs, 2)) * self.gene_l2_lambda
self.pen += 0.5 * tf.reduce_sum(tf.pow(self.seed_score_avgs, 2)) * self.seed_l2_lambda
#get total loss as likelihood plus penalty
self.loss = self.nLL + self.pen
self.avg_effect_loss += self.pen
#make optimizer op for score estimation
score_var_list = [self.gene_score, self.gene_score_avgs, self.seed_score, self.seed_score_avgs,
self.hairpin_offset, self.hairpin_unpred, self.CL_offset]
self.score_optim = ScipyOptimizerInterface(self.loss,
options = self.optim_params,
var_list = score_var_list,
method = 'L-BFGS-B')
#make optimizer op for estimating guide efficacies
guide_var_list = [self.hairpin_offset, self.hairpin_unpred, self.CL_offset, self.guide_Geff, self.guide_Seff]
n_uncon = self.n_hp_batches + self.n_hairpins + self.n_CL_batches
n_bcon = self.n_Geffs + self.n_Seffs
bound_constraints = np.concatenate([
np.tile([None, None], [n_uncon, 1]),
np.tile([0, 1], [n_bcon, 1])],
axis = 0)
self.guide_optim = ScipyOptimizerInterface(self.loss,
options = self.optim_params,
var_list = guide_var_list,
method = 'L-BFGS-B',
bounds = bound_constraints)
#make optimizer ops for estimating gene slopes
gene_slope_var_list = [self.hairpin_offset, self.hairpin_unpred, self.CL_offset, self.gene_slope]
n_uncon = self.n_hp_batches + self.n_hairpins + self.n_CL_batches
n_pcon = self.n_CLs
bound_constraints = np.concatenate([
np.tile([None, None], [n_uncon, 1]),
np.tile([0, None], [n_pcon, 1])],
axis = 0)
self.gene_slope_optim = ScipyOptimizerInterface(self.avg_effect_loss,
options = self.optim_params,
var_list = gene_slope_var_list,
method = 'L-BFGS-B',
bounds = bound_constraints)
#make optimizer ops for estimating CL slopes
ov_slope_var_list = [self.hairpin_offset, self.CL_offset, self.CL_slope]
n_uncon = self.n_hp_batches + self.n_CL_batches
n_pcon = self.n_CL_batches
bound_constraints = np.concatenate([
np.tile([None, None], [n_uncon, 1]),
np.tile([0, None], [n_pcon, 1])],
axis = 0)
self.ov_slope_optim = ScipyOptimizerInterface(self.avg_effect_loss,
options = self.optim_params,
var_list = ov_slope_var_list,
method = 'L-BFGS-B',
bounds = bound_constraints)
init = tf.global_variables_initializer()
self.sess.run(init)
if self._log_file is not None:
self._log_file.close()
def write(self, data, silent = False):
'''Internal method to print to stdout and logfile
INPUTS:
data: string to print_freq
silent: print to terminal?
'''
if not silent:
print(data)
if self._log_file is not None:
if self._log_file.closed:
self._log_file = open(self.log_file, 'a')
self._log_file.write(data + '\n')
def predict(self):
'''Get model prediction'''
return(self.sess.run(self.pred))
def get_SSMD(self, use_test = False):
'''Calculate SSMD for a set of gene avgs, given sets of positive and negative controls'''
gene_score, gene_score_avgs, CL_noise_vars = \
self.sess.run([self.gene_score, self.gene_score_avgs, self.CL_noise_vars])
gene_scores = gene_score + gene_score_avgs
noise_vars_per_CL = pd.DataFrame({'noise_vars': CL_noise_vars.flatten(), 'CL_name': self.all_CL_names}) \
.groupby('CL_name').mean().ix[self.data_names['CLs'],:].values
weights = 1 / noise_vars_per_CL.reshape(-1,1)
weights = weights / np.nanmean(weights)
weight_avg = np.nanmean(gene_scores * weights, axis = 0)
if use_test: #if using cross-val on set of control genes
pop1 = weight_avg[np.in1d(self.data_names['genes'], self.gene_sets['neg_test'])]
pop2 = weight_avg[np.in1d(self.data_names['genes'], self.gene_sets['pos_test'])]
else:
pop1 = weight_avg[np.in1d(self.data_names['genes'], self.gene_sets['neg'])]
pop2 = weight_avg[np.in1d(self.data_names['genes'], self.gene_sets['pos'])]
return(SSMD(pop1, pop2))
def fit(self, LFC_mats, fit_params = 'scores', ignore_test = False):
'''
Train subset of model parameters
INPUTS:
LFC_mats: List of [n_hairpins, n_CLs] training data sets of measured hairpin-level LFCs
fit_params: model parameter set to estimate ['scores', 'guide_effs', 'gene_slopes', 'ov_slopes', 'noise_vars', 'gene_slopes_ML', 'ov_slopes_ML']
ignore_test: optional fit to all data even if test_inds are defined
'''
poss_fit_params = ['scores', 'guide_effs', 'gene_slopes', 'ov_slopes', 'noise_vars', 'gene_slopes_ML', 'ov_slopes_ML']
assert(fit_params in poss_fit_params)
if self.log_file is not None:
self._log_file = open(self.log_file, 'a')
if self.test_inds is not None and not ignore_test:
train_eval_masks = make_eval_masks(LFC_mats, self.test_inds)
test_eval_masks = make_eval_masks(LFC_mats, self.test_inds, inverse = True)
else:
train_eval_masks = make_eval_masks(LFC_mats, None)
LFC_mats_no_na = []
for LFC_mat in LFC_mats:
cur = LFC_mat.copy()
cur[np.isnan(cur)] = 0
LFC_mats_no_na.append(cur)
feed_dict = {i: d for i, d in zip(self.obs, LFC_mats_no_na)}
train_mask_dict = {i: d for i, d in zip(self.eval_mask, train_eval_masks)}
train_dict = merge_dicts(feed_dict, train_mask_dict)
if self.test_inds is not None and not ignore_test:
test_mask_dict = {i: d for i, d in zip(self.eval_mask, test_eval_masks)}
test_dict = merge_dicts(feed_dict, test_mask_dict)
if fit_params == 'scores':
R2_evals = self.sess.run(self.R2, feed_dict = train_dict)
self.write('Init R2: {}'.format(R2_evals))
if self.test_inds and not ignore_test:
R2_evals = self.sess.run(self.R2, feed_dict = test_dict)
self.write('Init Test R2: {}'.format(R2_evals))
t0 = time.time()
if fit_params == 'scores':
self.write('Fitting model scores')
optim_res = self._fit_scores(train_dict)
elif fit_params == 'guide_effs':
self.write('Fitting guide efficacies')
optim_res = self._fit_guide_efficacies(train_dict)
elif fit_params == 'gene_slopes':
self._fit_gene_slopes()
elif fit_params == 'ov_slopes':
self._fit_ov_slopes(LFC_data, ignore_test = ignore_test)
elif fit_params == 'gene_slopes_ML':
optim_res = self._fit_gene_slopes_ML(train_dict)
elif fit_params == 'ov_slopes_ML':
optim_res = self._fit_ov_slopes_ML(train_dict)
elif fit_params == 'noise_vars':
self._fit_noise_vars(LFC_mats, ignore_test = ignore_test)
elif fit_params == 'slopes':
self._fit_slopes(train_dict)
if fit_params in ['scores', 'guide_effs', 'gene_slopes_ML', 'ov_slopes_ML']:
self.write(optim_res['message'].decode('utf-8'))
self.write('Optimization finished after: {} sec, {} iter, {} fevals'.format(int(time.time() - t0),
optim_res['nit'],optim_res['nfev']))
if fit_params == 'scores':
R2_evals = self.sess.run(self.R2, feed_dict = train_dict)
self.R2_vals['train'].append(R2_evals)
self.write('New R2: {}'.format(R2_evals))
if self.test_inds and not ignore_test:
R2_evals = self.sess.run(self.R2, feed_dict = test_dict)
self.R2_vals['test'].append(R2_evals)
self.write('New Test R2: {}'.format(R2_evals))
self.SSMD['train'].append(self.get_SSMD(use_test = False))
self.write('Train SSMD: {}'.format(self.SSMD['train'][-1]))
if ('pos_test' in self.gene_sets) and (len(self.gene_sets['pos_test']) > 0):
self.SSMD['test'].append(self.get_SSMD(use_test = True))
self.write('Test SSMD: {}'.format(self.SSMD['test'][-1]))
self.loss_evals.append(self.sess.run(self.loss, feed_dict = train_dict))
if self._log_file is not None:
self._log_file.close()
def _fit_scores(self, feed_dict):
'''
Fit scores + intercepts using BFGS
INPUTS:
feed_dict: input data
optim_params: dict of optimization parameters
'''
optim_res = run_sklearn_optim(self.score_optim, feed_dict, self.sess, self.loss,
print_freq = self.optim_params['print_freq'])
return(optim_res)
def _fit_gene_slopes_ML(self, feed_dict):
'''
Fit slopes + intercepts using BFGS
INPUTS:
feed_dict: input data
optim_params: dict of optimization parameters
'''
init_gene_slopes = self.sess.run([self.gene_slope])
optim_res = run_sklearn_optim(self.gene_slope_optim, feed_dict, self.sess, self.avg_effect_loss,
print_freq = self.optim_params['print_freq'])
new_gene_slopes = self.sess.run(self.gene_slope)
self.write('init gene slopes avg: {}, new gene slope avg: {}'.format(np.mean(init_gene_slopes), np.mean(new_gene_slopes)))
new_gene_slopes[new_gene_slopes < self.min_slope] = self.min_slope #constrain to be non negative
# new_gene_slopes = euclidean_proj_simplex(new_gene_slopes.flatten(), s=self.n_CLs).reshape(1,-1)
new_gene_slopes = new_gene_slopes / np.nanmean(new_gene_slopes)
_=self.sess.run(self.gene_slope.assign(new_gene_slopes.reshape(-1,1)))
return(optim_res)
def _fit_ov_slopes_ML(self, feed_dict):
'''
Fit slopes + intercepts using BFGS
INPUTS:
feed_dict: input data
optim_params: dict of optimization parameters
'''
init_CL_slopes = self.sess.run([self.CL_slope])
optim_res = run_sklearn_optim(self.ov_slope_optim, feed_dict, self.sess, self.avg_effect_loss,
print_freq = self.optim_params['print_freq'])
new_CL_slopes = self.sess.run(self.CL_slope)
self.write('init ov slopes avg: {}, new ov slope avg: {}'.format(np.mean(init_CL_slopes), np.mean(new_CL_slopes)))
new_CL_slopes[new_CL_slopes < self.min_slope] = self.min_slope
# new_CL_slopes = euclidean_proj_simplex(new_CL_slopes.flatten(), s=self.n_CLs).reshape(1,-1)
new_CL_slopes = new_CL_slopes / np.nanmean(new_CL_slopes)
_=self.sess.run(self.CL_slope.assign(new_CL_slopes))
return(optim_res)
def _fit_gene_slopes(self):
'''Re-estimate gene score slope terms using pos/neg control gene set median separation'''
init_gene_slopes, init_gene_effects = self.sess.run([self.gene_slope, self.ind_gene_effects])
# NA out gene scores for cell lines where we dont have targeting guides
init_gene_effects[self.n_used_hairpins_per_gene.transpose() < self.min_hairpins_per] = np.nan
#estimate centers of positive and negative gene set distributions
pos_med = np.nanmedian(init_gene_effects[:, np.in1d(self.data_names['genes'], self.gene_sets['pos'])], axis = 1)
neg_med = np.nanmedian(init_gene_effects[:, np.in1d(self.data_names['genes'], self.gene_sets['neg'])], axis = 1)
new_gene_slopes = neg_med - pos_med
self.write('negative gene slopes: {}/{}'.format(np.sum(new_gene_slopes < 0), self.n_CLs))
self.write('init gene slopes avg: {}, new gene slope avg: {}'.format(np.mean(init_gene_slopes), np.mean(new_gene_slopes)))
new_gene_slopes = new_gene_slopes / np.nanmean(new_gene_slopes) #normalize to have mean 1
_=self.sess.run(self.gene_slope.assign(new_gene_slopes.reshape(-1,1)))
def _fit_guide_efficacies(self, feed_dict):
'''
Fit guide_efficacies + intercepts using BFGS
INPUTS:
feed_dict: input data
optim_params: dict of optimization parameters
'''
init_guide_Geffs, init_guide_Seffs = self.sess.run([self.guide_Geff, self.guide_Seff])
optim_res = run_sklearn_optim(self.guide_optim, feed_dict, self.sess, self.loss,
print_freq = self.optim_params['print_freq'])
new_guide_Geffs, new_guide_Seffs = self.sess.run([self.guide_Geff, self.guide_Seff])
self.write('init avg Geff: {} Seff: {}, new avg Geff: {} Seff: {}'.format(np.mean(init_guide_Geffs),
np.mean(init_guide_Seffs), np.mean(new_guide_Geffs), np.mean(new_guide_Seffs)))
return(optim_res)
def _fit_noise_vars(self, LFC_mats, ignore_test = False):
'''Estimate noise variance per CL'''
tot_SSE = np.zeros(self.n_CL_batches)
tot_used_hps = np.zeros(self.n_CL_batches)
batch_offset = 0
for batch_ii, (LFC_mat, pred_mat) in enumerate(zip(LFC_mats, self.predict())):
cur_CL_inds = np.arange(LFC_mat.shape[1]) + batch_offset
cur_d = LFC_mat.values.copy()
if not ignore_test and self.test_inds is not None:
cur_d[self.test_inds[batch_ii]] = np.nan
tot_SSE[cur_CL_inds] += np.nansum((pred_mat - cur_d)**2, axis = 0)
tot_used_hps[cur_CL_inds] += np.sum(~np.isnan(cur_d), axis = 0)
batch_offset += LFC_mat.shape[1]
# dof = tot_used_hps - self.n_targeted_genes - self.n_targeted_seeds - 1 #dof per CL (approximate)
dof = tot_used_hps #dof per CL (gives biased estimate)
per_CL_noise_var = tot_SSE / np.max(np.concatenate([dof.reshape(-1,1), np.ones((self.n_CL_batches, 1))], axis = 1), axis = 1)
self.sess.run(self.CL_noise_vars.assign(per_CL_noise_var.reshape(-1,1).astype(np.float32)))
def compute_R2_stats(self, LFC_mats):
'''
Computes R2 values per CL, and per hairpin
'''
self.CL_R2_df = pd.DataFrame()
self.hp_R2_df = pd.DataFrame()
for batch_id, (LFC_data, pred) in enumerate(zip(LFC_mats, self.predict())):
resids = LFC_data - pred
CL_noise_var = np.nanvar(resids, axis = 0)
CL_tot_var = np.nanvar(LFC_data, axis = 0)
CL_R2 = 1 - CL_noise_var / CL_tot_var
self.CL_R2_df = pd.concat([self.CL_R2_df,
pd.DataFrame({'CCLE_ID': LFC_data.columns.values,
'batch_id': np.ones_like(CL_R2)*batch_id,
'R2': CL_R2})])
hp_noise_var = np.nanvar(resids, axis = 1)
hp_tot_var = np.nanvar(LFC_data, axis = 1)
hp_R2 = 1 - hp_noise_var / hp_tot_var
self.hp_R2_df = pd.concat([self.hp_R2_df,
pd.DataFrame({'hp_seq': LFC_data.index.values,
'batch_id': np.ones_like(hp_R2)*batch_id,
'R2': hp_R2})])
def init_slopes(self, LFC_mats):
'''Get initial estimates of CL slopes by regressing each CL's data on within-batch avg'''
lm = LinearRegression(fit_intercept=True)
#first get overall slope adjustment per batch
if len(LFC_mats) > 0:
common_hps = reduce(np.intersect1d, [LFC_mat.index.values for LFC_mat in LFC_mats])
else:
common_hps = LFC_mats[0].index.values
if len(common_hps) > 100:
per_batch_avgs = np.ones((len(LFC_mats), len(common_hps)))
for ii, LFC_mat in enumerate(LFC_mats):
cur_d = LFC_mat.ix[np.in1d(LFC_mat.index.values, common_hps),:]
cur_d = cur_d.reset_index().drop_duplicates(subset='index', keep='last').set_index('index')
per_batch_avgs[ii,:] = np.nanmean(cur_d.ix[common_hps,:].values, axis = 1)
ov_avg = np.nanmean(per_batch_avgs, axis = 0)
batch_slopes = np.ones(per_batch_avgs.shape[0])
for ii in range(per_batch_avgs.shape[0]):
uset = np.where(~np.isnan(per_batch_avgs[ii,:]))[0]
lm.fit(ov_avg.reshape(-1,1)[uset,:], per_batch_avgs[ii,uset].transpose())
batch_slopes[ii] = lm.coef_
else:
batch_slopes = np.array([np.nanstd(LFC_mat.values) for LFC_mat in LFC_mats])
batch_slopes = batch_slopes / np.nanmean(batch_slopes)
CL_slopes = np.ones(self.n_CL_batches)
CL_offset = 0
for batch_ii, LFC_mat in enumerate(LFC_mats):
avg_hp = np.nanmean(LFC_mat, axis = 1)
for ii in np.arange(LFC_mat.shape[1]):
uvals = np.where(~np.isnan(LFC_mat.values[:,ii]))[0]
lm.fit(avg_hp.reshape(-1,1)[uvals,:], LFC_mat.values[uvals,ii])
CL_slopes[ii + CL_offset] = lm.coef_ * batch_slopes[batch_ii]
CL_offset += LFC_mat.shape[1]
_=self.sess.run(self.CL_slope.assign(CL_slopes.reshape(-1,1)))
def save(self, results_dir, save_perf_only = False, edward = False):
'''
Write parameter matrices to text files. Also serialize other model params to json file at specified path
'''
if not os.path.exists(results_dir):
os.makedirs(results_dir)
if not edward:
other_df = {}
other_df['reg_params'] = self.reg_params
other_df['R2_vals'] = self.R2_vals
other_df['optim_params'] = self.optim_params
other_df['loss_evals'] = self.loss_evals
other_df['SSMD'] = self.SSMD
with open(os.path.join(results_dir, 'other_info.json'), 'w') as f:
dump(other_df, f, primitives = True, allow_nan = True)
if not save_perf_only: #if not just saving performance params
CL_df = pd.DataFrame({
'CCLE_ID': self.data_names['CLs'],
'gene_slope': self.gene_slope.eval(session = self.sess).flatten()
# 'noise_vars': self.CL_noise_vars.eval(session = self.sess).flatten()
})
CL_df.to_csv(os.path.join(results_dir, 'CL_data.csv'), index = False)
CL_batch_df = pd.DataFrame({'CCLE_ID': self.all_CL_names,
'CL_slope': self.CL_slope.eval(session = self.sess).flatten(),
'CL_offset': self.CL_offset.eval(session = self.sess).flatten(),
'CL_batch': self.all_CL_batches,
'noise_vars': self.CL_noise_vars.eval(session = self.sess).flatten()})
if hasattr(self, 'CL_R2'):
CL_batch_df['R2'] = self.CL_R2_df['R2']
if edward:
CL_batch_df['offset_mean'] = self.q_CL_offset.loc.eval().flatten()
CL_batch_df['offset_sd'] = self.q_CL_offset.scale.eval().flatten()
CL_batch_df.to_csv(os.path.join(results_dir, 'CL_batch_data.csv'), index = False)
hp_df = pd.DataFrame({
'hp': self.data_names['hps'],
'unpred_offset': self.hairpin_unpred.eval(session = self.sess).flatten(),
'Geff': self.guide_Geff.eval(session = self.sess).flatten(),
'Seff': self.guide_Seff.eval(session = self.sess).flatten()
})
if edward:
hp_df['unpred_offset_mean'] = self.q_hairpin_unpred.loc.eval()
hp_df['unpred_offset_sd'] = self.q_hairpin_unpred.scale.eval()
hp_df.to_csv(os.path.join(results_dir, 'hp_data.csv'), index = False)
hp_batch_df = pd.DataFrame({
'hp': self.all_hp_seqs,
'hp_batch': self.all_hp_batches,
'hairpin_offset': self.hairpin_offset.eval(session = self.sess).flatten()
})
if hasattr(self, 'hp_R2'):
hp_batch_df['R2'] = self.hp_R2_df['R2']
if edward:
hp_batch_df['hairpin_offset_mean'] = self.q_hairpin_offset.loc.eval()
hp_batch_df['hairpin_offset_sd'] = self.q_hairpin_offset.scale.eval()
hp_batch_df.to_csv(os.path.join(results_dir, 'hp_batch_data.csv'), index = False)
per_gene_df = pd.DataFrame({'avg': self.gene_score_avgs.eval(session = self.sess).flatten()},
index = self.data_names['genes'])
if edward:
gene_mean_df = pd.DataFrame((self.q_gene_score.loc.eval() + \
self.q_gene_score_avgs.loc.eval()).transpose(),
index = self.data_names['genes'], columns = self.data_names['CLs'])
gene_sd_df = pd.DataFrame(np.sqrt(self.q_gene_score.scale.eval()**2 + \
self.q_gene_score_avgs.scale.eval()**2).transpose(),
index = self.data_names['genes'], columns = self.data_names['CLs'])
gene_mean_df = gene_mean_df.where(self.n_used_hairpins_per_gene >= self.min_hairpins_per, other = np.nan)
gene_sd_df = gene_sd_df.where(self.n_used_hairpins_per_gene >= self.min_hairpins_per, other = np.nan)
gene_mean_df.to_csv(os.path.join(results_dir, 'gene_means.csv'))
gene_sd_df.to_csv(os.path.join(results_dir, 'gene_SDs.csv'))
per_gene_df['SD'] = self.q_gene_score_avgs.scale.eval().flatten()
else:
gene_df = pd.DataFrame((self.gene_score.eval(session = self.sess) + \
self.gene_score_avgs.eval(session = self.sess)).transpose(),
index = self.data_names['genes'], columns = self.data_names['CLs'])
gene_df = gene_df.where(self.n_used_hairpins_per_gene >= self.min_hairpins_per, other = np.nan)
gene_df.to_csv(os.path.join(results_dir, 'gene_data.csv'))
per_gene_df.to_csv(os.path.join(results_dir, 'per_gene_data.csv'))
if edward:
seed_mean_df = pd.DataFrame((self.q_seed_score.loc.eval() + \
self.q_seed_score_avgs.loc.eval()).transpose(),
index = self.data_names['seeds'], columns = self.data_names['CLs'])
seed_sd_df = pd.DataFrame(np.sqrt(self.q_seed_score.scale.eval()**2 + \
self.q_seed_score_avgs.scale.eval()**2).transpose(),
index = self.data_names['seeds'], columns = self.data_names['CLs'])
seed_mean_df = seed_mean_df.where(self.n_used_hairpins_per_seed >= self.min_hairpins_per, other = np.nan)
seed_sd_df = seed_sd_df.where(self.n_used_hairpins_per_seed >= self.min_hairpins_per, other = np.nan)
seed_mean_df.to_csv(os.path.join(results_dir, 'seed_means.csv'))
seed_sd_df.to_csv(os.path.join(results_dir, 'seed_SDs.csv'))
else:
seed_df = pd.DataFrame((self.seed_score.eval(session = self.sess) + \
self.seed_score_avgs.eval(session = self.sess)).transpose(),
index = self.data_names['seeds'], columns = self.data_names['CLs'])
seed_df = seed_df.where(self.n_used_hairpins_per_seed >= self.min_hairpins_per, other = np.nan)
seed_df.to_csv(os.path.join(results_dir, 'seed_data.csv'))
    def make_edward_model(self, LFC_mats, gene_matrix, seed_matrix, data_names):
        '''Create a Bayesian model in edward, using current parameter estimates to initialize.

        Defines zero-mean Normal priors for the fitted parameters (prior scale
        1/sqrt(lambda) for each L2 penalty, since an L2 penalty corresponds to
        a Gaussian prior), freezes the remaining parameters at their current
        point estimates, builds a per-dataset Gaussian likelihood over the
        non-NaN entries of each LFC matrix, and prepares ed.KLqp variational
        inference with a fully factorized Gaussian posterior whose means are
        initialized at the current MAP estimates.

        Parameters
        ----------
        LFC_mats : list of pandas.DataFrame
            Per-dataset log-fold-change matrices (hairpins x cell lines);
            NaN entries mark missing observations and are masked out below.
        gene_matrix, seed_matrix : scipy sparse matrices
            Hairpin-to-gene and hairpin-to-seed mapping matrices.
        data_names : dict
            Axis-name lookup; not referenced in this body -- presumably kept
            for signature symmetry with related builders (TODO confirm).
        '''
        #define priors on parameters
        # Scale = 1/sqrt(lambda) so each prior matches the corresponding
        # L2 regularization strength used during MAP fitting.
        gene_score = Normal(loc=tf.zeros([self.n_CLs, self.n_genes], dtype = self.float),
            scale = np.sqrt(1.0/self.reg_params['rel_gene_l2_lambda']) * tf.ones([self.n_CLs, self.n_genes], dtype = self.float))
        seed_score = Normal(loc=tf.zeros([self.n_CLs, self.n_seeds], dtype = self.float),
            scale = np.sqrt(1.0/self.reg_params['rel_seed_l2_lambda']) * tf.ones([self.n_CLs, self.n_seeds], dtype = self.float))
        gene_score_avgs = Normal(loc=tf.zeros([1, self.n_genes], dtype = self.float),
            scale = np.sqrt(1.0/self.reg_params['gene_l2_lambda']) * tf.ones([1, self.n_genes], dtype = self.float))
        seed_score_avgs = Normal(loc=tf.zeros([1, self.n_seeds], dtype = self.float),
            scale = np.sqrt(1.0/self.reg_params['seed_l2_lambda']) * tf.ones([1, self.n_seeds], dtype = self.float))
        hairpin_offset = Normal(loc=tf.zeros([self.n_hp_batches, 1], dtype = self.float),
            scale = np.sqrt(1.0/self.reg_params['hairpin_l2_lambda']) * tf.ones([self.n_hp_batches, 1], dtype = self.float))
        hairpin_unpred = Normal(loc=tf.zeros([self.n_hairpins, 1], dtype = self.float),
            scale = np.sqrt(1.0/self.reg_params['hp_unpred_l2_lambda']) * tf.ones([self.n_hairpins, 1], dtype = self.float))
        CL_offset = Normal(loc=tf.zeros([self.n_CL_batches, 1], dtype = self.float),
            scale = np.sqrt(1.0/self.reg_params['CL_l2_lambda']) * tf.ones([self.n_CL_batches, 1], dtype = self.float))
        #parameters we dont try to fit here
        # Frozen at their current session values and treated as constants.
        CL_slope = tf.constant(self.CL_slope.eval(session = self.sess))
        gene_slope = tf.constant(self.gene_slope.eval(session = self.sess))
        # region_weights = tf.constant(self.region_weights.eval(session = self.sess))
        guide_Geff = tf.constant(self.guide_Geff.eval(session = self.sess))
        guide_Seff = tf.constant(self.guide_Seff.eval(session = self.sess))
        gene_maps = [self.make_sparse_submap(gene_matrix, LFC_mat.index.values) for LFC_mat in LFC_mats]
        seed_maps = [self.make_sparse_submap(seed_matrix, LFC_mat.index.values) for LFC_mat in LFC_mats]
        # Noise SD parameterized through a log-variance Variable (keeps the
        # variance positive during optimization).
        self.noise_sigma = tf.sqrt(tf.exp(tf.Variable(np.log(self.CL_noise_vars.eval(session = self.sess)), dtype = self.float)))
        mod_params = {
            'CL_offset': CL_offset,
            'CL_slope': CL_slope,
            'guide_Seff': guide_Seff,
            'guide_Geff': guide_Geff,
            'hairpin_offset': hairpin_offset,
            'hairpin_unpred': hairpin_unpred,
            'gene_slope': gene_slope,
            'seed_score_avgs': seed_score_avgs,
            'seed_score': seed_score,
            'gene_score_avgs': gene_score_avgs,
            'gene_score': gene_score
        }
        # True where an LFC entry is observed (non-NaN).
        bool_masks = [tf.logical_not(tf.is_nan(LFC_mat.values)) for LFC_mat in LFC_mats]
        y_list = []
        CL_cnt = 0
        hp_cnt = 0
        # One Normal likelihood per dataset over its observed entries;
        # hp_cnt/CL_cnt track each dataset's offset into the global
        # hairpin-batch and CL-batch dimensions.
        for ii in range(len(self.obs)):
            cur_pred = self.get_dataset_pred(
                mod_params,
                gene_maps[ii],
                seed_maps[ii],
                LFC_mats[ii].index.values,
                LFC_mats[ii].columns.values,
                hp_cnt,
                CL_cnt)
            cur_CL_inds = np.arange(LFC_mats[ii].shape[1]) + CL_cnt
            hp_cnt += LFC_mats[ii].shape[0]
            CL_cnt += LFC_mats[ii].shape[1]
            # Per-CL noise SD broadcast across all hairpins of this dataset.
            cur_sigma = tf.transpose(tf.gather(self.noise_sigma, cur_CL_inds)) * tf.ones_like(cur_pred)
            y_list.append(Normal(loc=tf.boolean_mask(cur_pred, bool_masks[ii]),
                scale = tf.boolean_mask(cur_sigma, bool_masks[ii])))
        # NaN-free copies; used below only for the placeholder shapes.
        LFC_mats_no_na = []
        for LFC_mat in LFC_mats:
            cur = LFC_mat.values.copy()
            cur[np.isnan(cur)] = 0
            LFC_mats_no_na.append(cur)
        # obs_list = [tf.constant(LFC_mat, dtype = 'float') for LFC_mat in LFC_mats_no_na]
        obs_list = [tf.placeholder(self.float, dset.shape) for dset in LFC_mats_no_na]
        #posterior approximating distributions (fully factorized gaussian)
        # Means start at the current MAP estimates; scales go through softplus
        # of a random init so they stay positive.
        self.q_gene_score = Normal(loc=tf.Variable(self.gene_score.eval(session = self.sess), dtype = self.float),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_CLs, self.n_genes], dtype = self.float))))
        self.q_seed_score = Normal(loc=tf.Variable(self.seed_score.eval(session = self.sess), dtype = self.float),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_CLs, self.n_seeds], dtype = self.float))))
        self.q_gene_score_avgs = Normal(loc=tf.Variable(self.gene_score_avgs.eval(session = self.sess), dtype = self.float),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([1, self.n_genes], dtype = self.float))))
        self.q_seed_score_avgs = Normal(loc=tf.Variable(self.seed_score_avgs.eval(session = self.sess), dtype = self.float),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([1, self.n_seeds], dtype = self.float))))
        self.q_hairpin_offset = Normal(loc=tf.Variable(self.hairpin_offset.eval(session = self.sess), dtype = self.float),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_hp_batches, 1], dtype = self.float))))
        self.q_hairpin_unpred = Normal(loc=tf.Variable(self.hairpin_unpred.eval(session = self.sess), dtype = self.float),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_hairpins, 1], dtype = self.float))))
        self.q_CL_offset = Normal(loc=tf.Variable(self.CL_offset.eval(session = self.sess), dtype = self.float),
            scale=tf.nn.softplus(tf.Variable(tf.random_normal([self.n_CL_batches, 1], dtype = self.float))))
        # Bind each likelihood node to its masked observations, and feed each
        # placeholder the original DataFrame. NOTE(review): the NaN-containing
        # frames are fed directly; the NaN entries never reach the loss because
        # of the boolean mask, but confirm this was intended over feeding
        # LFC_mats_no_na.
        data_dict = {i: tf.boolean_mask(d, m) for i, d, m in zip(y_list, obs_list, bool_masks)}
        for i, d in zip(obs_list, LFC_mats):
            data_dict.update({i: d})
        self.inference = ed.KLqp({gene_score: self.q_gene_score,
                                  seed_score: self.q_seed_score,
                                  gene_score_avgs: self.q_gene_score_avgs,
                                  seed_score_avgs: self.q_seed_score_avgs,
                                  hairpin_offset: self.q_hairpin_offset,
                                  hairpin_unpred: self.q_hairpin_unpred,
                                  CL_offset: self.q_CL_offset},
                                 data=data_dict)
        self.inference.initialize()
        tf.global_variables_initializer().run()
def run_edward_inference(self, n_iter = 1000, print_freq = 100):
loss_evals = np.zeros(n_iter)
orig_GS = self.gene_score.eval(session = self.sess).flatten()
tot_GS_var = np.var(orig_GS)
delta_G_R2 = 1 - np.var(orig_GS - self.q_gene_score.mean().eval().flatten()) / tot_GS_var
self.write('Init DeltaG_R2: {}'.format(delta_G_R2))
for ii in range(n_iter):
info_dict = self.inference.update()
loss_evals[ii] = info_dict['loss']
if ii % print_freq == 0:
delta_G_R2 = 1 - np.var(orig_GS - self.q_gene_score.mean().eval().flatten()) / tot_GS_var
self.write('It: {}, DeltaG_R2: {}, Loss: {}'.format(ii, delta_G_R2, loss_evals[ii]))
return(loss_evals)
## prediction
    def get_dataset_pred(self, mod_params, gene_map, seed_map, cur_hp_seqs, cur_CL_names, hp_offset, CL_offset, just_avg_scores = False):
        '''Predicted LFC matrix (hairpins x cell lines) for one dataset.

        Maps gene and seed scores onto hairpins through the sparse maps,
        scales them by per-guide efficacies (and, for genes, per-CL slopes),
        adds the hairpin-level unpredicted effect, then applies the per-CL
        slope plus CL- and hairpin-batch offset terms.

        Parameters
        ----------
        mod_params : dict
            Model parameter tensors (gene/seed scores and averages, guide
            efficacies, slopes, offsets).
        gene_map, seed_map : tf.SparseTensor
            Hairpin-to-gene / hairpin-to-seed maps for this dataset.
        cur_hp_seqs, cur_CL_names : array-like
            Row (hairpin sequence) and column (cell line) labels.
        hp_offset, CL_offset : int
            Starting indices of this dataset's blocks within the global
            hairpin-batch and CL-batch dimensions.
        just_avg_scores : bool
            If True, use only the across-CL average gene/seed scores
            (CL-specific components are zeroed out).
        '''
        cur_hp_inds = np.array([self.hp_ind_map[x] for x in cur_hp_seqs])
        cur_CL_inds = np.array([self.CL_ind_map[x] for x in cur_CL_names])
        # Positions of this dataset's columns/rows in the batch vectors.
        CL_batch_range = CL_offset + np.arange(len(cur_CL_names))
        hp_batch_range = hp_offset + np.arange(len(cur_hp_seqs))
        if just_avg_scores:
            # zeros_like broadcasts the 1 x n_genes averages to CL x gene shape.
            cur_gene_effect = map_effects(mod_params['gene_score_avgs'] + tf.zeros_like(mod_params['gene_score'], dtype = self.float),
                cur_CL_inds, gene_map)
        else:
            cur_gene_effect = map_effects(mod_params['gene_score'] + mod_params['gene_score_avgs'],
                cur_CL_inds, gene_map)
        # Scale by each guide's gene-knockdown efficacy...
        cur_gene_effect = tf.multiply(cur_gene_effect,
            tf.gather(mod_params['guide_Geff'],cur_hp_inds))
        # ...and by the per-CL gene slope (row vector broadcast over hairpins).
        cur_gene_effect = tf.multiply(cur_gene_effect,
            tf.reshape(tf.gather(mod_params['gene_slope'], cur_CL_inds), [1, -1]))
        if just_avg_scores:
            cur_seed_effect = map_effects(mod_params['seed_score_avgs'] + tf.zeros_like(mod_params['seed_score'], dtype = self.float),
                cur_CL_inds, seed_map)
        else:
            cur_seed_effect = map_effects(mod_params['seed_score'] + mod_params['seed_score_avgs'],
                cur_CL_inds, seed_map)
        # Seed effects are scaled by seed-efficacy only (no per-CL slope here).
        cur_seed_effect = tf.multiply(cur_seed_effect,
            tf.gather(mod_params['guide_Seff'], cur_hp_inds))
        #total KD effect of each hp
        cur_KD_effect = tf.gather(mod_params['hairpin_unpred'], cur_hp_inds) + cur_gene_effect + cur_seed_effect
        cur_KD_effect = tf.multiply(cur_KD_effect,
            tf.reshape(tf.gather(mod_params['CL_slope'], CL_batch_range), [1, -1]))
        cur_pred = cur_KD_effect + tf.reshape(tf.gather(mod_params['CL_offset'], CL_batch_range), [1, -1]) + \
            tf.gather(mod_params['hairpin_offset'], hp_batch_range) #add offset terms
        return(cur_pred)
def get_dataset_LL(self, mod_params, LFC_mat, preds, cur_eval_mask, CL_offset):
CL_batch_range = CL_offset + np.arange(LFC_mat.get_shape().as_list()[1])
cur_nLL = 0.5 * tf.reduce_sum(
tf.boolean_mask(
tf.multiply(tf.pow(preds - LFC_mat, 2),
1/tf.reshape(tf.gather(mod_params['CL_noise_vars'], CL_batch_range), [1, -1])),
cur_eval_mask
))
cur_SS = 0.5 * tf.reduce_sum(
tf.boolean_mask(
tf.multiply(tf.pow(LFC_mat, 2),
1/tf.reshape(tf.gather(mod_params['CL_noise_vars'], CL_batch_range), [1, -1])),
cur_eval_mask
))
return(cur_nLL, cur_SS)
def get_shRNA_R2(self, mod_params, LFC_mat, preds, CL_offset):
CL_batch_range = CL_offset + np.arange(LFC_mat.get_shape().as_list()[1])
preds_ms = preds - tf.reduce_mean(preds, axis = 1, keep_dims = True)
LFC_mat_ms = LFC_mat - tf.reduce_mean(LFC_mat, axis = 1, keep_dims = True)
cur_nLL = 0.5 * tf.reduce_sum(
tf.multiply(tf.pow(preds_ms - LFC_mat_ms, 2),
1/tf.reshape(tf.gather(mod_params['CL_noise_vars'], CL_batch_range), [1, -1])),
axis = 1)
cur_SS = 0.5 * tf.reduce_sum(
tf.multiply(tf.pow(LFC_mat_ms, 2),
1/tf.reshape(tf.gather(mod_params['CL_noise_vars'], CL_batch_range), [1, -1])),
axis = 1)
cur_R2 = 1 - tf.div(cur_nLL , cur_SS)
return(cur_R2, cur_nLL, cur_SS)
def make_sparse_submap(self, sparse_hp_mat, cur_hp_seqs):
'''Extract a set of rows for specific hairpins from a sparse matrix'''
cur_hp_inds = np.array([self.hp_ind_map[x] for x in cur_hp_seqs])
map_coo = sparse_hp_mat[cur_hp_inds,:].tocoo()
map_inds = np.concatenate([map_coo.row[:,None], map_coo.col[:,None]], axis = 1)
if self.float == tf.float32:
vals = tf.to_float(map_coo.data)
else:
vals = tf.to_double(map_coo.data)
return(tf.SparseTensor(indices=map_inds, values=vals, dense_shape=map_coo.shape)) | |
# Image-based testing borrowed from vispy
"""
Procedure for unit-testing with images:
Run individual test scripts with the PYQTGRAPH_AUDIT environment variable set:
$ PYQTGRAPH_AUDIT=1 python pyqtgraph/graphicsItems/tests/test_PlotCurveItem.py
Any failing tests will display the test results, standard image, and the
differences between the two. If the test result is bad, then press (f)ail.
If the test result is good, then press (p)ass and the new image will be
saved to the test-data directory.
To check all test results regardless of whether the test failed, set the
environment variable PYQTGRAPH_AUDIT_ALL=1.
"""
import time
import os
import sys
import inspect
import warnings
import numpy as np
from pathlib import Path
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph import functions as fn
from pyqtgraph import GraphicsLayoutWidget
from pyqtgraph import ImageItem, TextItem
tester = None
# Convenient stamp used for ensuring image orientation is correct
axisImg = [
    "            1 1 1    ",
    "          1 1 1 1 1  ",
    "            1 1 1 1 1 1 1 1 1 1",
    "          1     1 1  ",
    "        1         1 1",
    "          1   1      ",
    "            1 1      ",
    "          1          ",
    "                     ",
    "          1          ",
    "          1          ",
    "          1          ",
    "1 1 1 1 1            ",
    "1 1 1 1 1            ",
    "  1 1 1              ",
    "  1 1 1              ",
    "    1                ",
    "    1                ",
]
# BUG FIX: on Python 3 `map` returns a lazy iterator, so the original
# np.array([map(int, ...) ...]) produced a 1-D object array of map objects
# instead of the intended 2-D 0/1 array -- wrap each row in list().
# Rows are right-padded to a common width first so the array is rectangular
# even if trailing spaces were trimmed from the literals.
_width = max(len(row) for row in axisImg)
axisImg = np.array([
    list(map(int, row.ljust(_width)[::2].replace(' ', '0')))
    for row in axisImg
])
def getTester():
    """Return the process-wide ImageTester singleton, creating it on first use."""
    global tester
    existing = tester
    if existing is None:
        existing = ImageTester()
        tester = existing
    return existing
def getImageFromWidget(widget):
    """Render a Qt widget offscreen and return its pixels as an RGBA ndarray.

    The returned array is a copy, detached from the temporary QImage buffer.
    """
    # just to be sure the widget size is correct (new window may be resized):
    QtGui.QApplication.processEvents()
    qimg = QtGui.QImage(widget.size(), QtGui.QImage.Format.Format_ARGB32)
    qimg.fill(QtCore.Qt.GlobalColor.transparent)
    painter = QtGui.QPainter(qimg)
    widget.render(painter)
    # Painting must be finished before the image is converted/read.
    painter.end()
    qimg = qimg.convertToFormat(QtGui.QImage.Format.Format_RGBA8888)
    # .copy() detaches from the QImage's buffer, which dies with qimg.
    return fn.qimage_to_ndarray(qimg).copy()
def assertImageApproved(image, standardFile, message=None, **kwargs):
    """Check that an image test result matches a pre-approved standard.

    If the result does not match, then the user can optionally invoke a GUI
    to compare the images and decide whether to fail the test or save the new
    image as the standard.
    Run the test with the environment variable PYQTGRAPH_AUDIT=1 to bring up
    the auditing GUI.

    Parameters
    ----------
    image : (h, w, 4) ndarray or QWidget
        Test output; a widget is rendered to an image first.
    standardFile : str
        The name of the approved test image to check against. This file name
        is relative to the root of the pyqtgraph test-data repository and will
        be automatically fetched.
    message : str
        A string description of the image. It is recommended to describe
        specific features that an auditor should look for when deciding whether
        to fail a test.

    Extra keyword arguments are used to set the thresholds for automatic image
    comparison (see ``assertImageMatch()``).
    """
    if isinstance(image, QtGui.QWidget):
        # just to be sure the widget size is correct (new window may be resized):
        QtGui.QApplication.processEvents()
        graphstate = scenegraphState(image, standardFile)
        image = getImageFromWidget(image)
    # NOTE(review): `graphstate` is only bound in the widget branch above;
    # the print(graphstate) calls below would raise NameError for ndarray
    # input -- confirm callers always pass widgets.
    if message is None:
        # Default message identifies the calling test function.
        code = inspect.currentframe().f_back.f_code
        message = "%s::%s" % (code.co_filename, code.co_name)
    # Make sure we have a test data repo available
    dataPath = getTestDataDirectory()
    # Read the standard image if it exists
    stdFileName = os.path.join(dataPath, standardFile + '.png')
    if not os.path.isfile(stdFileName):
        stdImage = None
    else:
        qimg = QtGui.QImage(stdFileName)
        qimg = qimg.convertToFormat(QtGui.QImage.Format.Format_RGBA8888)
        stdImage = fn.qimage_to_ndarray(qimg).copy()
        del qimg
    # If the test image does not match, then we go to audit if requested.
    try:
        if stdImage is None:
            raise Exception("No reference image saved for this test.")
        if image.shape[2] != stdImage.shape[2]:
            # Fixed message: added the missing space before the parenthesis.
            raise Exception("Test result has different channel count than standard image "
                            "(%d vs %d)" % (image.shape[2], stdImage.shape[2]))
        if image.shape != stdImage.shape:
            # Allow im1 to be an integer multiple larger than im2 to account
            # for high-resolution displays
            ims1 = np.array(image.shape).astype(float)
            ims2 = np.array(stdImage.shape).astype(float)
            sr = ims1 / ims2 if ims1[0] > ims2[0] else ims2 / ims1
            if (sr[0] != sr[1] or not np.allclose(sr, np.round(sr)) or
               sr[0] < 1):
                raise TypeError("Test result shape %s is not an integer factor"
                                " different than standard image shape %s." %
                                (ims1, ims2))
            sr = np.round(sr).astype(int)
            image = fn.downsample(image, sr[0], axis=(0, 1)).astype(image.dtype)
        assertImageMatch(image, stdImage, **kwargs)
        if bool(os.getenv('PYQTGRAPH_PRINT_TEST_STATE', False)):
            print(graphstate)
        if os.getenv('PYQTGRAPH_AUDIT_ALL') == '1':
            # Fixed typo: "evnironment" -> "environment".
            raise Exception("Image test passed, but auditing due to PYQTGRAPH_AUDIT_ALL environment variable.")
    except Exception:
        if os.getenv('PYQTGRAPH_AUDIT') == '1' or os.getenv('PYQTGRAPH_AUDIT_ALL') == '1':
            sys.excepthook(*sys.exc_info())
            getTester().test(image, stdImage, message)
            stdPath = os.path.dirname(stdFileName)
            print('Saving new standard image to "%s"' % stdFileName)
            if not os.path.isdir(stdPath):
                os.makedirs(stdPath)
            qimg = fn.ndarray_to_qimage(image, QtGui.QImage.Format.Format_RGBA8888)
            qimg.save(stdFileName)
            del qimg
        else:
            if stdImage is None:
                raise Exception("Test standard %s does not exist. Set "
                                "PYQTGRAPH_AUDIT=1 to add this image." % stdFileName)
            if os.getenv('CI') is not None:
                standardFile = os.path.join(os.getenv("SCREENSHOT_DIR", "screenshots"), standardFile)
                saveFailedTest(image, stdImage, standardFile)
            print(graphstate)
            raise
def assertImageMatch(im1, im2, minCorr=None, pxThreshold=50.,
                     pxCount=-1, maxPxDiff=None, avgPxDiff=None,
                     imgDiff=None):
    """Assert that two images are sufficiently similar.

    Images that differ in shape or dtype will fail unconditionally; further
    tests for similarity depend on the arguments supplied. By default, no
    pixel may have a per-channel value difference greater than 50.

    Parameters
    ----------
    im1 : (h, w, 4) ndarray
        Test output image
    im2 : (h, w, 4) ndarray
        Test standard image
    minCorr : float or None
        Minimum allowed correlation coefficient between corresponding image
        values (see numpy.corrcoef)
    pxThreshold : float
        Minimum value difference at which two pixels are considered different
    pxCount : int or None
        Maximum number of pixels that may differ. Default is 0, on Windows some
        tests have a value of 2.
    maxPxDiff : float or None
        Maximum allowed difference between pixels
    avgPxDiff : float or None
        Average allowed difference between pixels
    imgDiff : float or None
        Maximum allowed summed difference between images
    """
    assert im1.ndim == 3
    assert im1.shape[2] == 4
    assert im1.dtype == im2.dtype

    # -1 is the sentinel for "use the default of zero differing pixels".
    allowed_px = 0 if pxCount == -1 else pxCount
    delta = im1.astype(float) - im2.astype(float)

    if imgDiff is not None:
        assert np.abs(delta).sum() <= imgDiff

    # Per-pixel difference is the largest *signed* channel difference.
    per_px = delta.max(axis=2)
    bad = np.abs(per_px) >= pxThreshold
    if allowed_px is not None:
        assert bad.sum() <= allowed_px

    flagged = delta[bad]
    if flagged.size > 0:
        if maxPxDiff is not None:
            assert flagged.max() <= maxPxDiff
        if avgPxDiff is not None:
            assert flagged.mean() <= avgPxDiff

    if minCorr is not None:
        with np.errstate(invalid='ignore'):
            assert np.corrcoef(im1.ravel(), im2.ravel())[0, 1] >= minCorr
def saveFailedTest(data, expect, filename):
    """Save a side-by-side composite (result | expected | diff) for a failed test.

    Parameters
    ----------
    data, expect : (h, w, 4) ndarrays
        Test output and reference images.
    filename : str
        Output path; ".png" is appended.
    """
    # concatenate data, expect, and diff into a single image
    ds = data.shape
    es = expect.shape
    shape = (max(ds[0], es[0]) + 4, ds[1] + es[1] + 8 + max(ds[1], es[1]), 4)
    img = np.empty(shape, dtype=np.ubyte)
    img[..., :3] = 100
    img[..., 3] = 255
    img[2:2+ds[0], 2:2+ds[1], :ds[2]] = data
    img[2:2+es[0], ds[1]+4:ds[1]+4+es[1], :es[2]] = expect
    diff = makeDiffImage(data, expect)
    img[2:2+diff.shape[0], -diff.shape[1]-2:-2] = diff
    # BUG FIX: the composite `img` built above was discarded -- makePng was
    # called on `data`, saving only the raw failed image and leaving `img`
    # dead. Encode the composite, as the comment below always intended.
    png = makePng(img)  # change `img` to `data` to save just the failed image
    directory = os.path.dirname(filename)
    if not os.path.isdir(directory):
        os.makedirs(directory)
    with open(filename + ".png", "wb") as png_file:
        png_file.write(png)
    print("\nImage comparison failed. Test result: %s %s Expected result: "
          "%s %s" % (data.shape, data.dtype, expect.shape, expect.dtype))
def makePng(img):
    """Given an array like (H, W, 4), return a PNG-encoded byte string.
    """
    # Encode via Qt: wrap the array in a QImage and write PNG bytes into an
    # in-memory QBuffer instead of a file.
    # NOTE(review): the local name `io` shadows the stdlib io module.
    io = QtCore.QBuffer()
    qim = fn.ndarray_to_qimage(img, QtGui.QImage.Format.Format_RGBX8888)
    qim.save(io, 'PNG')
    # .data().data() unwraps the QByteArray to raw bytes.
    return bytes(io.data().data())
def makeDiffImage(im1, im2):
    """Build an image visualizing the differences between im1 and im2.

    The images may have different shapes; the output covers the larger extent
    in each dimension. Alpha channels are not compared: the result is mid-gray
    (128) plus im1's RGB minus im2's RGB, clipped to [0, 255] ubyte.
    """
    rows = max(im1.shape[0], im2.shape[0])
    cols = max(im1.shape[1], im2.shape[1])
    canvas = np.empty((rows, cols, 4), dtype=int)
    canvas[..., :3] = 128   # neutral gray background
    canvas[..., 3] = 255    # fully opaque
    canvas[:im1.shape[0], :im1.shape[1], :min(im1.shape[2], 3)] += im1[..., :3]
    canvas[:im2.shape[0], :im2.shape[1], :min(im2.shape[2], 3)] -= im2[..., :3]
    return np.clip(canvas, 0, 255).astype(np.ubyte)
class ImageTester(QtGui.QWidget):
    """Graphical interface for auditing image comparison tests.

    Shows three linked image views (test output, standard, diff) with
    Pass/Fail buttons; test() blocks until the auditor decides.
    """
    def __init__(self):
        # Most recent decision keypress ('p', 'f', or 'esc'); polled by test().
        self.lastKey = None
        QtGui.QWidget.__init__(self)
        self.resize(1200, 800)
        #self.showFullScreen()
        self.layout = QtGui.QGridLayout()
        self.setLayout(self.layout)
        self.view = GraphicsLayoutWidget()
        self.layout.addWidget(self.view, 0, 0, 1, 2)
        self.label = QtGui.QLabel()
        self.layout.addWidget(self.label, 1, 0, 1, 2)
        self.label.setWordWrap(True)
        font = QtGui.QFont("monospace", 14, QtGui.QFont.Bold)
        self.label.setFont(font)
        self.passBtn = QtGui.QPushButton('Pass')
        self.failBtn = QtGui.QPushButton('Fail')
        self.layout.addWidget(self.passBtn, 2, 0)
        self.layout.addWidget(self.failBtn, 2, 1)
        self.passBtn.clicked.connect(self.passTest)
        self.failBtn.clicked.connect(self.failTest)
        # Three side-by-side views: test output | standard | diff.
        self.views = (self.view.addViewBox(row=0, col=0),
                      self.view.addViewBox(row=0, col=1),
                      self.view.addViewBox(row=0, col=2))
        labelText = ['test output', 'standard', 'diff']
        for i, v in enumerate(self.views):
            v.setAspectLocked(1)
            v.invertY()
            v.image = ImageItem(axisOrder='row-major')
            v.image.setAutoDownsample(True)
            v.addItem(v.image)
            # NOTE(review): the label item is created but never added to the
            # view -- confirm whether v.addItem(v.label) was intended.
            v.label = TextItem(labelText[i])
            v.setBackgroundColor(0.5)
        # Pan/zoom in the first view drives the other two.
        self.views[1].setXLink(self.views[0])
        self.views[1].setYLink(self.views[0])
        self.views[2].setXLink(self.views[0])
        self.views[2].setYLink(self.views[0])
    def test(self, im1, im2, message):
        """Ask the user to decide whether an image test passes or fails.
        This method displays the test image, reference image, and the difference
        between the two. It then blocks until the user selects the test output
        by clicking a pass/fail button or typing p/f. If the user fails the test,
        then an exception is raised.
        """
        self.show()
        if im2 is None:
            message += '\nImage1: %s %s Image2: [no standard]' % (im1.shape, im1.dtype)
            im2 = np.zeros((1, 1, 3), dtype=np.ubyte)
        else:
            message += '\nImage1: %s %s Image2: %s %s' % (im1.shape, im1.dtype, im2.shape, im2.dtype)
        self.label.setText(message)
        self.views[0].image.setImage(im1)
        self.views[1].image.setImage(im2)
        diff = makeDiffImage(im1, im2)
        self.views[2].image.setImage(diff)
        self.views[0].autoRange()
        # Modal poll loop: wait for a pass/fail decision (button or keypress);
        # closing the window counts as a rejection.
        while True:
            QtGui.QApplication.processEvents()
            lastKey = self.lastKey
            self.lastKey = None
            if lastKey in ('f', 'esc') or not self.isVisible():
                raise Exception("User rejected test result.")
            elif lastKey == 'p':
                break
            time.sleep(0.03)
        # Release the (possibly large) images once a decision is made.
        for v in self.views:
            v.image.setImage(np.zeros((1, 1, 3), dtype=np.ubyte))
    def keyPressEvent(self, event):
        # 'esc' rejects; any other key is recorded lowercased ('p'/'f' matter).
        if event.key() == QtCore.Qt.Key_Escape:
            self.lastKey = 'esc'
        else:
            self.lastKey = str(event.text()).lower()
    def passTest(self):
        # Slot for the Pass button.
        self.lastKey = 'p'
    def failTest(self):
        # Slot for the Fail button.
        self.lastKey = 'f'
def getTestDataRepo():
    """Deprecated alias for getTestDataDirectory().

    The separate test-data repository was merged into the main repo; this
    wrapper remains for backward compatibility and will be removed.
    """
    # Fixed message: the implicitly-concatenated string literals were missing
    # separating spaces (rendering "...main repouse..." and "...removedin...")
    # and duplicated the word "data".
    warnings.warn(
        "Test data repo has been merged with the main repo; "
        "use getTestDataDirectory() instead, this method will be removed "
        "in a future version of pyqtgraph",
        DeprecationWarning, stacklevel=2
    )
    return getTestDataDirectory()
def getTestDataDirectory():
    """Return the POSIX-style path of the bundled reference-image directory."""
    images_dir = Path(__file__).absolute().parent / "images"
    return images_dir.as_posix()
def scenegraphState(view, name):
    """Return information about the scenegraph for debugging test failures.
    """
    pieces = [
        "====== Scenegraph state for %s ======\n" % name,
        "view size: %dx%d\n" % (view.width(), view.height()),
        "view transform:\n" + indent(transformStr(view.transform()), "  "),
    ]
    # Describe each top-level item (children are handled inside itemState).
    top_level = [it for it in view.scene().items() if it.parentItem() is None]
    pieces.extend(itemState(it) + '\n' for it in top_level)
    return "".join(pieces)
def itemState(root):
    """Recursively describe a graphics item subtree as indented text."""
    from pyqtgraph import ViewBox
    pieces = [str(root) + '\n']
    pieces.append('bounding rect: ' + str(root.boundingRect()) + '\n')
    if isinstance(root, ViewBox):
        # ViewBoxes additionally report their visible data range.
        pieces.append("view range: " + str(root.viewRange()) + '\n')
    pieces.append("transform:\n" + indent(transformStr(root.transform()).strip(), "  ") + '\n')
    for child in root.childItems():
        pieces.append(indent(itemState(child).strip(), "  ") + '\n')
    return ''.join(pieces)
def transformStr(t):
    """Format a QTransform's 3x3 matrix as three bracketed rows, one per line."""
    rows = ((t.m11(), t.m12(), t.m13()),
            (t.m21(), t.m22(), t.m23()),
            (t.m31(), t.m32(), t.m33()))
    return "".join("[%0.2f %0.2f %0.2f]\n" % row for row in rows)
def indent(s, pfx):
    """Prefix every line of *s* (split on newline) with *pfx*."""
    return pfx + ('\n' + pfx).join(s.split('\n'))
class TransposedImageItem(ImageItem):
    """ImageItem that can swap its image's first two axes before display."""
    # used for testing image axis order; we can test row-major and col-major using
    # the same test images
    def __init__(self, *args, **kwds):
        # Pop `transpose` here so ImageItem.__init__ never sees the extra kwarg.
        self.__transpose = kwds.pop('transpose', False)
        ImageItem.__init__(self, *args, **kwds)
    def setImage(self, image=None, **kwds):
        # Swap axes 0 and 1 when transposing was requested, then delegate.
        if image is not None and self.__transpose is True:
            image = np.swapaxes(image, 0, 1)
        return ImageItem.setImage(self, image, **kwds)
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Mindspore quantum simulator layer."""
import mindspore as ms
import mindspore.nn as nn
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from .operations import MQOps
from .operations import MQN2Ops
from .operations import MQAnsatzOnlyOps
from .operations import MQN2AnsatzOnlyOps
class MQLayer(nn.Cell):
    """
    MindQuantum trainable layer whose ansatz-circuit parameters are the
    trainable weights.

    Args:
        expectation_with_grad (GradOpsWrapper): a grad ops that receives encoder
            data and ansatz data and returns the expectation value and the
            gradient of the parameters with respect to the expectation.
        weight (Union[Tensor, str, Initializer, numbers.Number]): Initializer for
            the ansatz weights. It can be a Tensor, a string, an Initializer or a
            number. When a string is specified, values from 'TruncatedNormal',
            'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as
            well as constant 'One' and 'Zero' distributions are possible. Alias
            'xavier_uniform', 'he_uniform', 'ones' and 'zeros' are acceptable.
            Uppercase and lowercase are both acceptable. Refer to the values of
            Initializer for more details. Default: 'normal'.

    Inputs:
        - **enc_data** (Tensor) - Tensor of encoder data that you want to encode into quantum state.

    Outputs:
        Tensor, The expectation value of the hamiltonian.

    Raises:
        ValueError: If length of shape of `weight` is not equal to 1 or shape[0] of `weight`
            is not equal to `weight_size`.

    Supported Platforms:
        ``GPU``, ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindquantum import Circuit, Hamiltonian, QubitOperator
        >>> from mindquantum import Simulator, MQLayer
        >>> import mindspore as ms
        >>> ms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target="CPU")
        >>> enc = Circuit().ry('a', 0)
        >>> ans = Circuit().h(0).rx('b', 0)
        >>> ham = Hamiltonian(QubitOperator('Z0'))
        >>> sim = Simulator('projectq', 1)
        >>> grad_ops = sim.get_expectation_with_grad(ham, enc+ans,
        ...                                          encoder_params_name=['a'],
        ...                                          ansatz_params_name=['b'])
        >>> enc_data = ms.Tensor(np.array([[0.1]]))
        >>> net = MQLayer(grad_ops)
        >>> opti = ms.nn.Adam(net.trainable_params(), learning_rate=0.1)
        >>> train_net = ms.nn.TrainOneStepCell(net, opti)
        >>> for i in range(100):
        ...     train_net(enc_data)
        >>> net.weight.asnumpy()
        array([-3.1424556], dtype=float32)
        >>> net(enc_data)
        Tensor(shape=[1, 1], dtype=Float32, value=
        [[-9.98333767e-02]])
    """
    def __init__(self, expectation_with_grad, weight='normal'):
        super(MQLayer, self).__init__()
        self.evolution = MQOps(expectation_with_grad)
        # One trainable weight per ansatz parameter.
        weight_size = len(self.evolution.expectation_with_grad.ansatz_params_name)
        is_explicit_tensor = isinstance(weight, ms.Tensor)
        if is_explicit_tensor and (weight.ndim != 1 or weight.shape[0] != weight_size):
            raise ValueError(f"Weight init shape error, required ({weight_size}, ), but get {weight.shape}.")
        self.weight = Parameter(initializer(weight, weight_size, dtype=ms.float32), name='ansatz_weight')
    def construct(self, x):
        """Evaluate the hamiltonian expectation for encoder data `x`."""
        return self.evolution(x, self.weight)
class MQN2Layer(nn.Cell):
    """
    MindQuantum trainable layer. The parameters of ansatz circuit are trainable parameters.
    This layer will calculate the square of absolute value of expectation automatically.

    Args:
        expectation_with_grad (GradOpsWrapper): a grad ops that receive encoder data and
            ansatz data and return the square of absolute value of expectation value and
            gradient value of parameters respect to expectation.
        weight (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            convolution kernel. It can be a Tensor, a string, an Initializer or a number.
            When a string is specified, values from 'TruncatedNormal', 'Normal', 'Uniform',
            'HeUniform' and 'XavierUniform' distributions as well as constant 'One' and 'Zero'
            distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones' and
            'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to
            the values of Initializer for more details. Default: 'normal'.

    Inputs:
        - **enc_data** (Tensor) - Tensor of encoder data that you want to encode into quantum state.

    Outputs:
        Tensor, The square of absolute value of expectation value of the hamiltonian.

    Raises:
        ValueError: If length of shape of `weight` is not equal to 1 or shape[0] of `weight`
            is not equal to `weight_size`.

    Supported Platforms:
        ``GPU``, ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindquantum import Circuit, Hamiltonian, QubitOperator
        >>> from mindquantum import Simulator, MQN2Layer
        >>> import mindspore as ms
        >>> ms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target="CPU")
        >>> enc = Circuit().ry('a', 0)
        >>> ans = Circuit().h(0).rx('b', 0)
        >>> ham = Hamiltonian(QubitOperator('Z0'))
        >>> sim = Simulator('projectq', 1)
        >>> grad_ops = sim.get_expectation_with_grad(ham, enc+ans,
        ...                                          encoder_params_name=['a'],
        ...                                          ansatz_params_name=['b'])
        >>> enc_data = ms.Tensor(np.array([[0.1]]))
        >>> net = MQN2Layer(grad_ops)
        >>> opti = ms.nn.Adam(net.trainable_params(), learning_rate=0.1)
        >>> train_net = ms.nn.TrainOneStepCell(net, opti)
        >>> for i in range(100):
        ...     train_net(enc_data)
        >>> net.weight.asnumpy()
        array([-1.56476], dtype=float32)
        >>> net(enc_data)
        Tensor(shape=[1, 1], dtype=Float32, value=
        [[ 3.63158676e-07]])
    """
    def __init__(self, expectation_with_grad, weight='normal'):
        super(MQN2Layer, self).__init__()
        self.evolution = MQN2Ops(expectation_with_grad)
        # One trainable weight per ansatz parameter.
        weight_size = len(self.evolution.expectation_with_grad.ansatz_params_name)
        if isinstance(weight, ms.Tensor):
            if weight.ndim != 1 or weight.shape[0] != weight_size:
                # BUG FIX: a stray 'f' inside the f-string previously rendered
                # as "...but get f(2,)." -- now matches MQLayer's message.
                raise ValueError(f"Weight init shape error, required ({weight_size}, ), but get {weight.shape}.")
        self.weight = Parameter(initializer(weight, weight_size, dtype=ms.float32), name='ansatz_weight')
    def construct(self, x):
        """Evaluate |<H>|^2 for encoder data `x` with the current weights."""
        return self.evolution(x, self.weight)
class MQAnsatzOnlyLayer(nn.Cell):
    """
    MindQuantum trainable layer. The parameters of ansatz circuit are trainable parameters.

    Args:
        expectation_with_grad (GradOpsWrapper): a grad ops that receive encoder data and
            ansatz data and return the expectation value and gradient value of parameters
            respect to expectation.
        weight (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            convolution kernel. It can be a Tensor, a string, an Initializer or a number.
            When a string is specified, values from 'TruncatedNormal', 'Normal', 'Uniform',
            'HeUniform' and 'XavierUniform' distributions as well as constant 'One' and 'Zero'
            distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones' and
            'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to
            the values of Initializer for more details. Default: 'normal'.

    Outputs:
        Tensor, The expectation value of the hamiltonian.

    Raises:
        ValueError: If length of shape of `weight` is not equal to 1 or shape[0] of `weight`
            is not equal to `weight_size`.

    Supported Platforms:
        ``GPU``, ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindquantum import Circuit, Hamiltonian, QubitOperator
        >>> from mindquantum import Simulator, MQAnsatzOnlyLayer
        >>> import mindspore as ms
        >>> ms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target="CPU")
        >>> circ = Circuit().ry('a', 0).h(0).rx('b', 0)
        >>> ham = Hamiltonian(QubitOperator('Z0'))
        >>> sim = Simulator('projectq', 1)
        >>> grad_ops = sim.get_expectation_with_grad(ham, circ)
        >>> net =  MQAnsatzOnlyLayer(grad_ops)
        >>> opti = ms.nn.Adam(net.trainable_params(), learning_rate=0.1)
        >>> train_net = ms.nn.TrainOneStepCell(net, opti)
        >>> for i in range(100):
        ...     train_net()
        >>> net.weight.asnumpy()
        array([-1.5724511e+00,  1.3100551e-04], dtype=float32)
        >>> net()
        Tensor(shape=[1], dtype=Float32, value= [-9.99998629e-01])
    """
    def __init__(self, expectation_with_grad, weight='normal'):
        super(MQAnsatzOnlyLayer, self).__init__()
        self.evolution = MQAnsatzOnlyOps(expectation_with_grad)
        # One trainable weight per ansatz parameter.
        weight_size = len(self.evolution.expectation_with_grad.ansatz_params_name)
        if isinstance(weight, ms.Tensor):
            if weight.ndim != 1 or weight.shape[0] != weight_size:
                # BUG FIX: a stray 'f' inside the f-string previously rendered
                # as "...but get f(2,)." -- now matches MQLayer's message.
                raise ValueError(f"Weight init shape error, required ({weight_size}, ), but get {weight.shape}.")
        self.weight = Parameter(initializer(weight, weight_size, dtype=ms.float32), name='ansatz_weight')
    def construct(self):
        """Evaluate the hamiltonian expectation with the current weights."""
        return self.evolution(self.weight)
class MQN2AnsatzOnlyLayer(nn.Cell):
    """
    MindQuantum trainable layer. The parameters of ansatz circuit are trainable parameters.
    This layer will calculate the square of absolute value of expectation automatically.

    Args:
        expectation_with_grad (GradOpsWrapper): a grad ops that receive encoder data and
            ansatz data and return the expectation value and gradient value of parameters
            respect to expectation.
        weight (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            convolution kernel. It can be a Tensor, a string, an Initializer or a number.
            When a string is specified, values from 'TruncatedNormal', 'Normal', 'Uniform',
            'HeUniform' and 'XavierUniform' distributions as well as constant 'One' and 'Zero'
            distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones' and
            'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to
            the values of Initializer for more details. Default: 'normal'.

    Outputs:
        Tensor, the square of the absolute value of the expectation of the hamiltonian.

    Raises:
        ValueError: If length of shape of `weight` is not equal to 1 or shape[0] of `weight`
            is not equal to `weight_size`.

    Supported Platforms:
        ``GPU``, ``CPU``

    Examples:
        >>> import numpy as np
        >>> from mindquantum import Circuit, Hamiltonian, QubitOperator
        >>> from mindquantum import Simulator, MQN2AnsatzOnlyLayer
        >>> import mindspore as ms
        >>> ms.set_seed(43)
        >>> ms.context.set_context(mode=ms.context.PYNATIVE_MODE, device_target="CPU")
        >>> circ = Circuit().ry('a', 0).h(0).rx('b', 0)
        >>> ham = Hamiltonian(QubitOperator('Z0'))
        >>> sim = Simulator('projectq', 1)
        >>> grad_ops = sim.get_expectation_with_grad(ham, circ)
        >>> net = MQN2AnsatzOnlyLayer(grad_ops)
        >>> opti = ms.nn.Adam(net.trainable_params(), learning_rate=0.1)
        >>> train_net = ms.nn.TrainOneStepCell(net, opti)
        >>> for i in range(100):
        ...     train_net()
        >>> net.weight.asnumpy()
        array([ 0.05957536, -1.5686935 ], dtype=float32)
        >>> net()
        Tensor(shape=[1], dtype=Float32, value= [ 1.56753845e-08])
    """

    def __init__(self, expectation_with_grad, weight='normal'):
        """Wrap the grad ops into a MindSpore op and create the trainable ansatz weights."""
        super(MQN2AnsatzOnlyLayer, self).__init__()
        self.evolution = MQN2AnsatzOnlyOps(expectation_with_grad)
        # One trainable parameter per ansatz-circuit parameter.
        weight_size = len(self.evolution.expectation_with_grad.ansatz_params_name)
        if isinstance(weight, ms.Tensor):
            if weight.ndim != 1 or weight.shape[0] != weight_size:
                # Bug fix: the message previously rendered as "but get f(64,)"
                # because of a stray literal 'f' inside the f-string.
                raise ValueError(f"Weight init shape error, required ({weight_size}, ), "
                                 f"but got {weight.shape}.")
        self.weight = Parameter(initializer(weight, weight_size, dtype=ms.float32), name='ansatz_weight')

    def construct(self):
        """Forward pass: evaluate |<H>|^2 at the current ansatz weights."""
        return self.evolution(self.weight)
#!/usr/bin/env python
import random, math
import numpy as np
import game
from randomPlayer import RandomPlayer
import play
class OmniscientAdversary:
    """Tic-tac-toe adversary that scores each legal move by Monte-Carlo rollouts.

    For every valid move it simulates ``nPlay`` games (its own side played by a
    random player, the other side by the configured NN player) and picks
    uniformly among the moves whose mean score ties the best one.
    """

    def __init__(self, nPlay):
        self._rp = RandomPlayer()      # rollout policy for the adversary's own side
        self._rand = random.Random()   # private RNG so setSeed() makes play reproducible
        self._epsSame = 1e-6           # tolerance for "equally good" move scores
        self._nPlay = nPlay            # number of rollouts per candidate move

    def __str__(self):
        return "%s nPlay = %d" % (self.__class__.__name__, self._nPlay)

    def reconfigure(self, nn):
        """Set the neural-network player used for the opposing side in rollouts."""
        self._nn = nn

    def setSeed(self, seed):
        """Seed both the rollout player and the tie-breaking RNG (None = fresh entropy)."""
        if seed is None:
            self._rp.setSeed(None)
            self._rand.seed(None)
        else:
            self._rp.setSeed(seed)
            self._rand.seed(seed + 1)

    def move(self, ttt):
        """Return the best-scoring valid move, breaking ties uniformly at random."""
        bestQ = -1e99
        qs = []
        vm = ttt.validMoves()
        for m in vm:
            q = self._moveQuality(ttt, m)
            if q > bestQ:
                bestQ = q
            qs.append(q)
        bestMoves = []
        for iMove, q in enumerate(qs):
            if abs(q - bestQ) < self._epsSame:
                bestMoves.append(vm[iMove])
        # Bug fix: use the seeded instance RNG rather than the module-level
        # `random`, so that setSeed() actually controls move selection.
        return self._rand.choice(bestMoves)

    def xx_move(self, ttt):
        """Alternative (unused) strategy: sample a move proportionally to its score."""
        bestQ = -1e99
        qs = []
        vm = ttt.validMoves()
        for m in vm:
            q = self._moveQuality(ttt, m)
            if q > bestQ:
                bestQ = q
            qs.append(q)
        qs = np.array(qs)
        pMove = qs - qs.min() + 1e-6   # shift so every move has positive probability
        pMove /= pMove.sum()
        return np.random.choice(vm, p=pMove)

    def _moveQuality(self, ttt, m):
        """Mean score of `nPlay` simulated games that start with move `m`."""
        if ttt.whoseTurn() == game.X:
            pX = self._rp
            pO = self._nn
        else:
            pX = self._nn
            pO = self._rp
        scores = []
        for _ in range(self._nPlay):
            scores.append(play.simGame(pX, pO, ttt, m))
        return np.array(scores).mean()
if __name__ == "__main__":
    # Smoke test: pit a randomly-initialised NN player against the adversary.
    from ticTacToe import TicTacToe
    from nnPlayer import NNPlayerFactory

    factory = NNPlayerFactory([18])
    weights = np.random.normal(size=(factory.numParams(),))
    nn_player = factory(weights)
    adversary = OmniscientAdversary(nPlay=10)
    adversary.reconfigure(nn_player)
    print(play.play(TicTacToe, nn_player, adversary, True))
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math
import os
import pickle
from typing import List
import mxnet as mx
import numpy as np
from mxnet import gluon, autograd
from bertsota.common.config import _Config
from bertsota.common.data import ParserVocabulary, DataLoader, ConllWord, ConllSentence
from bertsota.common.exponential_scheduler import ExponentialScheduler
from bertsota.common.utils import init_logger, mxnet_prefer_gpu, Progbar
from bertsota.parser.biaffine_parser import BiaffineParser, SharedRNNParser, SharedPrivateRNNParser, BlendParser, \
RefineParser, StairParser
from bertsota.parser.evaluate import evaluate_official_script
from bertsota.parser.evaluate.evaluate import evaluate_joint_official_script
class JointParser(object):
    """User interfaces for biaffine dependency parser. It wraps a biaffine model inside, provides training,
    evaluating and parsing. "Joint" here means several treebanks/formalisms are
    trained together, one vocabulary and one data loader per training file.
    """

    def __init__(self, cls_parser=SharedRNNParser):
        super().__init__()
        # The underlying network; built lazily in train() or load().
        self._parser = None
        # One ParserVocabulary per training file (a list, not a single vocab).
        self._vocab = []
        # Class object used to construct the parser network.
        self.cls_parser = cls_parser

    def train(self, train_file: List[str], dev_file: List[str], save_dir, pretrained_embeddings_file=None,
              min_occur_count=2,
              lstm_layers=3, word_dims=100, tag_dims=100, dropout_emb=0.33, lstm_hiddens=400,
              dropout_lstm_input=0.33, dropout_lstm_hidden=0.33, mlp_arc_size=500, mlp_rel_size=100,
              dropout_mlp=0.33, learning_rate=1e-3, decay=.75, decay_steps=5000, beta_1=.9, beta_2=.9, epsilon=1e-12,
              num_buckets_train=40,
              num_buckets_valid=10, train_iters=50000, train_batch_size=5000, dev_batch_size=5000, validate_every=100,
              save_after=5000, root='root', transfer=None, bert_path=None, debug=False):
        """Train a deep biaffine dependency parser

        Parameters
        ----------
        train_file : List[str]
            paths to training sets, one per task/formalism
        dev_file : List[str]
            paths to dev sets, parallel to `train_file`
        save_dir : str
            a directory for saving model and related meta-data
        pretrained_embeddings_file : str
            pre-trained embeddings file, plain text format
        min_occur_count : int
            threshold of rare words, which will be replaced with UNKs,
        lstm_layers : int
            layers of lstm
        word_dims : int
            dimension of word embedding
        tag_dims : int
            dimension of tag embedding
        dropout_emb : float
            word dropout
        lstm_hiddens : int
            size of lstm hidden states
        dropout_lstm_input : int
            dropout on x in variational RNN
        dropout_lstm_hidden : int
            dropout on h in variational RNN
        mlp_arc_size : int
            output size of MLP for arc feature extraction
        mlp_rel_size : int
            output size of MLP for rel feature extraction
        dropout_mlp : float
            dropout on the output of LSTM
        learning_rate : float
            learning rate
        decay : float
            see ExponentialScheduler
        decay_steps : int
            see ExponentialScheduler
        beta_1 : float
            see ExponentialScheduler
        beta_2 : float
            see ExponentialScheduler
        epsilon : float
            see ExponentialScheduler
        num_buckets_train : int
            number of buckets for training data set
        num_buckets_valid : int
            number of buckets for dev data set
        train_iters : int
            training iterations
        train_batch_size : int
            training batch size
        dev_batch_size : int
            test batch size
        validate_every : int
            validate on dev set every such number of batches
        save_after : int
            skip saving model in early epochs
        root : str
            token for ROOT
        transfer : str
            directory of a previously trained model to transfer vocab/weights from
        bert_path : tuple
            (train_bert, dev_bert) feature paths; presumably precomputed BERT
            embeddings — TODO confirm against DataLoader
        debug : bool
            debug mode

        Returns
        -------
        DepParser
            parser itself
        """
        logger = init_logger(save_dir)
        config = _Config(train_file, dev_file, None, save_dir, pretrained_embeddings_file, min_occur_count,
                         lstm_layers, word_dims, tag_dims, dropout_emb, lstm_hiddens, dropout_lstm_input,
                         dropout_lstm_hidden, mlp_arc_size, mlp_rel_size, dropout_mlp, learning_rate, decay,
                         decay_steps,
                         beta_1, beta_2, epsilon, num_buckets_train, num_buckets_valid, None, train_iters,
                         train_batch_size, 0, debug)
        if transfer:
            # Transfer learning: reuse the source model's vocabularies and only
            # add a vocabulary for the last (new) training file.
            with open(os.path.join(transfer, 'vocab.pkl'), 'rb') as f:
                self._vocab = pickle.load(f)
            self._vocab.append(ParserVocabulary(train_file[-1],
                                                pretrained_embeddings_file,
                                                min_occur_count, root=root,
                                                shared_vocab=self._vocab[0],
                                                ))
        else:
            # One vocabulary per training file; later ones share the first.
            # NOTE(review): `d` (the dev file) is never used in this loop.
            for t, d in zip(train_file, dev_file):
                self._vocab.append(ParserVocabulary(t,
                                                    pretrained_embeddings_file,
                                                    min_occur_count, root=root,
                                                    shared_vocab=None if len(self._vocab) == 0 else self._vocab[0],
                                                    ))
        with open(config.save_vocab_path, 'wb') as f:
            pickle.dump(self._vocab, f)
        for voc in self._vocab:
            voc.log_info(logger)
        with mx.Context(mxnet_prefer_gpu()):
            # One data loader per task, all sharing the same bucket count.
            data_loaders = [DataLoader(t, num_buckets_train, vocab, bert=bert_path[0] if bert_path else None) for
                            t, vocab
                            in zip(train_file, self._vocab)]
            config.bert_dim = data_loaders[0].bert_dim
            config.save()
            self._parser = parser = self.cls_parser(self._vocab, word_dims, tag_dims,
                                                    dropout_emb,
                                                    lstm_layers,
                                                    lstm_hiddens, dropout_lstm_input,
                                                    dropout_lstm_hidden,
                                                    mlp_arc_size,
                                                    mlp_rel_size, dropout_mlp, bert=data_loaders[0].bert_dim,
                                                    debug=debug)
            if transfer:
                parser.transfer = True
                parser.fill(transfer)
            parser.initialize()
            scheduler = ExponentialScheduler(learning_rate, decay, decay_steps)
            optimizer = mx.optimizer.Adam(learning_rate, beta_1, beta_2, epsilon,
                                          lr_scheduler=scheduler)
            trainer = gluon.Trainer(parser.collect_params(), optimizer=optimizer)
            global_step = 0
            best_LF = 0.
            batch_id = 0
            epoch = 1
            total_epoch = math.ceil(train_iters / validate_every)
            logger.info("Epoch {} out of {}".format(epoch, total_epoch))
            bar = Progbar(target=min(validate_every, train_iters))
            # One batch generator per task; batches from all tasks are consumed
            # in lock-step each iteration.
            gs = [dl.get_batches(batch_size=train_batch_size, shuffle=False) for dl in data_loaders]
            while global_step < train_iters:
                arcs_tasks = []
                rels_tasks = []
                bert_tasks = []
                for g in gs:
                    words, bert, tags, arcs, rels = next(g, (None, None, None, None, None))
                    if words is None:
                        break
                    arcs_tasks.append(arcs)
                    rels_tasks.append(rels)
                    bert_tasks.append(bert)
                if words is None:
                    # Any generator exhausted: restart all of them (new epoch over data).
                    gs = [dl.get_batches(batch_size=train_batch_size, shuffle=False) for dl in data_loaders]
                    continue
                with autograd.record():
                    # NOTE(review): only the last task's words/bert/tags are
                    # passed; arcs/rels go in as per-task lists — verify this
                    # matches the parser's forward() contract.
                    arc_accuracy, rel_accuracy, loss = parser.forward(words, bert, tags, arcs_tasks,
                                                                      rels_tasks)
                    loss_value = loss.asscalar()
                loss.backward()
                trainer.step(train_batch_size)
                batch_id += 1
                try:
                    bar.update(batch_id,
                               exact=[("LR", rel_accuracy, 2),
                                      ("loss", loss_value)])
                except OverflowError:
                    pass  # sometimes loss can be 0 or infinity, crashes the bar
                global_step += 1
                if global_step % validate_every == 0:
                    batch_id = 0
                    UF, LF, speed = evaluate_joint_official_script(parser, self._vocab, num_buckets_valid,
                                                                   dev_batch_size,
                                                                   dev_file,
                                                                   os.path.join(save_dir, 'dev.predict.conllu'),
                                                                   bert=None if bert_path is None else bert_path[1])
                    score_str = ''
                    for dataset, lf in zip(dev_file, LF):
                        dataset = os.path.basename(dataset).replace('.conllu', '')
                        lf = lf * 100
                        score_str += '{}={:0.1f} '.format(dataset, lf)
                    # Model selection score: last task's LF under transfer,
                    # otherwise the average LF over all tasks.
                    if transfer:
                        LF = LF[-1] * 100
                    else:
                        LF = sum(LF) / len(LF) * 100
                    score_str += '{}={:0.1f} '.format('avg', LF)
                    logger.info(score_str + '%d sents/s' % (speed))
                    epoch += 1
                    bar = Progbar(target=min(validate_every, train_iters - global_step))
                    if global_step > save_after and LF > best_LF:
                        logger.info('- new best score!')
                        best_LF = LF
                        parser.save(config.save_model_path)
                    if global_step < train_iters:
                        logger.info("Epoch {} out of {}".format(epoch, total_epoch))
            # When validate_every is too big
            if not os.path.isfile(config.save_model_path) or best_LF == 0:
                parser.save(config.save_model_path)
        return self

    def load(self, path, debug=False):
        """Load from disk

        Parameters
        ----------
        path : str
            path to the directory which typically contains a config.pkl file and a model.bin file
        debug : bool
            print the loaded config when True

        Returns
        -------
        DepParser
            parser itself
        """
        config = _Config.load(os.path.join(path, 'config.pkl'))
        if debug:
            print(config)
        with open(config.save_vocab_path, 'rb') as f:
            self._vocab = pickle.load(f)
        with mx.Context(mxnet_prefer_gpu()):
            self._parser = self.cls_parser(self._vocab, config.word_dims, config.tag_dims, config.dropout_emb,
                                           config.lstm_layers,
                                           config.lstm_hiddens, config.dropout_lstm_input, config.dropout_lstm_hidden,
                                           config.mlp_arc_size,
                                           config.mlp_rel_size, config.dropout_mlp, bert=config.bert_dim, debug=True)
            self._parser.load(config.save_model_path)
            # Pretrained word embeddings are not stored with the model, so they
            # must be (re-)initialized after loading.
            self._parser.rnn.pret_word_embs.initialize(ctx=mxnet_prefer_gpu())
        return self

    def dump(self, path):
        # Delegate: serialize the wrapped parser's weights to `path`.
        self._parser.dump(path)

    def fill(self, path):
        # Delegate: fill the wrapped parser's weights from a dump at `path`.
        self._parser.fill(path)

    def evaluate(self, test_files: List[str], save_dir=None, logger=None, num_buckets_test=10, test_batch_size=5000,
                 bert_path=None, debug=False):
        """Run evaluation on test set

        Parameters
        ----------
        test_files : List[str]
            paths to test sets, one per task
        save_dir : str
            where to store intermediate results and log
        logger : logging.logger
            logger for printing results
        num_buckets_test : int
            number of clusters for sentences from test set
        test_batch_size : int
            batch size of test set

        Returns
        -------
        float
            average labeled F1 (LF) over all tasks, in percent
        """
        parser = self._parser
        with mx.Context(mxnet_prefer_gpu()):
            UF, LF, speed = evaluate_joint_official_script(parser, self._vocab, num_buckets_test, test_batch_size,
                                                           test_files, save_dir, bert=bert_path,
                                                           debug=debug)
            score_str = 'Test\n'
            for dataset, uf, lf in zip(test_files, UF, LF):
                dataset = os.path.basename(dataset)
                uf = uf * 100
                lf = lf * 100
                score_str += '{} UF={:0.1f} LF={:0.1f}\n'.format(dataset, uf, lf)
            LF = sum(LF) / len(LF) * 100
            if logger is None:
                logger = init_logger(save_dir, 'test.log')
            logger.info(score_str + '%d sents/s' % (speed))
            return LF

    def parse(self, sentence):
        """Parse raw sentence into ConllSentence

        Parameters
        ----------
        sentence : list
            a list of (word, tag) tuples

        Returns
        -------
        ConllSentence
            ConllSentence object
        """
        # Column vectors with a leading ROOT token at index 0.
        words = np.zeros((len(sentence) + 1, 1), np.int32)
        tags = np.zeros((len(sentence) + 1, 1), np.int32)
        words[0, 0] = ParserVocabulary.ROOT
        tags[0, 0] = ParserVocabulary.ROOT
        # NOTE(review): self._vocab is a *list* of vocabularies in this joint
        # parser, but word2id/tag2id are called on it directly below — verify
        # this method is used with a single-vocab parser.
        vocab = self._vocab
        for i, (word, tag) in enumerate(sentence):
            words[i + 1, 0], tags[i + 1, 0] = vocab.word2id(word.lower()), vocab.tag2id(tag)
        with mx.Context(mxnet_prefer_gpu()):
            # NOTE(review): forward() is called with a different arity than in
            # train(); confirm the inference signature of the parser class.
            outputs = self._parser.forward(words, tags)
        words = []
        for arc, rel, (word, tag) in zip(outputs[0][0], outputs[0][1], sentence):
            words.append(ConllWord(id=len(words) + 1, form=word, pos=tag, head=arc, relation=vocab.id2rel(rel)))
        return ConllSentence(words)
if __name__ == '__main__':
    # Train a joint semantic dependency parser on the three SemEval-2015
    # formalisms (DM, PAS, PSD) with small, debug-sized settings.
    parser = JointParser(RefineParser)
    save_dir = 'data/model/joint-sdp'
    parser.train(train_file=['data/semeval15/en.id.dm.conllu',
                             'data/semeval15/en.id.pas.conllu',
                             'data/semeval15/en.id.psd.conllu'],
                 dev_file=['data/semeval15/en.id.dm.conllu',
                           'data/semeval15/en.id.pas.conllu',
                           'data/semeval15/en.id.psd.conllu'],
                 save_dir=save_dir,
                 pretrained_embeddings_file='data/embedding/glove/glove.6B.100d.debug.txt',
                 num_buckets_train=20,
                 train_iters=1000,
                 root='root',
                 debug=True)
    # Uncomment to evaluate a previously trained model instead of training:
    # parser.load(save_dir)
    # parser.evaluate(test_files=['data/semeval15/en.id.dm.conllu',
    #                             'data/semeval15/en.id.pas.conllu',
    #                             'data/semeval15/en.id.psd.conllu'], save_dir=save_dir,
    #                 num_buckets_test=10, debug=True)
import os
import streamlit.components.v1 as components
import streamlit as st
import time
import numpy as np
import IPython.display as ipd
#ipd.Audio(audio, rate=16000)
from online_scd.model import SCDModel
from online_scd.streaming import StreamingDecoder
import timeit
import base64
import scipy.io.wavfile
from online_scd.utils import load_wav_file
import multiprocessing
#import playsound
import queue
import time
from typing import List
import numpy as np
import pydub
from pydub.playback import play
import streamlit as st
from streamlit_webrtc import (
ClientSettings,
WebRtcMode,
webrtc_streamer,
)
from pathlib import Path
import os, time, sys
# _RELEASE toggles between development mode (frontend served by the local
# `npm run start` dev server) and production mode (frontend served from the
# static build directory).
_RELEASE = False
upload_counter = 0  # NOTE(review): appears unused at module level

if not _RELEASE:
    # Development: bind the Streamlit component to the local dev server.
    # `declare_component` is the only call needed to create the binding
    # between Streamlit and the component frontend.
    _component_func = components.declare_component(
        "my_component",
        url="http://localhost:3001",
    )
    # Pre-trained speaker-change-detection checkpoint, the bundled sample
    # file, and the directory uploads are saved under.
    model = SCDModel.load_from_checkpoint("template/my_component/test/sample_model/checkpoints/epoch=102.ckpt")
    file_name = "template/my_component/frontend/src/audio/3321821.wav"
    build_dir = "template/my_component/frontend/src"
else:
    # Production: serve the component from its compiled build directory.
    parent_dir = os.path.dirname(os.path.abspath(__file__))
    build_dir = os.path.join(parent_dir, "frontend/build")
    _component_func = components.declare_component("my_component", path=build_dir)
    # Same checkpoint/sample paths as development mode.
    model = SCDModel.load_from_checkpoint("template/my_component/test/sample_model/checkpoints/epoch=102.ckpt")
    file_name = "template/my_component/frontend/src/audio/3321821.wav"
# Create a wrapper function for the component. This is an optional
# best practice - we could simply expose the component function returned by
# `declare_component` and call it done. The wrapper allows us to customize
# our component's API: we can pre-process its input args, post-process its
# output value, and add a docstring for users.
def my_component(name, audio, key=None):
    """Create a new instance of "my_component".

    Parameters
    ----------
    name: str
        The name of the thing we're saying hello to. The component will display
        the text "Hello, {name}!"
    audio: str
        Identifier of the audio to play in the frontend — presumably the file
        name under the frontend's audio directory, or "sample" for the bundled
        file; confirm against the frontend code.
    key: str or None
        An optional key that uniquely identifies this component. If this is
        None, and the component's arguments are changed, the component will
        be re-mounted in the Streamlit frontend and lose its current state.

    Returns
    -------
    int
        The number of times the component's "Click Me" button has been clicked.
        (This is the value passed to `Streamlit.setComponentValue` on the
        frontend.)
    """
    # Call through to the private component function. Arguments passed here
    # are available to the frontend in an "args" dictionary. "default" is the
    # initial return value before the user has interacted with the component.
    component_value = _component_func(name=name, audio=audio, key=key, default=0)
    return component_value
# Add some test code to play with the component while it's in development.
# During development, we can run this just as we would any other Streamlit
# app: `$ streamlit run my_component/__init__.py`
def stream_sample():
    """Stream the bundled sample .wav through the SCD model, plotting
    speaker-change probabilities as an animated line chart.
    """
    st.subheader("Streaming a sample .wav")
    # Load the sample file, downmix to mono 16 kHz, and scale int16 -> [-1, 1].
    sound = pydub.AudioSegment.from_wav(file_name)
    sound = sound.set_channels(1).set_frame_rate(16000)
    audio = np.array(sound.get_array_of_samples())/32768
    last_rows = np.zeros((1,1))
    chart = st.line_chart(last_rows)
    frame_number = 0
    start_0 = timeit.default_timer()
    # The component renders a play button and returns truthy once clicked.
    # A fixed "key" keeps the component from being re-mounted on rerun.
    was_clicked = my_component(name="test", audio = "sample", key="foo")
    if was_clicked:
        # Feed the audio in 1000-sample chunks (62.5 ms at 16 kHz) and append
        # the model's change probability for every frame it emits.
        for i in range(0, len(audio), 1000):
            start = timeit.default_timer()
            for probs in st.session_state.model.process_audio(audio[i: i+1000]):
                new_rows = np.zeros((1, 1))
                new_rows[0,0] = probs[1].detach().numpy()
                chart.add_rows(new_rows)
                frame_number += 1
            end = timeit.default_timer()
            # Pace the loop to roughly real time: 1/16 s per chunk minus the
            # time spent processing it.
            time.sleep(max(0,1/16-end+start))
def stream_mic():
    """Stream live microphone audio (via WebRTC) through the SCD model and
    plot speaker-change probabilities in real time.
    """
    st.subheader("Streaming from microphone")
    # Receive-only audio stream from the browser microphone.
    webrtc_ctx = webrtc_streamer(
        key="speech-to-text",
        mode=WebRtcMode.SENDONLY,
        audio_receiver_size=1024,
        client_settings=ClientSettings(
            rtc_configuration={
                "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
            },
            media_stream_constraints={"video": False, "audio": True},
        ),
    )
    status_indicator = st.empty()
    if not webrtc_ctx.state.playing:
        return
    status_indicator.write("Loading...")
    text_output = st.empty()
    stream = None  # NOTE(review): unused — candidate for removal
    last_rows = np.zeros((1,1))
    chart = st.line_chart(last_rows)
    streaming_decoder = StreamingDecoder(model)  # NOTE(review): unused; the loop below uses st.session_state.model
    frame_number = 0
    status_indicator.write("Model loaded.")
    ct=0  # total number of 16 kHz samples processed so far
    while True:
        if webrtc_ctx.audio_receiver:
            sound_chunk = pydub.AudioSegment.empty()
            try:
                audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
            except queue.Empty:
                time.sleep(0.1)
                status_indicator.write("No frame arrived.")
                continue
            status_indicator.write("Running. Say something!")
            # Concatenate all frames received in this poll into one segment.
            for audio_frame in audio_frames:
                sound = pydub.AudioSegment(
                    data=audio_frame.to_ndarray().tobytes(),
                    sample_width=audio_frame.format.bytes,
                    frame_rate=audio_frame.sample_rate,
                    channels=len(audio_frame.layout.channels),
                )
                sound_chunk += sound
            if len(sound_chunk) > 0:
                # Downmix to mono 16 kHz and scale int16 samples to [-1, 1].
                sound_chunk = sound_chunk.set_channels(1).set_frame_rate(
                    16000
                )
                buffer = np.array(sound_chunk.get_array_of_samples())
                text_output.markdown(f"{ct/16000} seconds")
                buffer = np.array(buffer)/32768
                ct+=len(buffer)
                # Feed the buffer to the model in 1000-sample chunks and plot
                # each emitted speaker-change probability.
                for i in range(0, len(buffer), 1000):
                    for probs in st.session_state.model.process_audio(buffer[i: i+1000]):
                        new_rows = np.zeros((1, 1))
                        new_rows[0,0] = probs[1].detach().numpy()
                        chart.add_rows(new_rows)
                        frame_number += 1
        else:
            status_indicator.write("AudioReciver is not set. Abort.")
            break
file_changed = False  # set True by upload_file() when the uploader widget changes

def upload_file():
    """`on_change` callback for the file uploader: flag that a new file arrived."""
    global file_changed
    file_changed = True

# One-time per-session initialisation: remember the last upload's name and
# build a single StreamingDecoder around the module-level model.
if 'upload' not in st.session_state:
    st.session_state['upload'] = 'value'
if 'model' not in st.session_state:
    st.session_state['model'] = StreamingDecoder(model)
def stream_upload():
    """Let the user upload a .wav file, stream its first minute through the
    SCD model, and plot speaker-change probabilities as a line chart.

    The converted upload is saved under the frontend's audio directory with a
    fresh numeric name so the component can play it back.
    """
    st.subheader("Streaming an upload")
    # Bug fix: pass the callback itself, not its return value. The original
    # `on_change=upload_file()` invoked the function immediately during script
    # execution and registered None as the callback.
    uploaded_file = st.file_uploader("Choose a file", on_change=upload_file)
    if uploaded_file is not None:
        if (uploaded_file.name != st.session_state['upload']):
            # A new file arrived: convert and cache it under a fresh name.
            st.session_state['upload'] = uploaded_file.name
            path = build_dir + "/audio"
            current_uploads = []
            for f in os.listdir(path):
                current_uploads.append(f.split(".")[0])
            # Find the smallest integer not already used as a file stem.
            i = 0
            while True:
                if str(i) not in current_uploads:
                    new_name = str(i)
                    break
                i += 1
            # Downmix to mono 16 kHz and scale int16 samples to [-1, 1].
            sound = pydub.AudioSegment.from_wav(uploaded_file)
            sound = sound.set_channels(1).set_frame_rate(16000)
            # Only consider the first minute of the file for uploads.
            sound = sound[:60*1000]
            audio = np.array(sound.get_array_of_samples())/32768
            file_name = new_name + ".wav"
            save_location = build_dir + "/audio/" + file_name
            # Prepend an attenuated copy of the first 2 s before exporting
            # (presumably a warm-up/lead-in for playback — confirm intent).
            sound = (sound[:2000]-1000) + sound
            sound.export(save_location, format="wav")
            st.session_state['file_name'] = file_name
            st.session_state['audio'] = audio
        file_name = st.session_state['file_name']
        start_0 = timeit.default_timer()
        # The component renders a play button; truthy once clicked.
        was_clicked = my_component(name="test2", audio=file_name)
        if was_clicked:
            frame_number = 0
            last_rows = np.zeros((1,1))
            chart = st.line_chart(last_rows)
            # Feed the audio in 1000-sample chunks (62.5 ms at 16 kHz) and
            # plot the model's change probability for each emitted frame.
            for i in range(0, len(st.session_state.audio), 1000):
                start = timeit.default_timer()
                for probs in st.session_state.model.process_audio(st.session_state.audio[i: i+1000]):
                    new_rows = np.zeros((1, 1))
                    new_rows[0,0] = probs[1].detach().numpy()
                    chart.add_rows(new_rows)
                    frame_number += 1
                end = timeit.default_timer()
                # Pace to roughly real time: 1/16 s per chunk minus work time.
                time.sleep(max(0,1/16-end+start))
def main():
    """Render the demo page: intro text, an audio-source selector that
    dispatches to the matching streaming function, and cleanup of stale
    uploaded files."""
    st.header("Demo of Collar-Aware Training for Speaker Change Detection")
    st.markdown("The model uses a multi-layer LSTM on top of pre-trained speech embeddings, and a final softmax layer. The model uses a step size of 100 ms (i.e., it outputs 10 decisions per second). The model is implemented in Pytorch while this demo was built using Streamlit.")
    st.markdown("The model is trained using a special version of cross-entropy training which tolerates small errors in the hypothesized speaker change timestamps. Due to this, the softmax outputs of the trained model are very peaky and do not require any local maxima tracking for extracting the final speaker turn points. This makes the model suitable for online appications.")
    st.markdown("This demo visualizes the output of the model for an audio source. The audio source can be either a sample file, a microphone or an uploaded file, first 60 seconds of which is used.")

    # Map each selectable label to its streaming handler.
    sources = {
        'A sample file': stream_sample,
        'A microphone': stream_mic,
        'An uploaded .wav file': stream_upload,
    }
    option = st.selectbox(
        'Which audio source would you like to use?',
        tuple(sources), 0)
    handler = sources.get(option)
    if handler is not None:
        handler()

    # Garbage-collect uploaded .wav files older than an hour, keeping the
    # bundled sample file.
    audio_dir = build_dir + "/audio"
    cutoff = time.time() - 3600
    for entry in os.listdir(audio_dir):
        if entry == "3321821.wav" or entry[-3:] != "wav":
            continue
        full_path = os.path.join(audio_dir, entry)
        if os.stat(full_path).st_mtime < cutoff and os.path.isfile(full_path):
            os.remove(full_path)

if __name__ == "__main__":
    main()
'''
03_WindyGridWorld_nStepSARSA_OffPolicy.py : n-step off-policy SARSA applied to Windy Grid World problem (Example 6.5)
Cem Karaoguz, 2020
MIT License
'''
import numpy as np
import pylab as pl
from IRL.environments.Gridworlds import StochasticGridWorld
from IRL.agents.TemporalDifferenceLearning import nStepOffPolicySARSA
from IRL.utils.Policies import StochasticPolicy
from IRL.utils.Helpers import runSimulation
def runExperiment(nEpisodes, env, agent, policy_behaviour, doUpdateBehaviourPolicy):
    """Run `nEpisodes` episodes of n-step off-policy SARSA.

    Each environment step appends a transition dict to a growing experience
    list and hands the last two entries to the agent for its update. After an
    episode, the behaviour policy can optionally be refreshed to an e-soft
    version of the learned action values.

    Returns a list of per-episode reward sums and an (N, 2) array of
    [episode, cumulative timestep] rows, one per environment step.
    """
    reward_sums = []
    episodesvstimesteps = []
    timesteps = 0
    for episode in range(nEpisodes):
        if episode % 10 == 0:
            print("Episode : ", episode)
        state = env.reset()
        action = policy_behaviour.sampleAction(state)
        done = False
        experiences = [{}]
        reward_sums.append(0.0)
        while not done:
            timesteps += 1
            # Fill in the pending transition with the pre-step situation.
            pending = experiences[-1]
            pending['state'] = state
            pending['action'] = action
            pending['done'] = done
            new_state, reward, done = env.step(action)
            new_action = policy_behaviour.sampleAction(new_state)
            experiences.append({'state': new_state,
                                'reward': reward,
                                'done': done,
                                'action': new_action})
            # The agent sees the completed transition plus its successor.
            agent.update(experiences[-2:], policy_behaviour)
            state, action = new_state, new_action
            episodesvstimesteps.append([episode, timesteps])
            reward_sums[-1] += reward
        if doUpdateBehaviourPolicy:
            # Update behaviour policy to be the e-soft version of the target policy.
            for idx_state in range(env.nStates):
                policy_behaviour.update(idx_state, agent.actionValueTable[idx_state, :])
    return reward_sums, np.array(episodesvstimesteps)
if __name__=="__main__":
    # Experiment configuration: compare 1-, 5- and 10-step off-policy SARSA
    # on the Windy Grid World (Sutton & Barto, Example 6.5 and Exercises 6.9/6.10).
    exerciseID = 0       # 0: Example 6.5; 1: Exercise 6.9/6.10; else: Exercise 6.9 part 2
    nExperiments = 1     # independent repetitions averaged together
    nEpisodes = 800

    # Environment
    sizeX = 10
    sizeY = 7
    defaultReward = -1.0
    startStates = [(0,3)]
    terminalStates = [(7,3)]

    if exerciseID==0:
        # Example 6.5: four compass actions, deterministic wind.
        actionMapping = {0:(np.array([0,-1]), "N"), 1:(np.array([0,1]), "S"), 2:(np.array([1,0]), "E"), 3:(np.array([-1,0]), "W")}
        sigmaY_actionNoise = 0
    elif exerciseID==1:
        # Exercise 6.9 part 1: king's moves (eight actions).
        actionMapping = {0:(np.array([0,-1]), "N"), 1:(np.array([0,1]), "S"), 2:(np.array([1,0]), "E"), 3:(np.array([-1,0]), "W"),
                         4:(np.array([1,-1]), "NE"), 5:(np.array([1,1]), "SE"), 6:(np.array([-1,-1]), "NW"), 7:(np.array([-1,1]), "SW")}
        # Example 6.5 and Exercise 6.9: deterministic wind.
        sigmaY_actionNoise = 0
        # Exercise 6.10: stochastic wind.
        # NOTE(review): this assignment overrides the one above; comment it
        # out to run the deterministic variant.
        sigmaY_actionNoise = 1
    else:
        # Exercise 6.9 part 2: king's moves plus a "stay" action.
        actionMapping = {0:(np.array([0,-1]), "N"), 1:(np.array([0,1]), "S"), 2:(np.array([1,0]), "E"), 3:(np.array([-1,0]), "W"),
                         4:(np.array([1,-1]), "NE"), 5:(np.array([1,1]), "SE"), 6:(np.array([-1,-1]), "NW"), 7:(np.array([-1,1]), "SW"), 8:(np.array([0,0]), "0")}
        sigmaY_actionNoise = 0

    # Wind: columns 3-5 push up by 1, columns 6-7 by 2, column 8 by 1
    # (optionally with Gaussian noise sigmaY_actionNoise).
    actionNoiseParams = {}
    aux = [(x,y) for x in range(3,6) for y in range(0,7)]
    for pos in aux:
        actionNoiseParams[pos] = [0,-1,0,sigmaY_actionNoise]
    aux = [(x,y) for x in range(6,8) for y in range(0,7)]
    for pos in aux:
        actionNoiseParams[pos] = [0,-2,0,sigmaY_actionNoise]
    aux = [(8,y) for y in range(0,7)]
    for pos in aux:
        actionNoiseParams[pos] = [0,-1,0,sigmaY_actionNoise]

    # Agent hyper-parameters: three n-step SARSA variants.
    alpha_nStepOPSARSA_1 = 0.1
    gamma_nStepOPSARSA_1 = 1.0
    n_nStepOPSARSA_1 = 1
    alpha_nStepOPSARSA_2 = 0.1
    gamma_nStepOPSARSA_2 = 1.0
    n_nStepOPSARSA_2 = 5
    alpha_nStepOPSARSA_3 = 0.05
    gamma_nStepOPSARSA_3 = 1.0
    n_nStepOPSARSA_3 = 10

    # Behaviour policy: e-soft, optionally refreshed from the target policy.
    doUpdateBehaviourPolicy = True
    epsilon_behaviourPolicy = 0.1

    env = StochasticGridWorld(sizeX, sizeY, actionNoiseParams=actionNoiseParams, startStates=startStates,
                              defaultReward=defaultReward, terminalStates=terminalStates, actionMapping=actionMapping)
    env.printEnv()

    # Incremental averages over experiments.
    avg_reward_sums_nStepOPSARSA_1 = np.zeros(nEpisodes)
    avg_reward_sums_nStepOPSARSA_2 = np.zeros(nEpisodes)
    avg_reward_sums_nStepOPSARSA_3 = np.zeros(nEpisodes)
    for idx_experiment in range(1, nExperiments+1):
        print("Experiment : ", idx_experiment)
        agent_nStepOPSARSA_1 = nStepOffPolicySARSA(env.nStates, env.nActions, alpha_nStepOPSARSA_1, gamma_nStepOPSARSA_1, n_nStepOPSARSA_1)
        agent_nStepOPSARSA_2 = nStepOffPolicySARSA(env.nStates, env.nActions, alpha_nStepOPSARSA_2, gamma_nStepOPSARSA_2, n_nStepOPSARSA_2)
        agent_nStepOPSARSA_3 = nStepOffPolicySARSA(env.nStates, env.nActions, alpha_nStepOPSARSA_3, gamma_nStepOPSARSA_3, n_nStepOPSARSA_3)
        # Each agent gets a fresh behaviour policy.
        policy_behaviour = StochasticPolicy(env.nStates, env.nActions, policyUpdateMethod="esoft", epsilon=epsilon_behaviourPolicy)
        reward_sums_nStepOPSARSA_1, evst_nStepOPSARSA_1 = runExperiment(nEpisodes, env, agent_nStepOPSARSA_1, policy_behaviour, doUpdateBehaviourPolicy)
        policy_behaviour = StochasticPolicy(env.nStates, env.nActions, policyUpdateMethod="esoft", epsilon=epsilon_behaviourPolicy)
        reward_sums_nStepOPSARSA_2, evst_nStepOPSARSA_2 = runExperiment(nEpisodes, env, agent_nStepOPSARSA_2, policy_behaviour, doUpdateBehaviourPolicy)
        policy_behaviour = StochasticPolicy(env.nStates, env.nActions, policyUpdateMethod="esoft", epsilon=epsilon_behaviourPolicy)
        reward_sums_nStepOPSARSA_3, evst_nStepOPSARSA_3 = runExperiment(nEpisodes, env, agent_nStepOPSARSA_3, policy_behaviour, doUpdateBehaviourPolicy)
        # Running mean over experiments.
        avg_reward_sums_nStepOPSARSA_1 = avg_reward_sums_nStepOPSARSA_1 + (1.0/idx_experiment)*(reward_sums_nStepOPSARSA_1 - avg_reward_sums_nStepOPSARSA_1)
        avg_reward_sums_nStepOPSARSA_2 = avg_reward_sums_nStepOPSARSA_2 + (1.0/idx_experiment)*(reward_sums_nStepOPSARSA_2 - avg_reward_sums_nStepOPSARSA_2)
        avg_reward_sums_nStepOPSARSA_3 = avg_reward_sums_nStepOPSARSA_3 + (1.0/idx_experiment)*(reward_sums_nStepOPSARSA_3 - avg_reward_sums_nStepOPSARSA_3)

    # Learning curves: episodes vs timesteps, and average reward per episode.
    pl.figure()
    pl.plot(evst_nStepOPSARSA_1[:,1],evst_nStepOPSARSA_1[:,0], '-r', label=str(n_nStepOPSARSA_1)+' Step SARSA')
    pl.plot(evst_nStepOPSARSA_2[:,1],evst_nStepOPSARSA_2[:,0], '-g', label=str(n_nStepOPSARSA_2)+' Step SARSA')
    pl.plot(evst_nStepOPSARSA_3[:,1],evst_nStepOPSARSA_3[:,0], '-k', label=str(n_nStepOPSARSA_3)+' Step SARSA')
    pl.xlabel("Time steps")
    pl.ylabel("Episodes")
    pl.legend()
    pl.figure()
    pl.plot(avg_reward_sums_nStepOPSARSA_1, '-r', label=str(n_nStepOPSARSA_1)+' Step SARSA')
    pl.plot(avg_reward_sums_nStepOPSARSA_2, '-g', label=str(n_nStepOPSARSA_2)+' Step SARSA')
    pl.plot(avg_reward_sums_nStepOPSARSA_3, '-k', label=str(n_nStepOPSARSA_3)+' Step SARSA')
    pl.xlabel("Episodes")
    pl.ylabel("Sum of reward during episodes")
    pl.legend()
    pl.show()

    # Print the learned greedy policies and then replay each agent visually.
    agents = [agent_nStepOPSARSA_1, agent_nStepOPSARSA_2, agent_nStepOPSARSA_3]
    for agent in agents:
        print("Policy for :", agent.getName())
        env.printEnv(agent)
    for agent in agents:
        input("Press any key to simulate agent "+agent.getName())
        agentHistory = runSimulation(env, agent)
        print("Simulation:", agent.getName())
        env.render(agentHistory)
from competition_and_mutation import Competition, MoranStyleComp, normal_fitness_dist, uniform_fitness_dist
from colourscales import get_colourscale_with_random_mutation_colour
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
def example1():
    """Run a single simulation of algorithm 1 and plot the result.

    With all parameters unchanged this reproduces the first run of the
    multiple-runs example. Set ``plot_file`` to None to have the plot pop
    up in a window instead of being saved.
    """
    np.random.seed(0)  # change or remove the seed to see a different result
    sim_kwargs = dict(
        WT_init=9900,            # initial wildtype population
        A_init=100,              # initial population carrying mutation A
        A_fitness=1.05,          # fitness of mutation A (wildtype has fitness 1)
        mutation_rate=0.3,       # chance of a single mutation per generation
        # Distribution drawn from for the fitness of new mutations;
        # swap in uniform_fitness_dist(0.8, 1.2) for a uniform draw instead.
        fitness_distribution=normal_fitness_dist(var=0.1, mean=1),
        num_generations=150,     # simulation length
        combine_mutations=True,  # False: new mutation fitness replaces old instead of combining
        plot_file='example1_plot.pdf',
        figsize=(10, 8),
    )
    sim = Competition(**sim_kwargs)
    sim.run_sim()
    # Colour clones by fitness, scaled to the fittest clone observed.
    top_fitness = sim.clones_array[:, sim.growth_idx].max()
    sim.colourscales = get_colourscale_with_random_mutation_colour(top_fitness)
    sim.plot()
def example2():
    """Run a single simulation of algorithm 2.

    This can be significantly slower than :func:`example1`.
    """
    np.random.seed(2)  # change or remove the seed to see a different result
    sim = MoranStyleComp(
        WT_init=9900,             # initial wildtype population
        A_init=100,               # initial population carrying mutation A
        A_fitness=1.1,            # fitness of mutation A (wildtype has fitness 1)
        mutation_rate=0.00025,    # chance of adding a mutation
        # Distribution drawn from for the fitness of new mutations;
        # swap in uniform_fitness_dist(0.8, 1.2) for a uniform draw instead.
        fitness_distribution=normal_fitness_dist(var=0.1, mean=1),
        num_births=1000000,       # simulation length
        sampling=500,             # plot-sample interval: small = more memory, large = coarser plot
        combine_mutations=True,   # False: new mutation fitness replaces old instead of combining
        plot_file='example2_plot.pdf',
        figsize=(10, 8),
    )
    sim.run_sim()
    # Colour clones by fitness, scaled to the fittest clone observed.
    top_fitness = sim.clones_array[:, sim.growth_idx].max()
    sim.colourscales = get_colourscale_with_random_mutation_colour(top_fitness)
    sim.plot()
def multiple_runs_example():
    """Repeat algorithm 1 and overlay the labelled-mutant proportions.

    Each repeat saves its own clone plot ('single_plotN.pdf'); the combined
    proportion-of-descendants curves are written to 'multiplot.pdf'.
    """
    np.random.seed(0)  # change or remove the seed to see a different result
    num_repeats = 12
    generations = 150  # simulation length
    x = range(generations + 1)
    fig, ax = plt.subplots()
    for run in range(num_repeats):
        sim = Competition(
            WT_init=9900,            # initial wildtype population
            A_init=100,              # initial population carrying mutation A
            A_fitness=1.05,          # fitness of mutation A (wildtype has fitness 1)
            mutation_rate=0.3,       # chance of a single mutation per generation
            fitness_distribution=normal_fitness_dist(var=0.1, mean=1),
            num_generations=generations,
            combine_mutations=True,  # False: replace old fitness instead of combining
            figsize=(10, 8),
            plot_file='single_plot{0}.pdf'.format(run),
        )
        sim.run_sim()
        top_fitness = sim.clones_array[:, sim.growth_idx].max()
        sim.colourscales = get_colourscale_with_random_mutation_colour(top_fitness)
        sim.plot()
        # Proportion of the population descended from the labelled A mutants.
        mutant_rows = np.where(sim.clones_array[:, sim.type_idx] == 1)
        A_proportion = sim.proportional_populations[mutant_rows].sum(axis=0)
        if run == 0:
            # Highlight the first run in green on top of the grey repeats.
            ax.plot(x, A_proportion, label=run, c='g', linewidth=3, zorder=2)
        else:
            ax.plot(x, A_proportion, label=run,
                    c=cm.Greys((run + 2) / (num_repeats + 5)), zorder=1)
    ax.set_ylim([0, 1])
    ax.set_xlim([0, generations])
    ax.set_yticklabels([0, 20, 40, 60, 80, 100])  # show proportions as percentages
    ax.set_xticks([])
    legend = ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    fig.savefig('multiplot.pdf', bbox_extra_artists=(legend,), bbox_inches='tight')
if __name__ == '__main__':
    # Run the single-simulation example by default; uncomment the lines
    # below to run the slower algorithm-2 example or the multi-run overlay.
    example1()
    # example2()
    # multiple_runs_example()
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
import numpy as np
# pyre-fixme[21]: Could not find module `pytest`.
import pytest
# pyre-fixme[21]: Could not find `pyspark`.
from pyspark.sql.functions import asc
# pyre-fixme[21]: Could not find `workflow`.
from reagent.test.workflow.reagent_sql_test_base import ReagentSQLTestBase
# pyre-fixme[21]: Could not find module `reagent.test.workflow.test_data.ex_mdps`.
from reagent.test.workflow.test_data.ex_mdps import generate_discrete_mdp_pandas_df
from reagent.workflow.data_fetcher import query_data
from reagent.workflow.types import Dataset, TableSpec
logger = logging.getLogger(__name__)
def generate_data_discrete(sqlCtx, multi_steps: bool, table_name: str):
    """Build the example discrete-MDP dataframe and register it as a temp view."""
    # pyre-fixme[16]: Module `test` has no attribute `workflow`.
    pandas_df, _ = generate_discrete_mdp_pandas_df(
        multi_steps=multi_steps, use_seq_num_diff_as_time_diff=False
    )
    spark_df = sqlCtx.createDataFrame(pandas_df)
    logger.info("Created dataframe")
    spark_df.show()
    spark_df.createOrReplaceTempView(table_name)
# pyre-fixme[11]: Annotation `ReagentSQLTestBase` is not defined as a type.
class TestQueryData(ReagentSQLTestBase):
    """End-to-end tests for `query_data` on a small hand-built discrete MDP.

    The fixture table (built by `generate_data_discrete`) contains 4
    transitions over 5 one-hot state features and 4 actions
    ("L", "R", "U", "D"); each test materializes the query result to pandas
    and checks every output column.
    """

    def setUp(self):
        super().setUp()
        logging.getLogger(__name__).setLevel(logging.INFO)
        # Name of the temp view that holds the fixture transitions.
        self.table_name = "test_table"
        logger.info(f"Table name is {self.table_name}")

    def generate_data(self, multi_steps=False):
        """Register the example MDP as a temp view under `self.table_name`."""
        generate_data_discrete(
            self.sqlCtx, multi_steps=multi_steps, table_name=self.table_name
        )

    def _discrete_read_data(
        self, custom_reward_expression=None, gamma=None, multi_steps=None
    ):
        """Run `query_data` over the fixture table and return the result.

        Returns a Spark dataframe ordered by sequence_number so the
        assertions below can compare against fixed arrays.
        """
        ts = TableSpec(table_name=self.table_name)
        dataset: Dataset = query_data(
            input_table_spec=ts,
            discrete_action=True,
            actions=["L", "R", "U", "D"],
            custom_reward_expression=custom_reward_expression,
            multi_steps=multi_steps,
            gamma=gamma,
        )
        df = self.sqlCtx.read.parquet(dataset.parquet_url)
        df = df.orderBy(asc("sequence_number"))
        logger.info("Read parquet dataframe: ")
        df.show()
        return df

    @pytest.mark.serial
    def test_query_data(self):
        """Exercise single-step, custom-reward and multi-step query variants."""
        # single step
        self.generate_data()
        df = self._discrete_read_data()
        df = df.toPandas()
        self.verify_discrete_single_step_except_rewards(df)
        self.assertEq(df["reward"], np.array([0.0, 1.0, 4.0, 5.0], dtype="float32"))
        logger.info("discrete single-step seems fine")
        # single step with reward := reward^3 + 10
        df = self._discrete_read_data(custom_reward_expression="POWER(reward, 3) + 10")
        df = df.toPandas()
        self.verify_discrete_single_step_except_rewards(df)
        self.assertEq(
            df["reward"], np.array([10.0, 11.0, 74.0, 135.0], dtype="float32")
        )
        logger.info("discrete single-step custom reward seems fine")
        # multi-step: rewards become discounted sums over a 2-step horizon
        gamma = 0.9
        self.generate_data(multi_steps=True)
        df = self._discrete_read_data(multi_steps=2, gamma=gamma)
        df = df.toPandas()
        self.verify_discrete_multi_steps_except_rewards(df)
        self.assertAllClose(
            df["reward"],
            np.array(
                [gamma * 1, 1 * 1.0 + gamma * 4, 1 * 4.0 + gamma * 5, 1 * 5.0],
                dtype="float32",
            ),
        )
        logger.info("discrete multi-step seems fine.")

    def verify_discrete_single_step_except_rewards(self, df):
        """Check every single-step output column (except reward) of a pandas df."""
        self.assertEq(df["sequence_number"], np.array([1, 2, 3, 4], dtype="int32"))
        # Exactly one state feature is present per row (one-hot encoding).
        state_features_presence = np.array(
            [
                [True, False, False, False, False],
                [False, True, False, False, False],
                [False, False, True, False, False],
                [False, False, False, True, False],
            ],
            dtype="bool",
        )
        self.assertEq(df["state_features_presence"], state_features_presence)
        state_features = np.array(
            [
                [1.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 1.0, 0.0],
            ],
            dtype="float32",
        )
        self.assertEqWithPresence(
            df["state_features"], state_features_presence, state_features
        )
        self.assertEq(df["action"], np.array([0, 1, 2, 3]))
        self.assertEq(
            df["action_probability"], np.array([0.3, 0.4, 0.5, 0.6], dtype="float32")
        )
        # Only the last transition is terminal.
        self.assertEq(df["not_terminal"], np.array([1, 1, 1, 0], dtype="bool"))
        next_state_features_presence = np.array(
            [
                [False, True, False, False, False],
                [False, False, True, False, False],
                [False, False, False, True, False],
                [False, False, False, False, True],
            ],
            dtype="bool",
        )
        self.assertEq(df["next_state_features_presence"], next_state_features_presence)
        next_state_features = np.array(
            [
                [0.0, 1.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 1.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 1.0],
            ],
            dtype="float32",
        )
        self.assertEqWithPresence(
            df["next_state_features"], next_state_features_presence, next_state_features
        )
        self.assertEq(df["next_action"], np.array([1, 2, 3, 4]))
        self.assertEq(df["time_diff"], np.array([1, 3, 1, 1]))
        self.assertEq(df["step"], np.array([1, 1, 1, 1]))
        self.assertEq(
            df["possible_actions_mask"],
            np.array([[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1]]),
        )
        self.assertEq(
            df["possible_next_actions_mask"],
            np.array([[0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]),
        )

    def verify_discrete_multi_steps_except_rewards(self, df):
        """Check every multi-step (horizon 2) output column except reward."""
        self.assertEq(df["sequence_number"], np.array([1, 2, 3, 4], dtype="int32"))
        state_features_presence = np.array(
            [
                [True, False, False, False, False],
                [False, True, False, False, False],
                [False, False, True, False, False],
                [False, False, False, True, False],
            ],
            dtype="bool",
        )
        self.assertEq(df["state_features_presence"], state_features_presence)
        state_features = np.array(
            [
                [1.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 1.0, 0.0],
            ],
            dtype="float32",
        )
        self.assertEqWithPresence(
            df["state_features"], state_features_presence, state_features
        )
        self.assertEq(df["action"], np.array([0, 1, 2, 3]))
        self.assertEq(
            df["action_probability"], np.array([0.3, 0.4, 0.5, 0.6], dtype="float32")
        )
        self.assertEq(df["not_terminal"], np.array([1, 1, 0, 0], dtype="bool"))
        # With a 2-step horizon, "next state" is two transitions ahead
        # (clamped at the terminal state).
        next_state_features_presence = np.array(
            [
                [False, False, True, False, False],
                [False, False, False, True, False],
                [False, False, False, False, True],
                [False, False, False, False, True],
            ],
            dtype="bool",
        )
        self.assertEq(df["next_state_features_presence"], next_state_features_presence)
        next_state_features = np.array(
            [
                [0.0, 0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 1.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 1.0],
                [0.0, 0.0, 0.0, 0.0, 1.0],
            ],
            dtype="float32",
        )
        self.assertEqWithPresence(
            df["next_state_features"], next_state_features_presence, next_state_features
        )
        self.assertEq(df["next_action"], np.array([2, 3, 4, 4]))
        self.assertEq(df["time_diff"], np.array([1, 1, 1, 1]))
        # The final row can only look one step ahead before the terminal.
        self.assertEq(df["step"], np.array([2, 2, 2, 1]))
        self.assertEq(
            df["possible_actions_mask"],
            np.array([[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1]]),
        )
        self.assertEq(
            df["possible_next_actions_mask"],
            np.array([[0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]),
        )
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
from __future__ import division, print_function, absolute_import
import time
import numpy as np
import tensorflow as tf
from scipy.stats.mstats import gmean
from tefla.da import tta
from tefla.da.iterator import BatchIterator
from tefla.utils import util
class PredictSessionMixin(object):
    """Base class that owns the TF session lifecycle for prediction.

    Subclasses override `_build_model` (construct the inference graph) and
    `_real_predict` (run inference within an open session); `predict` wires
    them together with checkpoint restoration.
    """

    def __init__(self, weights_from):
        # Checkpoint path restored before each call to `predict`.
        self.weights_from = weights_from

    def predict(self, X):
        """Build a fresh graph, restore weights and delegate to `_real_predict`."""
        graph = tf.Graph()
        with graph.as_default():
            self._build_model()
            # Saver is created after the graph is built so it sees all variables.
            saver = tf.train.Saver()
            with tf.Session() as sess:
                print('Loading weights from: %s' % self.weights_from)
                util.load_variables(sess, saver, self.weights_from)
                return self._real_predict(X, sess)

    def _real_predict(self, X, sess):
        # Hook: overridden by subclasses; base implementation is a no-op.
        pass

    def _build_model(self):
        # Hook: overridden by subclasses; base implementation is a no-op.
        pass
class OneCropPredictor(PredictSessionMixin):
    """Predict scores for inputs using a single (optionally cropped) view."""

    def __init__(self, model, cnf, weights_from, prediction_iterator, output_layer='predictions'):
        self.model = model
        self.output_layer = output_layer
        self.cnf = cnf
        self.prediction_iterator = prediction_iterator
        super(OneCropPredictor, self).__init__(weights_from)

    def _build_model(self):
        # Build the inference graph and keep handles to its input/output tensors.
        end_points = self.model(is_training=False, reuse=None)
        self.inputs = end_points['inputs']
        self.predictions = end_points[self.output_layer]

    def _real_predict(self, X, sess, xform=None, crop_bbox=None):
        start = time.time()
        print('Making %d predictions' % len(X))
        batch_outputs = []
        for batch_x, _ in self.prediction_iterator(X, xform=xform, crop_bbox=crop_bbox):
            batch_outputs.append(sess.run(self.predictions, feed_dict={self.inputs: batch_x}))
        stacked = np.vstack(batch_outputs)
        print('took %6.2f seconds' % (time.time() - start))
        return stacked
class InputFeaturesPredictor(PredictSessionMixin):
    """Predict directly from precomputed input features (no augmentation)."""

    def __init__(self, model, cnf, weights_from, output_layer='predictions'):
        self.model = model
        self.output_layer = output_layer
        self.cnf = cnf
        # Plain batching without shuffling for deterministic test-time order.
        self.prediction_iterator = BatchIterator(cnf['batch_size_test'], False)
        super(InputFeaturesPredictor, self).__init__(weights_from)

    def _build_model(self):
        # Build the inference graph and keep handles to its input/output tensors.
        end_points = self.model(is_training=False, reuse=None)
        self.inputs = end_points['inputs']
        self.predictions = end_points[self.output_layer]

    def _real_predict(self, X, sess):
        start = time.time()
        print('Making %d predictions' % len(X))
        batch_outputs = []
        for batch_x, _ in self.prediction_iterator(X):
            batch_outputs.append(sess.run(self.predictions, feed_dict={self.inputs: batch_x}))
        stacked = np.vstack(batch_outputs)
        print('took %6.2f seconds' % (time.time() - start))
        return stacked
class QuasiCropPredictor(PredictSessionMixin):
    """Average predictions over quasi-random test-time augmentations."""

    def __init__(self, model, cnf, weights_from, prediction_iterator, number_of_transforms, output_layer='predictions'):
        self.number_of_transforms = number_of_transforms
        self.cnf = cnf
        self.prediction_iterator = prediction_iterator
        # Delegate the per-transform inference to a single-crop predictor.
        self.predictor = OneCropPredictor(model, cnf, weights_from, prediction_iterator, output_layer)
        super(QuasiCropPredictor, self).__init__(weights_from)

    def _build_model(self):
        self.predictor._build_model()

    def _real_predict(self, X, sess):
        standardizer = self.prediction_iterator.standardizer
        da_params = standardizer.da_processing_params()
        # 'sigma' is the only augmentation parameter the standardizer may carry.
        util.veryify_args(da_params, ['sigma'], 'QuasiPredictor.standardizer does unknown da with param(s):')
        color_sigma = da_params.get('sigma', 0.0)
        tfs, color_vecs = tta.build_quasirandom_transforms(self.number_of_transforms, color_sigma=color_sigma,
                                                           **self.cnf['aug_params'])
        per_transform = []
        for idx, (xform, color_vec) in enumerate(zip(tfs, color_vecs), start=1):
            print('Quasi-random tta iteration: %d' % idx)
            standardizer.set_tta_args(color_vec=color_vec)
            per_transform.append(self.predictor._real_predict(X, sess, xform=xform))
        # Arithmetic mean across all augmented views.
        return np.mean(per_transform, axis=0)
class TenCropPredictor(PredictSessionMixin):
    """Average predictions over the deterministic 10-crop views of each image."""

    def __init__(self, model, cnf, weights_from, prediction_iterator, im_size, crop_size, output_layer='predictions'):
        self.crop_size = crop_size
        self.im_size = im_size
        self.cnf = cnf
        self.prediction_iterator = prediction_iterator
        # Delegate the per-crop inference to a single-crop predictor.
        self.predictor = OneCropPredictor(model, cnf, weights_from, prediction_iterator, output_layer)
        super(TenCropPredictor, self).__init__(weights_from)

    def _build_model(self):
        self.predictor._build_model()

    def _real_predict(self, X, sess):
        bboxs = util.get_bbox_10crop(np.array(self.crop_size), np.array(self.im_size))
        per_crop = []
        for idx, bbox in enumerate(bboxs, start=1):
            print('Crop-deterministic iteration: %d' % idx)
            per_crop.append(self.predictor._real_predict(X, sess, crop_bbox=bbox))
        # Arithmetic mean across the ten crops.
        return np.mean(per_crop, axis=0)
class EnsemblePredictor(object):
    """Combine the outputs of several predictors into a single prediction."""

    def __init__(self, predictors):
        self.predictors = predictors

    def predict(self, X):
        """Return the geometric mean of every member's predictions for X."""
        all_preds = []
        for member in self.predictors:
            print('Ensembler - running predictions using: %s' % member)
            all_preds.append(member.predict(X))
        # Todo: introduce voting policies other than the geometric mean below
        # (the arithmetic mean np.mean(all_preds, axis=0) is one alternative).
        return gmean(all_preds, axis=0)

    def predict_with_voting(self, X, score_to_classes, vote_combiner):
        """Map each member's scores to class votes and combine with `vote_combiner`."""
        votes = []
        for i, member in enumerate(self.predictors):
            print('Ensembler - running predictions using: %s' % member)
            votes.append(score_to_classes[i](member.predict(X)))
        return vote_combiner(votes)
import os
import logging
import queue
import re
import shutil
import string
import torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import tqdm
import numpy as np
import ujson as json
from torch.utils.data import Dataset
def masked_softmax(logits, mask, dim=-1, log_softmax=False):
    """Take the softmax of `logits` over given dimension, and set
    entries to 0 wherever `mask` is 0.

    Masked positions receive a large negative logit (-1e30) so they get
    (numerically) zero probability after the softmax.

    Args:
        logits (torch.Tensor): Inputs to the softmax function.
        mask (torch.Tensor): Same shape as `logits`, with 0 indicating
            positions that should be assigned 0 probability in the output.
        dim (int): Dimension over which to take softmax.
        log_softmax (bool): Take log-softmax rather than regular softmax.
            E.g., some PyTorch functions such as `F.nll_loss` expect log-softmax.

    Returns:
        probs (torch.Tensor): Result of taking masked softmax over the logits.
    """
    mask = mask.type(torch.float32)
    masked_logits = mask * logits + (1 - mask) * -1e30
    softmax_fn = F.log_softmax if log_softmax else F.softmax
    # NOTE: leftover debug prints of the full masked_logits / mask tensors
    # (executed on every call) were removed here.
    return softmax_fn(masked_logits, dim)
def get_save_dir(base_dir, name, training, id_max=100):
    """Get a unique save directory by appending the smallest positive integer
    `id < id_max` that is not already taken (i.e., no dir exists with that id).

    Args:
        base_dir (str): Base directory in which to make save directories.
        name (str): Name to identify this training run. Need not be unique.
        training (bool): Save dir. is for training (determines subdirectory).
        id_max (int): Maximum ID number before raising an exception.

    Returns:
        save_dir (str): Path to a new directory with a unique name.
    """
    subdir = 'train' if training else 'test'
    for uid in range(1, id_max):
        candidate = os.path.join(base_dir, subdir, f'{name}-{uid:02d}')
        if os.path.exists(candidate):
            continue
        os.makedirs(candidate)
        return candidate

    raise RuntimeError('Too many save directories created with the same name. \
Delete old save directories or use another name.')
def get_logger(log_dir, name):
    """Get a `logging.Logger` instance that prints to the console
    and an auxiliary file.

    Args:
        log_dir (str): Directory in which to create the log file.
        name (str): Name to identify the logs.

    Returns:
        logger (logging.Logger): Logger instance for logging events.
    """
    class StreamHandlerWithTQDM(logging.Handler):
        """Let `logging` print without breaking `tqdm` progress bars.

        See Also:
            > https://stackoverflow.com/questions/38543506
        """
        def emit(self, record):
            try:
                tqdm.tqdm.write(self.format(record))
                self.flush()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)  # the logger itself passes everything through

    # Both handlers share one timestamped message format.
    formatter = logging.Formatter('[%(asctime)s] %(message)s',
                                  datefmt='%m.%d.%y %H:%M:%S')

    # The file sees everything (DEBUG level and above)...
    file_handler = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # ...while the console skips DEBUG-level records (INFO and above only).
    console_handler = StreamHandlerWithTQDM()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    return logger
def get_available_devices():
    """Get IDs of all available GPUs.

    Returns:
        device (torch.device): Main device (GPU 0 or CPU).
        gpu_ids (list): List of IDs of all GPUs that are available.
    """
    if not torch.cuda.is_available():
        # CPU-only machine: no GPU IDs to report.
        return torch.device('cpu'), []
    gpu_ids = list(range(torch.cuda.device_count()))
    device = torch.device(f'cuda:{gpu_ids[0]}')
    # Make GPU 0 the default device for subsequent allocations.
    torch.cuda.set_device(device)
    return device, gpu_ids
def load_model(model, checkpoint_path, device, gpu_ids, return_step=True):
    """Load model parameters from disk.

    Args:
        model (torch.nn.DataParallel): Load parameters into this model.
        checkpoint_path (str): Path to checkpoint to load.
        device: map_location used by `torch.load` for the checkpoint tensors.
        gpu_ids (list): GPU IDs for DataParallel (currently unused here).
        return_step (bool): Also return the step at which checkpoint was saved.

    Returns:
        model (torch.nn.DataParallel): Model loaded from checkpoint.
        step (int): Step at which checkpoint was saved. Only if `return_step`.
    """
    # device = f"cuda:{gpu_ids[0] if gpu_ids else 'cpu'}"
    ckpt_dict = torch.load(checkpoint_path, map_location=device)

    # Build model, load parameters
    model.load_state_dict(ckpt_dict['model_state'])

    if not return_step:
        return model
    return model, ckpt_dict['step']
class EMA:
    """Exponential moving average of model parameters.

    Args:
        model (torch.nn.Module): Model with parameters whose EMA will be kept.
        decay (float): Decay rate for exponential moving average.
    """
    def __init__(self, model, decay):
        self.decay = decay
        self.shadow = {}    # EMA copies of the trainable parameters
        self.original = {}  # stash of real weights while EMA values are assigned

        # Register model parameters
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            self.shadow[name] = param.data.clone()

    def __call__(self, model, num_updates):
        """Fold the model's current parameters into the running averages."""
        # Warm-up: effective decay is smaller early in training.
        decay = min(self.decay, (1.0 + num_updates) / (10.0 + num_updates))
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.shadow
            blended = (1.0 - decay) * param.data + decay * self.shadow[name]
            self.shadow[name] = blended.clone()

    def assign(self, model):
        """Assign exponential moving average of parameter values to the
        respective parameters.

        Args:
            model (torch.nn.Module): Model to assign parameter values.
        """
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.shadow
            self.original[name] = param.data.clone()
            param.data = self.shadow[name]

    def resume(self, model):
        """Restore original parameters to a model. That is, put back
        the values that were in each parameter at the last call to `assign`.

        Args:
            model (torch.nn.Module): Model to assign parameter values.
        """
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            assert name in self.shadow
            param.data = self.original[name]
class CheckpointSaver:
    """Class to save and load model checkpoints.

    Save the best checkpoints as measured by a metric value passed into the
    `save` method. Overwrite checkpoints with better checkpoints once
    `max_checkpoints` have been saved.

    Args:
        save_dir (str): Directory to save checkpoints.
        max_checkpoints (int): Maximum number of checkpoints to keep before
            overwriting old ones.
        metric_name (str): Name of metric used to determine best model.
        maximize_metric (bool): If true, best checkpoint is that which maximizes
            the metric value passed in via `save`. Otherwise, best checkpoint
            minimizes the metric.
        log (logging.Logger): Optional logger for printing information.
    """
    def __init__(self, save_dir, max_checkpoints, metric_name,
                 maximize_metric=False, log=None):
        super(CheckpointSaver, self).__init__()
        self.save_dir = save_dir
        self.max_checkpoints = max_checkpoints
        self.metric_name = metric_name
        self.maximize_metric = maximize_metric
        self.best_val = None  # best metric value observed so far
        self.ckpt_paths = queue.PriorityQueue()  # (priority, path) of saved ckpts
        self.log = log
        self._print(f"Saver will {'max' if maximize_metric else 'min'}imize {metric_name}...")

    def is_best(self, metric_val):
        """Check whether `metric_val` is the best seen so far.

        Args:
            metric_val (float): Metric value to compare to prior checkpoints.
        """
        if metric_val is None:
            # No metric reported
            return False
        if self.best_val is None:
            # No checkpoint saved yet
            return True
        if self.maximize_metric:
            return self.best_val < metric_val
        return self.best_val > metric_val

    def _print(self, message):
        """Print a message if logging is enabled."""
        if self.log is not None:
            self.log.info(message)

    def save(self, step, model, device, metric_val=0):
        """Save model parameters to disk.

        Args:
            step (int): Total number of examples seen during training so far.
            model (torch.nn.DataParallel): Model to save.
            metric_val (float): Determines whether checkpoint is best so far.
            device (torch.device): Device where model resides.
        """
        ckpt_dict = {
            'model_name': model.__class__.__name__,
            'model_state': model.cpu().state_dict(),
            'step': step
        }
        model.to(device)  # move back after the cpu() call above

        checkpoint_path = os.path.join(self.save_dir, f'step_{step}.pth.tar')
        torch.save(ckpt_dict, checkpoint_path)
        self._print(f'Saved checkpoint: {checkpoint_path}')

        # Best-checkpoint tracking and pruning of old checkpoints is
        # currently disabled; every checkpoint is kept.
        # if self.is_best(metric_val):
        #     # Save the best model
        #     self.best_val = metric_val
        #     best_path = os.path.join(self.save_dir, 'best.pth.tar')
        #     shutil.copy(checkpoint_path, best_path)
        #     self._print(f'New best checkpoint at step {step}...')
        # # Add checkpoint path to priority queue (lowest priority removed first)
        # if self.maximize_metric:
        #     priority_order = metric_val
        # else:
        #     priority_order = -metric_val
        # self.ckpt_paths.put((priority_order, checkpoint_path))
        # # Remove a checkpoint if more than max_checkpoints have been saved
        # if self.ckpt_paths.qsize() > self.max_checkpoints:
        #     _, worst_ckpt = self.ckpt_paths.get()
        #     try:
        #         os.remove(worst_ckpt)
        #         self._print(f'Removed checkpoint: {worst_ckpt}')
        #     except OSError:
        #         # Avoid crashing if checkpoint has been removed or protected
        #         pass
import numpy as np
import itertools as it
def min2_mtx(A, b):
    """Solve the least-squares problem min ||A x - b|| via the normal equations.

    Solves (A.T @ A) x = A.T @ b.  The original implementation formed the
    explicit inverse inv(A.T @ A); `np.linalg.solve` computes the same
    solution but is faster and numerically more stable.

    Args:
        A: (m, n) design matrix (must have full column rank so A.T@A is invertible).
        b: (m,) or (m, k) right-hand side.

    Returns:
        x: least-squares solution of shape (n,) or (n, k).
    """
    return np.linalg.solve(np.matmul(A.T, A), np.matmul(A.T, b))
def euler(t, y0, f):
    """Integrate y' = f(t, y) with the explicit Euler method.

    Args:
        t: time grid with uniform spacing (only t[1]-t[0] is used as the step).
        y0: initial value y(t[0]).
        f: right-hand side function f(t, y).

    Returns:
        List of approximate solution values, one per entry of t.
    """
    step = t[1] - t[0]
    ys = [y0]
    for t_i in t[:-1]:
        y_i = ys[-1]
        ys.append(y_i + step * f(t_i, y_i))
    return ys
def rk4(t, y0, f):
    """Integrate y' = f(t, y) with the classic 4th-order Runge-Kutta method.

    Args:
        t: time grid with uniform spacing (only t[1]-t[0] is used as the step).
        y0: initial value y(t[0]).
        f: right-hand side function f(t, y).

    Returns:
        List of approximate solution values, one per entry of t.
    """
    step = t[1] - t[0]
    ys = [y0]
    for t_i in t[:-1]:
        y_i = ys[-1]
        k1 = step * f(t_i, y_i)
        k2 = step * f(t_i + step / 2, y_i + k1 / 2)
        k3 = step * f(t_i + step / 2, y_i + k2 / 2)
        k4 = step * f(t_i + step, y_i + k3)
        ys.append(y_i + (k1 + 2 * k2 + 2 * k3 + k4) / 6)
    return ys
# 2nd-order Runge-Kutta (midpoint method)
# NOTE: this block previously re-defined rk4 verbatim (shadowing the
# definition above) although its comment announced a 2nd-order method.
# It is renamed to rk2 and implemented as the midpoint rule; existing rk4
# callers keep the unchanged 4th-order implementation above.
def rk2(t, y0, f):
    """Integrate y' = f(t, y) with the 2nd-order Runge-Kutta (midpoint) method.

    Args:
        t: time grid with uniform spacing (only t[1]-t[0] is used as the step).
        y0: initial value y(t[0]).
        f: right-hand side function f(t, y).

    Returns:
        List of approximate solution values, one per entry of t.
    """
    h = t[1] - t[0]
    ys = [y0]
    for ti in t[:-1]:
        yi = ys[-1]
        k1 = h * f(ti, yi)
        k2 = h * f(ti + h / 2, yi + k1 / 2)  # slope at the midpoint
        ys.append(yi + k2)
    return ys
def find_cell(point, axis):
    """Locate the grid cell that contains `point` on each of the given axes.

    Args:
        point: coordinates of the point, one value per axis.
        axis: list of 1-D monotonically increasing coordinate arrays.

    Returns:
        np.ndarray of shape (len(axis), 2) with the [low, high] index pair
        per axis, or -1 when the point is out of range or the number of
        coordinates does not match the number of axes.
        (Docs translated from Spanish; printed messages kept verbatim.)
    """
    if len(point) != len(axis):
        print('len(axis) debe ser igual a len(point): {} != {}'.format(
            len(axis),
            len(point),
        ))
        return(-1)
    cell = []
    for value, ax in zip(point, axis):
        if value < ax[0] or value > ax[-1]:
            # Point lies outside this axis' range.
            print('punto fuera de rango')
            return(-1)
        if value == ax[-1]:
            # Exactly on the last grid node: address the final cell via
            # negative indices.
            upper = -1
        else:
            upper = np.where(value < ax)[0][0]
        cell.append([upper - 1, upper])
    return(np.array(cell))
def mlineal(point, axis, data):
    '''
    Multilinear interpolation over several axes using, per axis:
        fx = fx1*(x2-xi)/dx + fx2*(xi-x1)/dx

    Parameters:
        point - the point to interpolate at (one coordinate per axis)
        axis  - the list of axes, e.g. [lat, lon, t]
        data  - array of values; its dimensions must match the given axes

    Returns a single-element list containing the interpolated value.
    (Docstring and comments translated from Spanish.)
    '''
    # indices of the grid cell that contains the point
    idx_cell=find_cell(point, axis)
    # gather the values at the cell vertices into f_list
    f_list=[]
    # i ranges over every 0/1 corner combination of the cell;
    # the count depends on the number of axes:
    # 1 axis  = 2 vertices
    # 2 axes  = 4 vertices
    # 3 axes  = 8 vertices
    # etc...
    for i in it.product([0,1],repeat=len(idx_cell)):
        tp_idx=[]
        naxis=len(idx_cell)
        for idx in range(naxis):
            # axes are addressed in reverse order relative to the data layout
            tp_idx.append(idx_cell[naxis-1-idx][i[idx]])
        tp_idx=tuple(tp_idx)
        f_list.append(data[tp_idx])
    # Interpolate pairwise along one axis at a time.
    # ax_list holds the list of point-sets used for each interpolation pass,
    # starting with the raw vertex values.  NOTE: ax_list is deliberately
    # extended while being iterated - each pass appends the next, smaller
    # set of points until a single value remains.
    ax_list=[f_list]
    for n_ax,fx12 in enumerate(ax_list):
        f_list=[]
        for j in range(0,len(fx12),2):
            #print('axis:',axis[n_ax])
            x2=axis[n_ax][idx_cell[n_ax][1]]
            x1=axis[n_ax][idx_cell[n_ax][0]]
            dx=x2-x1
            xi=point[n_ax]
            fx=fx12[j]*(x2-xi)/dx+\
                fx12[j+1]*(xi-x1)/dx
            # the newly interpolated points feed the next pass
            f_list.append(fx)
            #print('x1:{},x2:{},xi:{},dx:{}, fx:{}'.format(
            #x1,x2,xi,dx,fx))
        # stop scheduling further passes once only one point is produced
        if len(f_list)>1:
            ax_list.append(f_list)
    return f_list
# -*- coding: utf-8 -*-
"""
Created on Sat May 25 14:21:27 2019
@author: Tin
"""
import numpy as np
import pandas as pd
import datetime
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")
# yahoo finance used to fetch data
import yfinance as yf
yf.pdr_override()
# Menu entries shown by main(); note the labels retain the leading space
# produced by split(",").
options = " Data Preprocessing, Exit".split(",")
# Input Start Date
def start_date():
    """Prompt for a start date (MM/DD/YYYY) and return it as 'YYYY-MM-DD'."""
    date_entry = input('Enter a starting date in MM/DD/YYYY format: ')
    parsed = datetime.datetime.strptime(date_entry, '%m/%d/%Y')
    return parsed.strftime('%Y-%m-%d')
# Input End Date
def end_date():
    """Prompt for an end date (MM/DD/YYYY) and return it as 'YYYY-MM-DD'."""
    date_entry = input('Enter a ending date in MM/DD/YYYY format: ')
    parsed = datetime.datetime.strptime(date_entry, '%m/%d/%Y')
    return parsed.strftime('%Y-%m-%d')
# Input Symbols
def input_symbol():
    """Prompt for a ticker symbol and return it upper-cased."""
    return input("Enter symbol: ").upper()
def preprocessing_dataset():
    """Download OHLCV data for a user-chosen symbol/date range and demo scaling.

    Prompts for start date, end date and symbol, then:
      1. rescales the feature columns to [0, 1] with MinMaxScaler (demo print),
      2. splits features/target into train/test sets,
      3. standardizes both splits with a StandardScaler fitted on the
         training set only.

    Returns:
        None. Results are printed to stdout.
    """
    s = start_date()
    e = end_date()
    sym = input_symbol()
    df = yf.download(sym, s, e)
    array = df.values
    X = array[:, 0:5]   # first five columns as features -- TODO confirm column order from yfinance
    Y = array[:, 5]     # sixth column as target
    # initialising the MinMaxScaler
    scaler = MinMaxScaler(feature_range=(0, 1))
    # learning the statistical parameters for each of the data and transforming
    rescaledX = scaler.fit_transform(X)
    np.set_printoptions(precision=3)
    print('Rescaled values between 0 to 1')
    print(rescaledX[0:5, :])
    print("")
    # Splitting the datasets into training sets and Test sets
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
    sc_X = StandardScaler()
    # Fit the scaler on the training set only, then apply the same
    # transformation to the test set.  (The original code called
    # fit_transform on X_test too, which leaks test-set statistics and
    # scales the two splits inconsistently.)
    X_train = sc_X.fit_transform(X_train)
    X_test = sc_X.transform(X_test)
    print("Training Dataset")
    print(X_train)
    print("")
    print(Y_train)
    print("")
    print("Testing Dataset")
    print(X_test)
    print("")
    print(Y_test)
    return
def main():
    """Interactive menu loop: run dataset preprocessing or exit."""
    run_program = True
    while run_program:
        print("")
        print("__________Preprocessing Dataset__________")
        print("")
        print("Choose Options:")
        print("")
        for i in range(1, len(options)+1):
            print("{} - {}".format(i, options[i-1]))
        # Guard against non-numeric input instead of crashing with ValueError.
        try:
            choice = int(input())
        except ValueError:
            continue
        if choice == 1:
            preprocessing_dataset()
        elif choice == 2:
            run_program = False
if __name__ == "__main__":
    # Run the interactive menu only when executed as a script.
    main()
#!python3
# ##----------------------------------------## #
# Author: M. Burak Yesilyurt #
# Truss Optimization by Employing #
# Genetic Algorithms #
# ##----------------------------------------## #
# Importing necessary modules
# To run the code below, imported python packages must be installed.
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import LineCollection
from numpy.linalg import inv
import random as rnd
np.set_printoptions(suppress=True,precision=2)
class Truss:
    # Truss Class.
    """One truss individual for the genetic algorithm.

    DNA   -- genome list [h1, h2, nd, dia, sections]: shoulder height [mm],
             peak height [mm], number of bays, diagonal orientation flags,
             and 8 SHS catalog indices (one per member type, see population()).
    param -- (L, q): half span length [mm] and line load [kN/m].
    """
    def __init__(self, DNA, param):
        """Build geometry, topology, loads and properties from the genome."""
        self.point = 0  # Fitness point set to zero at initiation
        self.h1 = DNA[0]  # Shoulder Height of the Truss in mm
        self.h2 = DNA[1]  # Peak Height of the Truss in mm
        self.nd = DNA[2]  # Number of Divisions of the Truss
        self.dia = DNA[3]  # Alignment of the Diagonals for Each Bay
        # One catalog section per member type (indices 0..7 of DNA[4]).
        self.B = [shs_catalog[int(i)][0] for i in DNA[4]]  # Height of SHS member
        self.t = [shs_catalog[int(i)][1] for i in DNA[4]]  # Wall Thickness of SHS member in mm
        self.L = param[0]  # Half of the Span Length of the Truss
        self.q = param[1]  # Line Load on the Truss in kN/m
        self.nn = 2 * self.nd + 2  # Numer of Nodes
        self.nm = 4 * self.nd + 1  # Number of Members
        self.Lines = []  # Line list for plotting
        self.j_forces = np.zeros((self.nn, 2))  # Joint Force Matrix
        self.f = np.zeros((self.nn * 2, 1))  # Dof Force Vector - will be mapped from j_forces
        self.u = np.zeros((self.nn * 2, 1))  # Deformation Field
        self.p = np.zeros((self.nm, 1))  # Member Axial Forces
        self.util = np.zeros((self.nm, 1))  # Member Utilization Ratios
        xvals = np.array([np.hstack((np.linspace(0, self.L, self.nd + 1), np.linspace(0, self.L, self.nd + 1)))]).T
        # Nodal coordinates - in X axis
        yvals = np.array([np.hstack((np.zeros(self.nd + 1), np.linspace(self.h1, self.h2, self.nd + 1)))]).T
        # Nodal coordinates - in Y axis
        self.n_coord = np.hstack((xvals, yvals))  # Nodal Coordinates are stacked in n_coord
        self.n_conn = np.zeros((self.nm, 2))  # Connectivity matrix
        self.n_bound = np.array([[0, 1], [0, 2], [self.nd, 1], [self.nd * 2 + 1, 1]])  # Introducing boundary cnds.
        self.n_dof = np.zeros((self.nn, 2))  # Nodal Dof matrix - info on which dof is assigned to which joint
        self.m_type = np.zeros((self.nm, 1))  # Member Type bot chord :0, top chord :1, post :2, diagonal :3
        self.geo_props = []  # Geometric props. of each member.
        self.mate_props = []  # Material props. of each member.
        self.sec_props = []  # Section props. of each member.
        # Populating connectivity matrix
        # Bottom chord members (split into two type groups at mid-span).
        for i in range(self.nd):
            self.n_conn[i] = [i, i + 1]
            if i < self.nd / 2:
                mt = 0
            else:
                mt = 1
            self.m_type[i] = mt
        # Top chord members.
        for i in range(self.nd):
            self.n_conn[i + self.nd] = [i + self.nd + 1, i + self.nd + 2]
            if i < self.nd / 2:
                mt = 0
            else:
                mt = 1
            self.m_type[i + self.nd] = 2 + mt
        # Vertical posts.
        for i in range(self.nd + 1):
            self.n_conn[i + 2 * self.nd] = [i, i + self.nd + 1]
            if i < self.nd / 2:
                mt = 0
            else:
                mt = 1
            self.m_type[i + 2 * self.nd] = 4 + mt
        # Diagonals; orientation of each bay's diagonal comes from the genome.
        for i in range(self.nd):
            if i < self.nd / 2:
                mt = 0
            else:
                mt = 1
            self.m_type[i + 3 * self.nd + 1] = 6 + mt
            if self.dia[i]:
                self.n_conn[i + 3 * self.nd + 1] = [i, i + self.nd + 2]
            else:
                self.n_conn[i + 3 * self.nd + 1] = [i + self.nd + 1, i + 1]
        self.n_conn = self.n_conn.astype(int)
        # Populating dof matrix - restrained dofs are shifted to the end.
        count = 0
        for n in range(self.nn):
            for j in range(1, 3):
                if not np.equal(self.n_bound, [[n, j]]).all(axis=1).any():
                    self.n_dof[n, j - 1] = int(count)
                    count += 1
        for i in self.n_bound:
            node = i[0]
            dof = i[1]
            self.n_dof[node, dof - 1] = int(count)
            count += 1
        # Material properties
        E = 210000  # N/mm2
        fy = 355  # N/mm2
        for i in range(self.nm):
            self.mate_props.append((E, fy))
        # Section properties looked up per member type.
        for i in range(self.nm):
            mt = int(self.m_type[i])
            A, I, r = shs_props(self.B[mt], self.t[mt])
            self.sec_props.append((self.B[mt], self.t[mt], A, I, r))
        # Geometric Properties
        for x, y in enumerate(self.n_conn):
            i = int(y[0])
            j = int(y[1])
            ix = self.n_coord[i, 0]
            iy = self.n_coord[i, 1]
            jx = self.n_coord[j, 0]
            jy = self.n_coord[j, 1]
            dx = jx - ix
            dy = jy - iy
            L = np.sqrt(dx ** 2 + dy ** 2)
            alpha = np.arctan2(dy, dx)
            self.geo_props.append((L, alpha))
        # Assigning point loads to to top chord joints. First and last joints loaded half of the interior joints.
        p = self.q * self.L / self.nd
        self.j_forces[self.nd + 1, 1] = p / 2
        self.j_forces[2 * self.nd + 1, 1] = p / 2
        for i in range(self.nd + 2, 2 * self.nd + 1):
            self.j_forces[i, 1] = p
        # Mapping joint forces to dof forces. Difference is due to boundary conditions
        # Assembling Force Vector
        for n, i in enumerate(self.n_dof):
            self.f[int(i[0]), 0] = self.j_forces[n, 0]
            self.f[int(i[1]), 0] = self.j_forces[n, 1]
        # Total self-weight [kg-ish units]: length * area * steel density.
        self.W = 0
        for n, i in enumerate(self.sec_props):
            L, alpha = self.geo_props[n]
            B, t, A, I, ir = i
            self.W += L * A * 7.85e-6
    def truss_geo(self, offset=0, deformed=False, ud=0, scale=20):
        # this method is for creating drawing
        """Return member segments [(xi, yi), (xj, yj)] for plotting.

        offset   -- vertical shift applied to the whole truss
        deformed -- overlay the displacement field `ud` when True
        ud       -- dof displacement column vector (used when deformed=True)
        scale    -- display magnification of the displacements
        """
        u = np.zeros((self.nn * 2, 1))
        if deformed:
            u = ud
        lines = []
        for i in self.n_conn:
            node_i = int(i[0])
            node_j = int(i[1])
            dofi_1 = int(self.n_dof[node_i, 1])
            dofj_0 = int(self.n_dof[node_j, 0])
            dofj_1 = int(self.n_dof[node_j, 1])
            dofi_0 = int(self.n_dof[node_i, 0])
            coord_ix = self.n_coord[node_i, 0] + u[dofi_0, 0] * scale
            coord_iz = self.n_coord[node_i, 1] + offset + u[dofi_1, 0] * scale
            coord_jx = self.n_coord[node_j, 0] + u[dofj_0, 0] * scale
            coord_jz = self.n_coord[node_j, 1] + offset + u[dofj_1, 0] * scale
            coord_i = (coord_ix, coord_iz)
            coord_j = (coord_jx, coord_jz)
            lines.append([coord_i, coord_j])
        return lines
    def stiffness(self):
        # Assembling stiffness matrix. Usual procedure.
        """Assemble and return the (2*nn x 2*nn) global stiffness matrix from
        axial bar elements rotated by each member's inclination."""
        k_stiff = np.zeros((2 * self.nn, 2 * self.nn))
        k_loc = np.zeros((4, 4))
        r = np.zeros((4, 4))
        for i in range(self.nm):
            nodes = self.n_conn[i]
            dofs = self.n_dof[nodes, :].reshape(4)
            L, alpha = self.geo_props[i]
            B, t, A, I, ir = self.sec_props[i]
            E, fy = self.mate_props[i]
            k11 = E * A / L
            k_loc[0, 0] = k11
            k_loc[2, 2] = k11
            k_loc[0, 2] = -k11
            k_loc[2, 0] = -k11
            r[0, 0] = np.cos(alpha)
            r[0, 1] = np.sin(alpha)
            r[1, 0] = -np.sin(alpha)
            r[1, 1] = np.cos(alpha)
            r[2, 2] = np.cos(alpha)
            r[2, 3] = np.sin(alpha)
            r[3, 2] = -np.sin(alpha)
            r[3, 3] = np.cos(alpha)
            # Transform local axial stiffness to global axes: k = R^T k_loc R.
            k_gl = np.dot(np.dot(r.T, k_loc), r)
            for x in range(4):
                for y in range(4):
                    dof1 = int(dofs[x])
                    dof2 = int(dofs[y])
                    k_stiff[dof1, dof2] += k_gl[x, y]
        return k_stiff
    def analyze(self):
        """Recover member axial forces from the current displacement field
        self.u and fill self.util via mem_design()."""
        k_loc = np.zeros((4, 4))
        r = np.zeros((4, 4))
        for i, n in enumerate(self.sec_props):
            nodes = self.n_conn[i]
            dofs = self.n_dof[nodes, :].reshape(4).astype(int)
            u_mem = self.u[dofs, :]
            L, alpha = self.geo_props[i]
            B, t, A, I, ir = n
            E, fy = self.mate_props[i]
            k11 = E * A / L
            k_loc[0, 0] = k11
            k_loc[2, 2] = k11
            k_loc[0, 2] = -k11
            k_loc[2, 0] = -k11
            r[0, 0] = np.cos(alpha)
            r[0, 1] = np.sin(alpha)
            r[1, 0] = -np.sin(alpha)
            r[1, 1] = np.cos(alpha)
            r[2, 2] = np.cos(alpha)
            r[2, 3] = np.sin(alpha)
            r[3, 2] = -np.sin(alpha)
            r[3, 3] = np.cos(alpha)
            f = np.dot(k_loc, np.dot(r, u_mem))
            # Axial force as the mean of the two end forces.
            self.p[i] = (f[2] - f[0]) / 2
            self.util[i] = mem_design(self.p[i], n, self.geo_props[i], self.mate_props[i])
def disp(f, k, n_bc):
    """Solve the partitioned system K_ff u_f = f_f for the free-dof
    displacements, assuming the last ``n_bc`` dofs are the restrained ones.

    f    -- full dof force vector (column, length n)
    k    -- full (n x n) stiffness matrix
    n_bc -- number of restrained dofs (ordered last)
    Returns the free-dof displacement column vector u_f.
    """
    total = len(f)
    free = total - n_bc
    kff = k[:free, :free]            # free-free partition
    krf = k[free:, :free]            # restrained-free partition (reactions)
    ff = f[:free]
    ff.shape = (free, 1)
    uf = np.dot(inv(kff), ff)        # u_f = K_ff^-1 f_f
    _reactions = np.dot(krf, uf)     # support reactions (computed, unused)
    return uf
# def stiff(nn, nm, conn, dof, mate, geo):
# # Stiffness assembly, force assembly and matrix inversion
# k_stiff = np.zeros((2 * nn, 2 * nn))
# k_loc = np.zeros((4, 4))
# r = np.zeros((4, 4))
#
# for i in range(nm):
# nodes = conn[i]
# dofs = dof[nodes, :].reshape(4)
#
# A = geo[i, 0]
# L = geo[i, 3]
# alpha = geo[i, 4]
# E = mate[i, 0]
#
# k11 = E * A / L
# k_loc[0, 0] = k11
# k_loc[2, 2] = k11
# k_loc[0, 2] = -k11
# k_loc[2, 0] = -k11
#
# r[0, 0] = np.cos(alpha)
# r[0, 1] = np.sin(alpha)
# r[1, 0] = -np.sin(alpha)
# r[1, 1] = np.cos(alpha)
# r[2, 2] = np.cos(alpha)
# r[2, 3] = np.sin(alpha)
# r[3, 2] = -np.sin(alpha)
# r[3, 3] = np.cos(alpha)
#
# k_gl = np.dot(np.dot(r.T, k_loc), r)
#
# for x in range(4):
# for y in range(4):
# dof1 = dofs[x]
# dof2 = dofs[y]
# k_stiff[dof1, dof2] += k_gl[x, y]
#
# return k_stiff
def shs_props(B, t):
    """Section properties of a square hollow section (sharp corners).

    B -- outer width [mm], t -- wall thickness [mm]
    Returns (area, second moment of area, radius of gyration).
    """
    inner = B - 2 * t
    sec_A = B * B - inner * inner
    sec_I = (B ** 4 - inner ** 4) / 12
    sec_r = np.sqrt(sec_I / sec_A)
    return sec_A, sec_I, sec_r
def mem_design(N, sec, geo, mat):
    """AISC-style utilization ratio for an SHS truss member.

    N   -- axial force (positive = tension, negative = compression)
    sec -- (B, t, A, I, r) section properties
    geo -- (L, alpha) member length and inclination
    mat -- (E, fy) material properties
    Returns the demand/capacity ratio |N| / (0.9 * Pn).
    """
    B, t, A, I, r = sec
    L, alpha = geo
    E, fy = mat
    if N > 0:
        # Tension: yielding of the gross section.
        Pn = 0.9 * A * fy
        return N / Pn
    else:
        b = B - 2 * t
        lmd = b / t                   # wall slenderness
        lmdr = 1.4 * np.sqrt(E / fy)  # wall slenderness limit
        # Euler buckling stress Fe = pi^2 E / (L/r)^2 (AISC 360 Eq. E3-4).
        # The original divided by (L/r) unsquared, which is dimensionally
        # wrong and grossly overestimates Fe.
        Fe = (np.pi ** 2) * E / (L / r) ** 2
        if L / r < 4.71 * np.sqrt(E / fy):
            Fcr = fy * (0.658 ** (fy / Fe))  # inelastic buckling (E3-2)
        else:
            Fcr = 0.877 * Fe                 # elastic buckling (E3-3)
        # NOTE(review): lmdr already contains sqrt(E/fy); comparing lmd with
        # lmdr*sqrt(E/fy) applies that factor twice -- confirm the intended
        # compact-wall check before tightening it.
        if lmd < lmdr * np.sqrt(E / fy):
            # Section fully compact
            Pn = 0.9 * A * Fcr
            return abs(N / Pn)
        else:
            # Slender walls: effective-area reduction.
            c1 = 0.18
            c2 = 1.31
            Fel = c2 * lmdr * fy / lmd
            U = (1 - c1 * np.sqrt(Fel / fy)) * np.sqrt(Fel / fy)
            Ae = U * A
            Pn = 0.9 * Ae * Fcr
            return abs(N / Pn)
def population(size, param, const, seed=0):
    """Generate `size` random Truss individuals.

    param -- ((h1_min, h1_max, h1_step), (h2_min, h2_max, h2_step),
              (nd_min, nd_max)) genome bounds
    const -- (L, q) shared truss parameters passed through to Truss()
    seed  -- optional RNG seed; 0 leaves the global RNG state untouched
    """
    if seed:
        rnd.seed(seed)
    pop = []
    for _ in range(size):
        h1 = rnd.randrange(param[0][0], param[0][1], param[0][2])
        # Peak height is never below the shoulder height.
        h2 = rnd.randrange(max(h1, param[1][0]), param[1][1], param[1][2])
        n_div = rnd.randint(param[2][0], param[2][1])
        # NOTE(review): diagonal genes are drawn from {0, 1, 2} but consumed
        # as booleans in Truss.__init__ -- confirm the 3-valued range.
        division = [rnd.randrange(0, 3, 1) for j in range(n_div)]
        sec = []
        cnt = len(shs_catalog)
        # One catalog index per member type (8 types).
        for i in range(8):
            sec.append(rnd.randint(0, cnt - 1))
        pop.append(Truss([h1, h2, n_div, division, sec], const))
    return pop
def pop_analyze(trusses):
    """Assemble, solve and post-process every truss in the population."""
    for i in trusses:
        k = i.stiffness()
        # NOTE(review): analyze() runs before i.u is assigned below, so it
        # uses the previous (initially zero) deformation state -- fitness()
        # calls analyze() again afterwards; confirm this ordering is intended.
        i.analyze()
        # Append zeros for the 4 restrained dofs (ordered last).
        u = np.vstack((disp(i.f, k, 4), np.zeros((4, 1))))
        i.u = u
def fitness(trusses):
    """Score each truss: reward low weight, penalize over-utilized members
    (utilization above 0.95) and large member counts."""
    wg = np.zeros((len(trusses), 2))
    for n, i in enumerate(trusses):
        wg[n] = [n, i.W]
        i.analyze()
    ws = wg[wg[:, 1].argsort()]  # rows (index, weight) sorted by weight
    w_pnt = 1000                 # weight score budget
    w_decr = w_pnt / len(trusses)
    util_pnt = 1000              # utilization score factor
    el_pnt = -10                 # per-member penalty
    for n,i in enumerate(trusses):
        # NOTE(review): np.where(ws == i.W) scans BOTH columns of ws and may
        # match an index value or several equal weights, yielding an array --
        # confirm the intended lookup is np.where(ws[:, 1] == i.W)[0][0].
        i.point -= w_decr * np.where(ws == i.W)[0] # Weight Point
        for j in i.util:
            i.point += util_pnt * j * np.sign(0.95 - j) # Utilization Point
        i.point += el_pnt * i.nm # Member Count Point
# Catalog of available square hollow sections as [outer width B (mm),
# wall thickness t (mm)]; indexed by the section genes in each Truss genome.
shs_catalog = [[20, 2], [30, 2], [40, 2], [40, 3], [40, 4], [50, 2], [50, 3], [50, 4], [50, 5], [60, 3], [60, 2],
               [60, 4], [70, 3], [60, 5], [70, 4],
               [70, 5], [80, 3], [80, 4], [80, 5], [80, 6], [90, 3], [90, 4], [90, 5], [90, 6], [100, 5], [100, 4],
               [100, 6], [120, 5], [120, 4],
               [120, 6], [120, 8], [140, 4], [140, 5], [140, 6], [140, 8], [140, 10], [150, 5], [150, 6], [150, 8],
               [160, 4], [150, 10],
               [160, 5], [160, 8], [160, 6], [160, 10], [180, 6], [180, 8], [180, 10], [180, 12.5], [200, 6], [200, 8],
               [200, 10], [200, 12.5],
               [220, 8], [220, 12.5], [220, 10], [250, 6], [250, 10], [250, 8], [250, 12.5], [260, 8], [260, 10],
               [260, 12.5], [300, 6],
               [300, 8], [300, 10], [300, 12.5], [350, 8], [350, 10], [400, 10], [350, 12.5], [400, 12.5]]
# ---- Driver: build a random population, analyze it, rank it and plot ----
parameters = ([1000, 2000, 250], [1000, 4000, 250], [3, 10])  # genome bounds
constraints = (10000, -20)  # (half span L [mm], line load q)
Trusses = population(100, parameters, constraints,1)
pop_analyze(Trusses)
fitness(Trusses)
# Per-individual summary: index, fitness, max utilization, weight,
# member count, span-to-deflection ratio.
gen_sum = np.zeros((100, 6))
for n, i in enumerate(Trusses):
    gen_sum[n] = [int(n), i.point, i.util.max(), i.W, i.nm, i.L/i.u.max()]
# Rank by fitness point.  Renamed from `sorted`, which shadowed the builtin.
ranking = gen_sum[gen_sum[:, 1].argsort()]
print(ranking)
# Draw every truss (undeformed in default color, deformed in red),
# stacked vertically with a 5000 mm offset per individual.
ax = plt.axes()
ax.set_xlim(-1000, 10000 + 1000)
ax.set_ylim(-3000, 1 * (12000 + 1000))
for n, i in enumerate(Trusses):
    lines = i.truss_geo(5000 * n)
    segments = LineCollection(lines, linewidths=2)
    ax.add_collection(segments)
    lines = i.truss_geo(5000 * n, deformed=True, ud=i.u)
    segments = LineCollection(lines, linewidths=2, color="r")
    ax.add_collection(segments)
plt.show()
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import itertools
import os
from typing import Callable
import numpy as np
import pytest
import megengine as mge
import megengine.module.init as init
from megengine.core import tensor
from megengine.functional import cross_entropy_with_softmax, relu
from megengine.jit import trace
from megengine.module import Linear, Module
from megengine.optimizer import SGD, Optimizer
from megengine.test import assertTensorClose
batch_size = 64                  # samples per minibatch
data_shape = (batch_size, 2)     # two input features per sample
label_shape = (batch_size,)      # one integer class label per sample
def minibatch_generator():
    """Yield endless (data, label) minibatches for the sign-quadrant toy task.

    Each sample [x0, x1] is drawn from U[-1, 1]^2; its label is 1 when the
    coordinates share a sign (x0 * x1 >= 0) and 0 otherwise.
    """
    while True:
        inp_data = np.random.rand(batch_size, 2) * 2 - 1
        label = (np.prod(inp_data, axis=1) >= 0).astype(np.int32)
        yield inp_data.astype(np.float32), label.astype(np.int32)
class SimpleNet(Module):
    """Three-layer MLP (2 -> 14 -> 14 -> 2) with scaled normal init.

    NOTE(review): fc0 is Linear(self.num_class, ...); the input width (2)
    equals the class count only for this toy task -- confirm if reused.
    """
    def __init__(self):
        self.mid_layers = 14  # hidden width
        self.num_class = 2
        super().__init__()
        self.fc0 = Linear(self.num_class, self.mid_layers, bias=True)
        fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc0.weight)
        init.normal_(self.fc0.weight, std=np.sqrt(float(1.0) / fan_in))
        init.zeros_(self.fc0.bias)
        self.fc1 = Linear(self.mid_layers, self.mid_layers, bias=True)
        fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc1.weight)
        init.normal_(self.fc1.weight, std=np.sqrt(float(1.0) / fan_in))
        init.zeros_(self.fc1.bias)
        self.fc2 = Linear(self.mid_layers, self.num_class, bias=True)
        fan_in, _ = init.calculate_fan_in_and_fan_out(self.fc2.weight)
        init.normal_(self.fc2.weight, std=np.sqrt(float(1.0) / fan_in))
        init.zeros_(self.fc2.bias)
    def forward(self, x):
        """Return class logits for input batch x."""
        x = self.fc0(x)
        x = relu(x)  # Should use tanh but it's not stable now.
        x = self.fc1(x)
        x = relu(x)  # Should use tanh but it's not stable now.
        x = self.fc2(x)
        return x
def generate_eager_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
    """Return a step(data, label) closure training `net` in eager mode.

    The closure copies the numpy batch into pre-allocated input tensors,
    runs forward/backward/step and returns the scalar loss value.
    """
    data_inp = tensor(np.zeros(data_shape), dtype=np.float32)
    label_inp = tensor(np.zeros(label_shape), dtype=np.int32)
    opt = opt_factory(net)
    def step(data, label):
        opt.zero_grad()
        data_inp.set_value(data)
        label_inp.set_value(label)
        pred = net(data_inp)
        loss = cross_entropy_with_softmax(pred, label_inp)
        opt.backward(loss)
        opt.step()
        return loss.numpy()[0]
    return step
def generate_static_step(net: Module, opt_factory: Callable[[Module], Optimizer]):
    """Return a step(data, label) closure running a precompiled static graph.

    NOTE(review): not exercised by test_eager_equvilence below, and
    mge.graph.compile is a legacy API -- confirm this path is still needed.
    """
    data = tensor(np.zeros(data_shape), dtype=np.float32)
    label = tensor(np.zeros(label_shape), dtype=np.int32)
    opt = opt_factory(net)
    # Save state to reset parameters later.
    state = copy.deepcopy(net.state_dict())
    # Evaluate network in eager mode once.
    pred = net(data)
    loss = cross_entropy_with_softmax(pred, label)
    opt.zero_grad()
    grads = opt.backward(loss)
    f = mge.graph.compile(loss, grads)
    def step(data, label):
        opt.zero_grad()
        out = f(data=data, label=label)
        opt.step()
        loss = out[0][0]
        return loss
    # Reset parameters.
    net.load_state_dict(state)
    return step
def generate_trace_step(
    net: Module, opt_factory: Callable[[Module], Optimizer], enable: bool
):
    """Return a step(data, label) closure using @trace (JIT) training.

    enable -- toggles tracing via train.enabled; False falls back to eager
    execution through the same code path.
    """
    opt = opt_factory(net)
    @trace
    def train(data, label):
        pred = net(data)
        loss = cross_entropy_with_softmax(pred, label)
        opt.zero_grad()
        opt.backward(loss)
        return loss
    train.enabled = enable
    def step(data, label):
        out = train(data, label)
        opt.step()
        loss = out[0][0]
        return loss
    return step
def assert_network_equvilence(nets):
    """Assert all networks carry identical state dicts (same keys, values
    numerically close)."""
    net_state = [net.state_dict() for net in nets]
    for state in net_state[1:]:
        assert len(net_state[0]) == len(state)
    for k, v in net_state[0].items():
        for state in net_state[1:]:
            assert k in state
            assertTensorClose(v, state[k])
@pytest.mark.slow
def test_eager_equvilence():
    """Train identical nets via eager, trace-enabled and trace-disabled
    paths and assert losses and parameters stay equal throughout."""
    eager_net = SimpleNet()
    trace_enable_net = copy.deepcopy(eager_net)
    trace_disable_net = copy.deepcopy(eager_net)
    opt_factory = lambda net: SGD(
        net.parameters(requires_grad=True), lr=0.01, momentum=0.01
    )
    estep = generate_eager_step(eager_net, opt_factory)
    te_step = generate_trace_step(trace_enable_net, opt_factory, True)
    td_step = generate_trace_step(trace_disable_net, opt_factory, False)
    assert_network_equvilence([eager_net, trace_enable_net, trace_disable_net])
    # Use hard code number as limit, may increase if needed.
    for data, label in itertools.islice(minibatch_generator(), 200):
        eloss = estep(data, label)
        te_loss = te_step(data, label)
        td_loss = td_step(data, label)
        assertTensorClose(eloss, te_loss)
        assertTensorClose(eloss, td_loss)
    assert_network_equvilence(
        [eager_net, trace_enable_net, trace_disable_net,]
    )
"""
populates Vivus() with all geometric quantities including
Diameter() which carries (x,y) coordinates of min/max diameter endpoints
and pixel length values
"""
import logging
import numpy as np
from itertools import product as iterp
from . import params as p
mlg = logging.getLogger(__name__)
def xys2dists(x, y, xarr, yarr):
    """Euclidean distance from the origin point (x, y) to every mark point.

    xarr/yarr = x/y-coordinate arrays of the mark points
    """
    dx = xarr - x
    dy = yarr - y
    return (dx * dx + dy * dy) ** 0.5
def getGeoavg(x, y, xarr, yarr):
    """Mean squared distance from (x, y) to the mark points.

    Used as the objective when searching for the geometric center.
    """
    dists = xys2dists(x, y, xarr, yarr)
    return sum(dists * dists) / len(xarr)
def getRawGeocenter(xarr, yarr, imgshape):
    """Rough geometric center: evaluate the mean-squared-distance objective
    on a coarse p.rawGeocenterGrid x p.rawGeocenterGrid grid over the image
    and return the best grid point.

    xarr/yarr = x/y-coordinates of mark points
    """
    # preset points
    # NOTE(review): imgshape[0] is the numpy row count (y extent) but feeds
    # the x grid here -- confirm the axis convention used by callers.
    rawx = np.linspace(0, imgshape[0], p.rawGeocenterGrid)
    rawy = np.linspace(0, imgshape[1], p.rawGeocenterGrid)
    size = len(rawx) * len(rawy)
    avgs = np.zeros(size)
    x, y = np.zeros(size), np.zeros(size)
    i = 0
    for xval, yval in iterp(rawx, rawy):
        avgs[i] = getGeoavg(xval, yval, xarr, yarr)
        x[i], y[i] = xval, yval
        i += 1
    # index of the grid point with the smallest objective
    imin = np.where(avgs == min(avgs))[0][0]
    return x[imin], y[imin]
def getGoodGeocenter(xraw, yraw, xarr, yarr, imgshape):
    """Refine the geocenter: exhaustively search every integer pixel inside
    a circle of radius rGeoCent around the raw geocenter and return the one
    minimizing the mean squared distance to the marks.

    xraw/yraw = x/y-coordinates of raw geocenter
    xarr/yarr = x/y-coordinates of mark points
    """
    # search radius: one coarse-grid cell
    rGeoCent = max(imgshape) / p.rawGeocenterGrid
    avgs, x, y = [], [], []
    # start search within square enclosing the rGeocCent circle
    xmin = int(xraw - rGeoCent)
    xmax = int(xraw + rGeoCent)
    ymin = int(yraw - rGeoCent)
    ymax = int(yraw + rGeoCent)
    for i in range(xmin, xmax):
        for j in range(ymin, ymax):
            # boundary of circle centered on rough geocenter
            xval = i - xraw
            yval = j - yraw
            r2dot = (xval*xval + yval*yval) ** 0.5
            # compute if it falls within the circle
            if r2dot < rGeoCent:
                avg = getGeoavg(i, j, xarr, yarr)
                avgs.append(avg)
                x.append(i)
                y.append(j)
    avgs = np.array(avgs)
    # index of the candidate with the smallest objective
    imin = np.where(avgs == min(avgs))[0][0]
    mlg.info(f'  getGoodGeocenter: found at ({x[imin]},{y[imin]})')
    return x[imin], y[imin]
def getGeocenter(xcln, ycln, imgshape):
    """Compute the geometric center from clean contour coordinates.

    Runs the coarse grid search, then refines within a circle around it.
    """
    xraw, yraw = getRawGeocenter(xcln, ycln, imgshape)
    return getGoodGeocenter(xraw, yraw, xcln, ycln, imgshape)
# DEPRECATED ---------------
def getAreaold(x, y, xarr, yarr):
    """DEPRECATED: superseded by getArea() (Shoelace formula).

    Approximate enclosed area [px**2] as a fan of small triangles around
    the geocenter, assuming marks are evenly distributed in angle.
    x/y = x/y-coordinate of good geocenter
    xarr/yarr = x/y-coordinates of cleaned marks
    """
    area = 0
    # approx angle between neighboring points (assume evenly distributed)
    radangle = 360. / p.contourGrid * np.pi / 180.
    for i in range(len(xarr)):
        # angle between neighboring 2 points
        #A = np.array([xarr[i], yarr[i]])
        #B = np.array([xarr[i+1], yarr[i+1]])
        #CA = A - C
        #CB = B - C
        #cosangle = np.dot(CA, CB) / (np.linalg.norm(CA) * np.linalg.norm(CB))
        #radangle = np.arccos(cosangle)
        xx = abs(xarr[i] - x)
        yy = abs(yarr[i] - y)
        dist = (xx*xx + yy*yy) ** 0.5
        # circular-sector area with the mark distance as radius
        area += 0.5 * radangle * dist * dist
    return area
def getArea(x, y):
    """Enclosed contour area [px**2] via the Shoelace formula.

    x, y = coordinates of clean contour points ordered along the contour
    on the native cartesian image grid
    """
    twice_signed = np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))
    area = np.abs(twice_signed) / 2.0
    mlg.info(f'  getArea: shoelace enclosed = {area:.0f} [px**2]')
    return area
def getPerimeter(x, y):
    """Contour perimeter [px]: sum of segment lengths between consecutive
    points, closing the loop back to the first point.

    x, y = coordinates of clean contour points ordered along the contour
    """
    xy = np.column_stack((x, y))
    nxt = np.roll(xy, -1, axis=0)  # neighbor of each point along the contour
    seg = nxt - xy
    p = np.sum(np.sqrt(seg[:, 0] ** 2 + seg[:, 1] ** 2))
    mlg.info(f'  getPerimeter: {p:.0f} [px]')
    return p
class Diameters():
    """Container for min/max contour diameters.

    vmin/vmax = min/max diameter lengths in pixels
    xmin/ymin = x/y endpoint coordinates of the min diameter (2 elements)
    xmax/ymax = x/y endpoint coordinates of the max diameter (2 elements)
    """
    def __init__(self, vmin, vmax, xmin, ymin, xmax, ymax):
        self.vmin, self.vmax = vmin, vmax
        self.xmin, self.ymin = xmin, ymin
        self.xmax, self.ymax = xmax, ymax
def getDiams(x, y, xarr, yarr):
    """Compute min/max diameters of the contour through the geocenter.

    Maps the marks to polar coordinates around (x, y), resamples distances
    on a 1-degree grid, pairs each angle with its opposite, and returns a
    Diameters() object with lengths and endpoint coordinates.
    """
    # transform into polar coordinates
    z2polar = lambda z: ( np.abs(z), np.angle(z, deg=True) )
    z = (xarr - x) + 1j * (yarr - y)
    dists, angles = z2polar(z)
    angles[angles < 0] += 360
    # map to even angle grid
    newangles = np.arange(360)
    newdists = np.interp(newangles, angles, dists, period=360)
    # check inside half circle
    # NOTE(review): diams pairs angles 0..178 with 180..358, so the 179/359
    # degree pair is never examined; the np.zeros(179) is immediately
    # replaced by the list comprehension -- confirm both are intended.
    diams = np.zeros(179)
    diams = [newdists[i] + newdists[180+i] for i in range(len(diams))]
    imin = np.where(diams == min(diams))[0][0]
    imax = np.where(diams == max(diams))[0][0]
    # min/max values [px]
    vmin, vmax = diams[imin], diams[imax]
    mlg.info(f'  getDiams: Dmin = {vmin:.1f} and Dmax = {vmax:.1f} [px]')
    # x/y coordinates of min/max diameter edges
    def polar2z(alpha, dist, xoffset, yoffset):
        # polar -> cartesian, shifted back to image coordinates
        radangle = alpha * np.pi / 180.
        x = dist * np.cos(radangle) + xoffset
        y = dist * np.sin(radangle) + yoffset
        return x, y
    def getCoords(idx):
        # endpoints of the diameter at angle idx and its opposite
        x1, y1 = polar2z(newangles[idx], newdists[idx], x, y)
        x2, y2 = polar2z(newangles[180+idx], newdists[180+idx], x, y)
        return [x1, x2], [y1, y2]
    xmin, ymin = getCoords(imin)
    xmax, ymax = getCoords(imax)
    return Diameters(vmin, vmax, xmin, ymin, xmax, ymax)
def getReq(perimeter):
    """Radius of the circle whose circumference equals `perimeter`."""
    return perimeter / (2. * np.pi)
def process(v):
    """Populate the input Vivus() object with geometric quantities for both
    the inner and outer contour sets.

    For each contour object: geocenter (x0, y0), enclosed area, perimeter,
    equivalent radius and min/max diameters.
    """
    imgshape = v.img.gray.shape
    def populate(obj):
        # obj carries contour point coordinates in obj.x / obj.y
        x, y = obj.x, obj.y
        xcnt, ycnt = getGeocenter(x, y, imgshape)
        obj.area = getArea(x, y)
        obj.p = getPerimeter(x, y)
        obj.req = getReq(obj.p)
        obj.d = getDiams(xcnt, ycnt, x, y)
        obj.x0 = xcnt
        obj.y0 = ycnt
    # outer
    populate(v.out)
    # inner
    populate(v.inn)
    mlg.info('  process: done')
    return v
import logging
import os
import pickle
from collections import defaultdict
from typing import Dict
import h5py # type: ignore
import numpy as np
from probing_project.utils import Observation
from rich.progress import track
from torch.nn import CrossEntropyLoss
from .pos_task import POSTask
logger = logging.getLogger(__name__)
class POSControlTask(POSTask):
    """Control task for POS probing: every token type gets a random POS tag
    drawn from the corpus tag distribution, fixed once per token."""

    def __init__(self):
        super(POSControlTask, self).__init__()
        self.label_name = "control_pos_tag_labels"
        self.loss = CrossEntropyLoss()
        # token -> random tag id; built lazily in add_task_label_dataset()
        self.control_token2postag_map = None

    @staticmethod
    def _create_random_vocab_map(vocab_file):
        """Return a defaultdict mapping any token to a tag id sampled from
        the pickled tag distribution (sampled once per unseen token)."""
        with open(vocab_file, "rb") as f_vocab:
            pos_vocab = pickle.load(f_vocab)
        choice_keys = []
        choice_p = []
        for k, p in pos_vocab["tag_distribution"].items():
            choice_keys.append(k)
            choice_p.append(p)
        return defaultdict(
            lambda: int(np.random.choice(range(len(choice_keys)), p=choice_p))
        )

    def add_task_label_dataset(
        self, input_conllx_pkl: str, unformatted_output_h5: str, *_, **__
    ):
        """Create one HDF5 label file per split with control POS tags.

        input_conllx_pkl      -- pickle path template with a '{}' split slot
        unformatted_output_h5 -- output .h5 path template with a '{}' slot
        Existing split files are left untouched.
        """
        if not os.path.exists(self.vocab_file):
            self.create_vocab(input_conllx_pkl, self.vocab_file)
        self.control_token2postag_map = self._create_random_vocab_map(self.vocab_file)
        for split in ["train", "dev", "test"]:
            label_h5_file = unformatted_output_h5.format(split)
            if os.path.exists(label_h5_file):
                # Use the module logger; the original called `logging.info`,
                # which logs to the root logger and bypasses this module's
                # logging configuration.
                logger.info(
                    f"File for label {self.label_name} exists for split {split}."
                )
                continue
            logger.info(f"Create label {self.label_name} file for split {split}")
            with open(input_conllx_pkl.format(split), "rb") as in_pkl:
                conllx_observations: Dict[int, Observation] = pickle.load(in_pkl)
            num_lines = len(conllx_observations)
            max_label_len = max(len(ob.index) for ob in conllx_observations.values())
            with h5py.File(label_h5_file, "w", libver="latest") as f_out:
                pos_label = f_out.create_dataset(
                    self.label_name,
                    (num_lines, max_label_len),
                    fillvalue=-1,  # padding value beyond each sentence length
                    dtype="int",
                    chunks=(1, max_label_len),
                    compression=5,
                    shuffle=True,
                )
                for index, ob in track(
                    conllx_observations.items(), description="create CONTROL POS"
                ):
                    sent_l = len(ob.index)
                    for sent_i in range(sent_l):
                        token = ob.sentence[sent_i]
                        pos_label[index, sent_i] = self.control_token2postag_map[token]
            logger.info(f"All split files for label {self.label_name} ready!")
# -*- coding: utf-8 -*-
"""
shepherd.calibration
~~~~~
Provides CalibrationData class, defining the format of the SHEPHERD calibration
data
:copyright: (c) 2019 by Kai Geissdoerfer.
:license: MIT, see LICENSE for more details.
"""
import yaml
import struct
from scipy import stats
import numpy as np
from pathlib import Path
from . import calibration_default
class CalibrationData(object):
    """Represents SHEPHERD calibration data.
    Defines the format of calibration data and provides convenient functions
    to read and write calibration data.

    The data is a nested dict:
    component ("harvesting"/"load"/"emulation") -> channel
    ("voltage"/"current") -> parameter ("gain"/"offset") -> float.
    Args:
        calib_dict (dict): Dictionary containing calibration data.
    """
    def __init__(self, calib_dict: dict):
        self._data = calib_dict
    def __getitem__(self, key: str):
        # dict-style access to a component's calibration sub-dict
        return self._data[key]
    def __repr__(self):
        # YAML dump gives a readable nested representation
        return yaml.dump(self._data, default_flow_style=False)
    @classmethod
    def from_bytestr(cls, bytestr: bytes):
        """Instantiates calibration data based on byte string.
        This is mainly used to deserialize data read from an EEPROM memory.
        Args:
            bytestr (bytes): 96-byte string of 12 big-endian doubles
                (3 components x 2 channels x 2 parameters).
        Returns:
            CalibrationData object with extracted calibration data.
        """
        vals = struct.unpack(">dddddddddddd", bytestr)
        calib_dict = dict()
        counter = 0
        # Unpack in the same fixed order used by to_bytestr().
        for component in ["harvesting", "load", "emulation"]:
            calib_dict[component] = dict()
            for channel in ["voltage", "current"]:
                calib_dict[component][channel] = dict()
                for parameter in ["gain", "offset"]:
                    val = float(vals[counter])
                    if np.isnan(val):
                        raise ValueError(
                            f"{ component } { channel } { parameter } not a valid number"
                        )
                    calib_dict[component][channel][parameter] = val
                    counter += 1
        return cls(calib_dict)
    @classmethod
    def from_default(cls):
        """Instantiates calibration data from default hardware values.

        Gains/offsets are derived by inverting the default ADC/DAC transfer
        functions in the calibration_default module.
        Returns:
            CalibrationData object with default calibration values.
        """
        calib_dict = dict()
        # ADC-side components: invert the channel_to_adc transfer function.
        for component in ["harvesting", "load"]:
            calib_dict[component] = dict()
            for channel in ["voltage", "current"]:
                calib_dict[component][channel] = dict()
                offset = getattr(calibration_default, f"{ channel }_to_adc")(0)
                gain = (
                    getattr(calibration_default, f"{ channel }_to_adc")(1.0)
                    - offset
                )
                calib_dict[component][channel]["offset"] = -float(
                    offset
                ) / float(gain)
                calib_dict[component][channel]["gain"] = 1.0 / float(gain)
        # DAC-side component: invert the dac_to_channel transfer function.
        calib_dict["emulation"] = dict()
        for channel in ["voltage", "current"]:
            calib_dict["emulation"][channel] = dict()
            offset = getattr(calibration_default, f"dac_to_{ channel }")(0)
            gain = (
                getattr(calibration_default, f"dac_to_{ channel }")(1.0)
                - offset
            )
            calib_dict["emulation"][channel]["offset"] = -float(
                offset
            ) / float(gain)
            calib_dict["emulation"][channel]["gain"] = 1.0 / float(gain)
        return cls(calib_dict)
    @classmethod
    def from_yaml(cls, filename: Path):
        """Instantiates calibration data from YAML file.
        Args:
            filename (Path): Path to YAML formatted file containing calibration
                values under a top-level "calibration" key.
        Returns:
            CalibrationData object with extracted calibration data.
        """
        with open(filename, "r") as stream:
            in_data = yaml.safe_load(stream)
        return cls(in_data["calibration"])
    @classmethod
    def from_measurements(cls, filename: Path):
        """Instantiates calibration data from calibration measurements.

        Fits gain/offset per channel by linear regression of reference
        values over measured values.
        Args:
            filename (Path): Path to YAML formatted file containing calibration
                measurement values under "measurements".
        Returns:
            CalibrationData object with extracted calibration data.
        """
        with open(filename, "r") as stream:
            calib_data = yaml.safe_load(stream)
        calib_dict = dict()
        for component in ["harvesting", "load", "emulation"]:
            calib_dict[component] = dict()
            for channel in ["voltage", "current"]:
                calib_dict[component][channel] = dict()
                sample_points = calib_data["measurements"][component][channel]
                x = np.empty(len(sample_points))
                y = np.empty(len(sample_points))
                for i, point in enumerate(sample_points):
                    x[i] = point["measured"]
                    y[i] = point["reference"]
                slope, intercept, _, _, _ = stats.linregress(x, y)
                calib_dict[component][channel]["gain"] = float(slope)
                calib_dict[component][channel]["offset"] = float(intercept)
        return cls(calib_dict)
    def to_bytestr(self):
        """Serializes calibration data to byte string.
        Used to prepare data for writing it to EEPROM.
        Returns:
            Byte string of 12 big-endian doubles, in the same fixed order
            expected by from_bytestr().
        """
        flattened = list()
        for component in ["harvesting", "load", "emulation"]:
            for channel in ["voltage", "current"]:
                for parameter in ["gain", "offset"]:
                    flattened.append(self._data[component][channel][parameter])
        return struct.pack(">dddddddddddd", *flattened)
import numpy as np
import copy
from operator import itemgetter
# from sympy import expand
def rollout_policy_fn(board):
    """a coarse, fast version of policy_fn used in the rollout phase.

    Assigns a uniform-random score to every legal move and returns an
    iterator of (move, score) pairs.
    """
    moves = board.availables
    return zip(moves, np.random.rand(len(moves)))
class MCTSPlayer(object):
    """Game player wrapping a pure-MCTS search engine."""
    def __init__(self, **kwargs) -> None:
        # who_play selects the engine flavor; only 'pure_MCTS' is supported.
        who_play = kwargs.get('who_play', 'pure_MCTS')
        self.mcts = None
        self.player = None
        if who_play == 'pure_MCTS':
            policy_value_fn = kwargs.get('policy_value_fn')
            c_puct = kwargs.get('c_puct', 5)
            n_playout = kwargs.get('n_playout', 2000)
            num_player = kwargs.get('num_player', 3)
            # NOTE(review): policy_value_fn is read but not passed to MCTS(),
            # so the engine always falls back to its naive uniform policy --
            # confirm whether it should be forwarded.
            self.mcts = MCTS(c_puct, n_playout, num_player)
        # pass
    def set_player_ind(self, p):
        """Record this player's seat index."""
        self.player = p
    def reset_player(self):
        """Discard the search tree (fresh root)."""
        self.mcts.update_with_move(-1)
    def get_action(self, board_state):
        """Run the search and return the chosen move.

        Returns None implicitly when the board has no available moves.
        """
        available_move = board_state.availables
        if len(available_move) > 0:
            next_move, _ = self.mcts.get_move(board_state)
            self.mcts.update_with_move(-1)
            return next_move
        else:
            print("board is full")
    def __str__(self) -> str:
        return "base MCTS player"
class MCTS(object):
    """Pure Monte-Carlo Tree Search for an n-player game.

    Uses random rollouts for leaf evaluation and a PUCT-style selection
    rule implemented in TreeNode.
    """
    def __init__(self,
                 c_puct,
                 n_playout,
                 num_player,
                 policy_value_fn=None) -> None:
        """c_puct -- exploration constant; n_playout -- simulations per move;
        num_player -- number of seats; policy_value_fn -- optional policy
        returning ((move, prior) pairs, value), defaults to uniform."""
        self._root = TreeNode(None, 1, num_player, c_puct)
        self.num_player = num_player
        print("num_player", num_player)
        # self._policy_value_fn=None
        if policy_value_fn:
            self._policy_value_fn = policy_value_fn
        else:
            self._policy_value_fn = self.naive_policy_value_fn
        self._c_puct = c_puct
        self._n_playout = n_playout
    def naive_policy_value_fn(self, board_state):
        """Uniform prior over available moves, zero leaf value."""
        available_move = board_state.availables
        len_avail = len(available_move)
        prior = list(np.ones(len_avail) / len_avail)
        return zip(available_move, prior), 0
    def get_move(self, board_state):
        """Run n_playout simulations and return (most-visited move, None)."""
        for i in range(self._n_playout):
            cur_board_state = copy.deepcopy(board_state)
            self._playout(cur_board_state)
        move = max(self._root.children.items(),
                   key=lambda act_node: act_node[1]._n_visits)[0]
        prob =None
        return move, prob
    def _playout(self, board_state):
        """One simulation: select to a leaf, expand, rollout, back up."""
        cur_node = self._root
        # cur_player=board_state.get_current_player()
        # print("playout")
        while (True):
            #search leaf
            # print("node_child",cur_node.children,cur_node.is_leaf())
            if cur_node.is_leaf():
                break
            move, cur_node = cur_node.select()
            # print("move",move)
            board_state.do_move(move)
        end, winner = board_state.game_end()
        # NOTE(review): the policy is evaluated even for terminal states and
        # its leaf value is never used (rollouts decide the winner) --
        # confirm before wiring in a learned policy_value_fn.
        action_prior, leaf_val = self._policy_value_fn(board_state)
        cur_player = board_state.get_current_player()
        if not end:
            # np.random.choice(p=prob)
            # print(action_prior)
            cur_node.expand(action_prior)
            winner = self.simulation_rollout(board_state)
        # else:
        #     print(winner)
        # Back up: the player who moved into this node is one seat behind.
        cur_node.update_recursive(
            winner, (cur_player + (self.num_player - 1)) % self.num_player)
        # cur_node.update_recursive(winner,cur_player)
    def simulation_rollout(self, board_state, limit=1000):
        """Play random moves until the game ends; return the winner
        (-1 for a draw / full board / hitting the move limit)."""
        for i in range(limit):
            available_move = board_state.availables
            if len(available_move) > 0:
                end, winner = board_state.game_end()
                if end:
                    return winner
                # max_action=np.random.choice(available_move)
                action = rollout_policy_fn(board_state)
                max_action = max(action, key=itemgetter(1))[0]
                board_state.do_move(max_action)
            else:
                return -1
        print("reach rollout limit")
        return -1
    def update_with_move(self, last_move):
        """Reuse the subtree under last_move as the new root, or rebuild a
        fresh root when last_move == -1."""
        if last_move == -1:
            self._root = TreeNode(None, 1, self.num_player, self._c_puct)
        else:
            self._root = self._root.children[last_move]
            self._root.parent = None
class TreeNode(object):
    """Node in the MCTS search tree of an n-player game.

    Tracks visit count, running mean value Q, exploration bonus U and the
    prior probability P assigned by the policy.
    """
    def __init__(self, parent, p, num_player, c_puct=5) -> None:
        self.parent = parent
        self.children = {}      # move -> TreeNode
        self._n_visits = 0
        self._Q = 0             # running mean of backed-up values
        self._U = 0             # exploration bonus, refreshed in get_value()
        self._P = p             # prior probability of reaching this node
        self._num_player = num_player
        self._c_puct = c_puct
    def expand(self, action_prior):
        """Add a child for every (action, prior) pair not already present."""
        for move, prior in action_prior:
            if move in self.children:
                continue
            self.children[move] = TreeNode(self, prior, self._num_player,
                                           self._c_puct)
    def update_recursive(self, winner, cur_player, leaf_val=1):
        """Back a playout result up to the root.

        Ancestors are updated first; the acting player rotates back one
        seat per tree level.  winner == -1 denotes a draw.
        """
        if self.parent is not None:
            prev_player = (cur_player + (self._num_player - 1)) % self._num_player
            self.parent.update_recursive(winner, prev_player)
        if winner == -1:
            self.update(0)                                  # draw
        elif winner == cur_player:
            self.update((self._num_player - 1) * leaf_val)  # this seat won
        else:
            self.update(-1 * leaf_val)                      # another seat won
    def is_leaf(self):
        """True while the node has no expanded children."""
        return not self.children
    def select(self, print=False):
        """Return the (move, child) pair maximizing the PUCT value."""
        return max(self.children.items(),
                   key=lambda item: item[1].get_value(toprint=print))
    def get_value(self, toprint=False):
        """PUCT score: Q plus the visit-discounted exploration bonus U."""
        explore = self._c_puct * self._P * np.sqrt(self.parent._n_visits)
        self._U = explore / (1 + self._n_visits)
        if toprint:
            print("Q", self._Q, "U", self._U, self._n_visits)
        return self._U + self._Q
    def update(self, value):
        """Fold one backed-up value into the running mean Q."""
        self._n_visits += 1
        # if self.parent:
        self._Q += 1.0 * (value - self._Q) / self._n_visits
#!/usr/bin/env python3
#
# Converter from Keras saved NN to JSON
"""
____________________________________________________________________
Variable specification file
In additon to the standard Keras architecture and weights files, you
must provide a "variable specification" json file with the following
format:
{
"inputs": [
{"name": variable_name,
"scale": scale,
"offset": offset,
"default": default_value},
...
],
"class_labels": [output_class_1_name, output_class_2_name, ...]
}
where `scale` and `offset` account for any scaling and shifting to the
input variables in preprocessing. The "default" value is optional.
"""
import argparse
import json
import h5py
import numpy as np
def _run():
    """Top level routine: read the three input files, print the JSON net."""
    args = _get_args()
    with open(args.arch_file, 'r') as arch_file:
        arch = json.load(arch_file)
    with open(args.variables_file, 'r') as inputs_file:
        inputs = json.load(inputs_file)
    with h5py.File(args.hdf5_file, 'r') as h5:
        out_dict = {'layers': _get_layers(arch, inputs, h5)}
        out_dict.update(_parse_inputs(inputs))
    print(json.dumps(out_dict, indent=2))
def _get_args():
    """Build the command line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(
        description="Converter from Keras saved NN to JSON",
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    for arg_name, arg_help in [('arch_file', 'architecture json file'),
                               ('variables_file', 'variable spec as json'),
                               ('hdf5_file', 'Keras weights file')]:
        parser.add_argument(arg_name, help=arg_help)
    return parser.parse_args()
# Map Keras activation names to the names used in the output JSON.
# `None` (layer has no 'activation' key) is treated as linear.
_activation_map = {
    'relu': 'rectified',
    'sigmoid': 'sigmoid',
    None: 'linear',
    # TODO: pass through unknown types rather than defining them as
    # themselves?
    'linear': 'linear',
    'softmax': 'softmax',
    'tanh': 'tanh',
}
# __________________________________________________________________________
# Layer converters
#
# Each of these converters takes two arguments:
# - An H5 Group with the layer parameters
# - The number of inputs (for error checking)
#
# Each returns two outputs:
# - A dictionary of layer information which can be serialized to JSON
# - The number of outputs (also for error checking)
def _get_dense_layer_parameters(layer_group, n_in):
"""Get weights, bias, and n-outputs for a dense layer"""
weights = layer_group['param_0']
bias = layer_group['param_1']
assert weights.shape[1] == bias.shape[0]
assert weights.shape[0] == n_in
# TODO: confirm that we should be transposing the weight
# matrix the Keras case
return_dict = {
'weights': np.asarray(weights).T.flatten('C').tolist(),
'bias': np.asarray(bias).flatten('C').tolist(),
'architecture': 'dense'
}
return return_dict, weights.shape[1]
def _get_maxout_layer_parameters(layer_group, n_in):
"""Get weights, bias, and n-outputs for a maxout layer"""
weights = np.asarray(layer_group['param_0'])
bias = np.asarray(layer_group['param_1'])
# checks (note the transposed arrays)
wt_layers, wt_in, wt_out = weights.shape
bias_layers, bias_n = bias.shape
assert wt_out == bias_n
assert wt_in == n_in, '{} != {}'.format(wt_in, n_in)
assert wt_layers == bias_layers
sublayers = []
for nnn in range(weights.shape[0]):
w_slice = weights[nnn,:,:]
b_slice = bias[nnn,:]
sublayer = {
'weights': w_slice.T.flatten().tolist(),
'bias': b_slice.flatten().tolist(),
'architecture': 'dense'
}
sublayers.append(sublayer)
return {'sublayers': sublayers, 'architecture': 'maxout'}, wt_out
def _dummy_parameters(layer_group, n_in):
"""Return dummy parameters"""
return {'weights':[], 'bias':[], 'architecture':'dense'}, n_in
# Dispatch table from (lower-cased) Keras layer class name to converter.
_layer_converters = {
    'dense': _get_dense_layer_parameters,
    'maxoutdense': _get_maxout_layer_parameters,
    'activation': _dummy_parameters,
    'flatten': _dummy_parameters,
}
# __________________________________________________________________________
# master layer converter / inputs function
def _get_layers(network, inputs, h5):
    """Convert every layer in the Keras architecture to a JSON-able dict.

    ``n_out`` threads the current layer width through the loop so each
    converter can check its input dimension; it starts at the number of
    input variables.
    """
    layers = []
    n_out = len(inputs['inputs'])
    # enumerate replaces the old range(len(...)) index loop
    for layer_n, layer_arch in enumerate(network['layers']):
        # get converter for this layer
        convert = _layer_converters[layer_arch['name'].lower()]
        # the weights live in h5 groups named layer_0, layer_1, ...
        layer_group = h5['layer_{}'.format(layer_n)]
        # build the out layer
        out_layer, n_out = convert(layer_group, n_out)
        out_layer['activation'] = _activation_map[
            layer_arch.get('activation')]
        layers.append(out_layer)
    return layers
def _parse_inputs(keras_dict):
inputs = []
defaults = {}
for val in keras_dict['inputs']:
inputs.append({x: val[x] for x in ['offset', 'scale', 'name']})
# maybe fill default
default = val.get("default")
if default is not None:
defaults[val['name']] = default
return {
'inputs': inputs,
'outputs': keras_dict['class_labels'],
'defaults': defaults,
}
# Script entry point: run the converter when invoked from the command line.
if __name__ == '__main__':
    _run()
import pandas as pd
import numpy as np
from sklearn import datasets
from Kmeans_python.fit import fit
# Test function for center
def test_edge():
    """A single cluster over constant columns must land on those values."""
    test_df = pd.DataFrame({'X1': np.zeros(10), 'X2': np.ones(10)})
    centers, labels = fit(test_df, 1)
    print(labels)
    # BUG FIX: the old check compared `a.all() == b.all()`, which reduces
    # both sides to a scalar truth value and tests nothing. Compare the
    # actual center coordinates instead.
    assert np.allclose(np.asarray(centers).ravel(), [0, 1]), \
        "Center of a single cluster should be the column means"
    # With K=1 every point must share the same label.
    assert len(set(np.asarray(labels).ravel())) == 1
# Test function for center
def test_center():
    """Cluster centers should match known results on iris and a line."""
    # Helper data
    iris = datasets.load_iris()  # loading the iris dataset
    features = iris.data
    test_df1 = pd.DataFrame({'X1': features[:, 2], 'X2': features[:, 3]})
    test_df2 = pd.DataFrame({'X1': np.arange(9), 'X2': np.arange(9)})
    # getting centers of clusters
    centers1, _ = fit(test_df2, 1)
    centers2, _ = fit(test_df1, 2)
    # BUG FIX: `a.all() == b.all()` was always true and tested nothing.
    # Sort rows so the comparison is invariant to cluster numbering.
    expected = np.array([[4.92525253, 1.68181818],
                         [1.49215686, 0.2627451]])
    got = np.sort(np.asarray(centers2), axis=0)
    assert np.allclose(got, np.sort(expected, axis=0), atol=1e-6), \
        "Centers did not match"
    assert np.allclose(np.asarray(centers1).ravel(), [4, 4]), \
        "Centers did not match"
# Test function for labels
def test_label():
    """Two well-separated blobs must be split into two consistent clusters."""
    # Helper data
    values = np.concatenate((np.arange(5, 10), np.arange(15, 20)), axis=0)
    test_df3 = pd.DataFrame({'X1': values, 'X2': values})
    # getting the labels for the helper data
    _, labels = fit(test_df3, 2)
    labels = np.asarray(labels).ravel()
    # BUG FIX: `a.all() == b.all()` was always true and tested nothing.
    # Cluster ids are arbitrary, so only check group membership: the first
    # five rows share one label, the last five share the other.
    assert len(set(labels[:5])) == 1, "labels did not match"
    assert len(set(labels[5:])) == 1, "labels did not match"
    assert labels[0] != labels[5], "labels did not match"
def test_exceptions():
    """``fit`` must raise ValueError for malformed data or parameters."""
    # Helper data
    test_df2 = pd.DataFrame({'X1': np.arange(9), 'X2': np.arange(9)})
    test_df5 = pd.DataFrame({'X1': [1, 2, 3, 4], 'X2': [1, 2, "A", "BC"]})

    def _assert_raises_value_error(*args, **kwargs):
        # BUG FIX: the old try/except blocks passed silently even when no
        # exception was raised; here a missing ValueError fails the test.
        try:
            fit(*args, **kwargs)
        except ValueError:
            return
        raise AssertionError(
            "fit should raise ValueError for {} {}".format(args, kwargs))

    _assert_raises_value_error("this is a python package", 2)  # bad data type
    _assert_raises_value_error(test_df2, -2)                   # negative K
    _assert_raises_value_error(test_df5, 2)                    # non-numeric data
    _assert_raises_value_error(test_df2, 1, n_init=0)          # bad n_init
    _assert_raises_value_error(test_df2, 1, max_iter=4.5)      # float max_iter
"""
Analytics Vidhya Jobathon
File Description: Utils + Constants
Date: 27/02/2021
Author: vishwanath.prudhivi@gmail.com
"""
#import required libraries
import pandas as pd
import numpy as np
import logging
import xgboost as xgb
from catboost import CatBoostClassifier, Pool
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, MinMaxScaler, StandardScaler
from sklearn.feature_extraction import FeatureHasher
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import roc_auc_score
from sklearn import manifold
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
#global constants
PRIMARY_KEY = 'ID'        # unique row identifier shared by train and test
TARGET = 'Response'       # binary label column the models predict
#FEATURES = None
# Absolute paths for the raw and processed data sets (local machine layout).
RAW_TRAIN_DATA_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance\train_Df64byy.csv'
RAW_TEST_DATA_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance\test_YCcRUnU.csv'
PROCESSED_TRAIN_DATA_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance\train_processed.csv'
PROCESSED_TEST_DATA_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance\test_processed.csv'
SUBMISSION_FILE_PATH = r'C:\Users\vishw\Desktop\Job Prep\Analytics Vidhya - Health Insurance'
def load_data(path):
    '''
    Function to load data, in this case a csv file
    Inputs:
        1. path - string
    Outputs:
        1. Dataframe read from the csv at ``path``
    '''
    return pd.read_csv(path)
def save_data(data, path):
    '''
    Function to save data, in this case a csv file
    Inputs:
        1. data - Dataframe
        2. path - string
    Outputs:
        1. None (``DataFrame.to_csv`` returns None when given a path)
    '''
    return data.to_csv(path, index=False)
def prepare_data(train_df_raw,test_df_raw,data_prep_dict):
    '''
    Function to process raw data into required modelling data
    Inputs:
        1. train_df_raw - Dataframe
        2. test_df_raw - Dataframe
        3. data_prep_dict - Dictionary; expected keys are 'one_hot_encode',
           'target_encode' and 'standard_scale', each listing column names
    Outputs:
        1. train_df_processed - Dataframe
        2. test_df_processed - Dataframe
    NOTE: both input frames are mutated in place (fillna/replace/map).
    '''
    #quick check to apply data processing on both train and test combined
    #train_df_raw = pd.concat([train_df_raw,test_df_raw],axis = 0)
    #override simple imputer error by manually assigning missing values
    # '-1' keeps the column string-typed until the astype(float) below.
    train_df_raw['Holding_Policy_Duration'].fillna('-1',inplace=True)
    test_df_raw['Holding_Policy_Duration'].fillna('-1',inplace=True)
    train_df_raw.fillna('missing',inplace = True)
    test_df_raw.fillna('missing',inplace = True)
    #modify data values to convert catergorical raw attributes to potential numeric features
    train_df_raw.replace({'14+':'14'},inplace = True)
    train_df_raw['Holding_Policy_Duration'] = train_df_raw['Holding_Policy_Duration'].astype(float)
    test_df_raw.replace({'14+':'14'},inplace = True)
    test_df_raw['Holding_Policy_Duration'] = test_df_raw['Holding_Policy_Duration'].astype(float)
    #freeze data types
    train_df_raw[data_prep_dict['one_hot_encode']] = train_df_raw[data_prep_dict['one_hot_encode']].astype(str)
    test_df_raw[data_prep_dict['one_hot_encode']] = test_df_raw[data_prep_dict['one_hot_encode']].astype(str)
    #target encode required attributes
    # NOTE(review): categories present only in test map to NaN here --
    # confirm downstream models tolerate that.
    for target_encode_col in data_prep_dict['target_encode']:
        encoding_dict = train_df_raw.groupby(target_encode_col)[TARGET].mean().to_dict()
        train_df_raw[target_encode_col] = train_df_raw[target_encode_col].map(encoding_dict)
        test_df_raw[target_encode_col] = test_df_raw[target_encode_col].map(encoding_dict)
    #fill missing Region Codes
    #city_code_means = train_df_raw.groupby(['City_Code'])[TARGET].mean().reset_index()
    #test_df_raw['Region_Code'] = test_df_raw.apply(
    #lambda row: city_code_means[TARGET][city_code_means.City_Code ==
    #    row['City_Code']].values[0]
    #    if row['Region_Code'] not in train_df_raw['Region_Code'].unique() else row['Region_Code'],
    #    axis=1
    #    )
    #define set of transformation steps per raw attribute present in the data
    column_transformer_1 = ColumnTransformer(
        [
            ('one_hot_encode', OneHotEncoder(sparse = False,drop = 'if_binary'), data_prep_dict['one_hot_encode'])
        ],
        remainder = 'passthrough',
        verbose = 'True')
    #build and fit the column transformer on train data
    train_df_processed = column_transformer_1.fit_transform(train_df_raw)
    #apply the column transformer on test data
    test_df_processed = column_transformer_1.transform(test_df_raw)
    #convert numpy arrays into pandas dataframe for further analysis
    train_df_processed_1 = pd.DataFrame(train_df_processed,columns = column_transformer_1.get_feature_names())
    test_df_processed_1 = pd.DataFrame(test_df_processed,columns = column_transformer_1.get_feature_names())
    # Second pass: scale the configured columns, pass everything else through.
    column_transformer_2 = ColumnTransformer(
        [('passthrough','passthrough',[col for col in
                                       train_df_processed_1.columns if col not
                                       in data_prep_dict['standard_scale']]),
         ('standard_scale', StandardScaler(), data_prep_dict['standard_scale'])
         ],
        remainder = 'passthrough',
        verbose = 'True')
    #build and fit the column transformer on train data
    train_df_processed_2 = column_transformer_2.fit_transform(train_df_processed_1)
    #apply the column transformer on test data
    test_df_processed_2 = column_transformer_2.transform(test_df_processed_1)
    #recreate column names in the correct order, to understand feature importances
    # (passthrough columns first, then scaled columns -- matches the
    # transformer order above)
    train_df_processed_out = pd.DataFrame(train_df_processed_2,columns = [col for col in
                                                                          train_df_processed_1.columns if col not
                                                                          in data_prep_dict['standard_scale']] + data_prep_dict['standard_scale'])
    test_df_processed_out = pd.DataFrame(test_df_processed_2,columns = [col for col in
                                                                       train_df_processed_1.columns if col not
                                                                       in data_prep_dict['standard_scale']]+ data_prep_dict['standard_scale'])
    #progress logger
    print('Target encoding completed, return processed data')
    return train_df_processed_out, test_df_processed_out
def train_model(modelling_data, features):
    '''
    Function to train a model using XGBoost
    Inputs:
        1. modelling_data - Dataframe
        2. features - list of strings
    Outputs:
        1. trained_model - fitted GridSearchCV wrapping an XGBClassifier
    '''
    # Hyper-parameter grid: only min_child_weight is actually swept.
    parameters = {
        'nthread': [4],
        'objective': ['binary:logistic'],
        'eval_metric': ['logloss'],
        'learning_rate': [0.05],
        'max_depth': [6],
        'min_child_weight': [9, 10, 11],
        'silent': [1],
        'subsample': [0.8],
        'colsample_bytree': [0.5],
        'n_estimators': [100],
        'missing': [-999],
        'seed': [1337],
    }
    searcher = GridSearchCV(
        xgb.XGBClassifier(use_label_encoder=False),
        parameters,
        n_jobs=5,
        cv=10,
        scoring='roc_auc',
        verbose=2,
        refit=True,
    )
    searcher.fit(modelling_data[features], modelling_data[TARGET])
    print('AUC SCORE ----> ', searcher.best_score_)
    print(searcher.best_params_)
    return searcher
def train_model_catboost(modelling_data,features,categorical_features):
    '''
    Function to train a model using CatBoost
    Inputs:
        1. modelling_data - Dataframe
        2. features - list of strings
        3. categorical_features - list of categorical column names/indices
    Outputs:
        1. trained_model - fitted GridSearchCV wrapping a CatBoostClassifier
    '''
    parameters = {'iterations': [500],
                  'depth': [4, 5, 6],
                  'loss_function': ['Logloss'],
                  'l2_leaf_reg': np.logspace(-20, -19, 3),
                  'leaf_estimation_iterations': [10],
                  # 'eval_metric': ['Accuracy'],
                  # 'use_best_model': ['True'],
                  'logging_level':['Silent'],
                  'random_seed': [42]
                  }
    model = CatBoostClassifier()
    clf = GridSearchCV(model, parameters, n_jobs=5,
                       cv = 10,
                       scoring = 'accuracy',
                       verbose=2, refit=True)
    # BUG FIX: the third positional argument of GridSearchCV.fit is
    # `groups`, not the categorical feature list. Forward the categorical
    # columns to CatBoostClassifier.fit via **fit_params instead.
    clf.fit(modelling_data[features], modelling_data[TARGET],
            cat_features=categorical_features)
    # scoring above is 'accuracy', so report it as such (was "AUC SCORE")
    print('ACCURACY SCORE ----> ', clf.best_score_)
    print(clf.best_params_)
    return clf
def explore_algorithms(modelling_data, features):
    '''
    Spot-check several baseline classifiers with 10-fold CV and print
    each model's mean/std roc_auc.
    Inputs:
        1. modelling_data - Dataframe
        2. features - list of strings
    '''
    # prepare models
    candidates = [('LR', LogisticRegression()),
                  ('LDA', LinearDiscriminantAnalysis()),
                  ('KNN', KNeighborsClassifier()),
                  ('CART', DecisionTreeClassifier()),
                  ('NB', GaussianNB()),
                  ('SVM', SVC())]
    # evaluate each model in turn
    scoring = 'roc_auc'
    results, names = [], []
    for name, model in candidates:
        cv_results = model_selection.cross_val_score(
            model, modelling_data[features], modelling_data[TARGET],
            cv=model_selection.KFold(n_splits=10), scoring=scoring)
        results.append(cv_results)
        names.append(name)
        print("%s: %f (%f)" % (name, cv_results.mean(), cv_results.std()))
def prepare_train_validation_data(modelling_data):
    '''
    Function to generate a validation data set from training data
    Inputs:
        1. modelling_data - Dataframe
    Outputs:
        1. train_df - Dataframe (all but the last 5000 shuffled rows)
        2. validation_df - Dataframe (the last 5000 shuffled rows)
    '''
    # BUG FIX: DataFrame.sample(frac=1) returns a new frame; the old code
    # discarded it, so the train/validation split was never shuffled.
    shuffled = modelling_data.sample(frac=1)
    validation_df = shuffled.tail(5000)
    train_df = shuffled.head(shuffled.shape[0] - 5000)
    return train_df, validation_df
def evaluate(data, label, features, model):
    '''
    Function to evaluate model performance on hold out data
    Inputs:
        1. data - Dataframe
        2. label - string (unused; the module-level TARGET column is scored)
        3. features - list of strings
        4. model - classifier exposing predict_proba
    Outputs:
        1. score - float, ROC AUC on ``data``
    '''
    # Compute the score once instead of re-running predict_proba/roc_auc
    # for the print and again for the return.
    score = roc_auc_score(data[TARGET],
                          model.predict_proba(data[features])[:, 1])
    print('Performance on Holdout Data ---->', score)
    return score
def create_submission(data, features, model, iteration_id):
    '''
    Function to score test data and write a submission csv
    Inputs:
        1. data - Dataframe (mutated: TARGET and PRIMARY_KEY are rewritten)
        2. features - list of strings
        3. model - classifier exposing predict_proba
        4. iteration_id - string
    Outputs:
        1. preds - Dataframe with predictions attached
    '''
    data[TARGET] = model.predict_proba(data[features])[:, 1]
    # int-then-str round trip normalizes ids like 12.0 -> '12'
    data[PRIMARY_KEY] = data[PRIMARY_KEY].astype(int)
    data[PRIMARY_KEY] = data[PRIMARY_KEY].astype(str)
    # BUG FIX: '\s' is an invalid escape sequence (DeprecationWarning and
    # a future SyntaxError); escape the path separator explicitly.
    data[[PRIMARY_KEY, TARGET]].to_csv(
        SUBMISSION_FILE_PATH + '\\submission_{0}.csv'.format(iteration_id),
        index=False)
    return data
def test_prediction(train,test,features):
    """Try to classify train/test samples from total dataframe

    Adversarial-validation check: if a classifier can tell train rows from
    test rows, their feature distributions differ. NOTE: mutates both
    input frames by adding a 'target' column.
    """
    train['target'] = 1
    test['target'] = 0
    combined = pd.concat([train[features+['target']],test[features+['target']]],axis = 0)
    print(combined.shape)
    # Create a target which is 1 for training rows, 0 for test rows
    # Perform shuffled CV predictions of train/test label
    predictions = cross_val_predict(
        ExtraTreesClassifier(n_estimators=100, n_jobs=4),
        combined[[col for col in combined.columns if col not in [PRIMARY_KEY,TARGET,'target']]], combined['target'],
        cv=StratifiedKFold(
            n_splits=10,
            shuffle=True,
            random_state=42
        )
    )
    # Show the classification report
    print(classification_report( combined['target'], predictions))
#############################################################################################################
################################################## IMPORTS ##################################################
#############################################################################################################
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, SGD, RMSprop, Nadam
from tensorflow.keras.losses import CategoricalCrossentropy, SparseCategoricalCrossentropy, BinaryCrossentropy
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout
from tensorflow.keras.utils import to_categorical
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelEncoder
from keras import backend as K
from random import shuffle
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os, math
import getpass
import seaborn as sns; sns.set()
# from trains import Task
# task = Task.init(project_name="DL_CNN_Final_Project", task_name="Test_Model")
# logger = task.get_logger()
##############################################################################################################
################################################## SETTINGS ##################################################
##############################################################################################################
# Category labels of the photo archive; sorted so their indices match the
# alphabetical class ordering used by the data generators.
classes = [ 'Animals',
            'Buildings',
            'Carts',
            'Children',
            'Corpses',
            'German Symbols',
            'Gravestones',
            'Railroad cars',
            'Signs',
            'Snow',
            "Uniforms",
            "Vehicles",
            "Views",
            'Weapons',
            'Women',
            ]
classes = sorted(classes)
# Input image size and training hyper-parameters.
IM_WIDTH, IM_HEIGHT = 150, 150
EPOCHS = 30
BATCH_SIZE = 64*8   # large batch; presumably sized for multi-GPU -- confirm
FC_SIZE = 2048      # width of the first fully-connected head layer
NUM_CLASSES = len(classes)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Dataset location depends on which machine runs the script.
if getpass.getuser() == 'assafsh':
    train_directory = "/mnt/data/Storage/yad-vashem-dataset/data/train"
    validation_directory = "/mnt/data/Storage/yad-vashem-dataset/data/validation"
    test_directory = "/mnt/data/Storage/yad-vashem-dataset/data/test"
else:
    train_directory = os.path.join(BASE_DIR, "data/train")
    validation_directory = os.path.join(BASE_DIR, "data/validation")
    test_directory = os.path.join(BASE_DIR, "data/test")
###############################################################################################################
################################################## FUNCTIONS ##################################################
###############################################################################################################
def categorical_focal_loss(alpha, gamma=2.):
    """
    Softmax version of focal loss.
    When there is a skew between different categories/labels in your data set, you can try to apply this function as a
    loss.
           m
      FL = ∑  -alpha * (1 - p_o,c)^gamma * y_o,c * log(p_o,c)
          c=1
      where m = number of classes, c = class and o = observation
    Parameters:
      alpha -- the same as weighing factor in balanced cross entropy. Alpha is used to specify the weight of different
      categories/labels, the size of the array needs to be consistent with the number of classes.
      gamma -- focusing parameter for modulating factor (1-p)
    Default value:
      gamma -- 2.0 as mentioned in the paper
      alpha -- 0.25 as mentioned in the paper
    References:
        Official paper: https://arxiv.org/pdf/1708.02002.pdf
        https://www.tensorflow.org/api_docs/python/tf/keras/backend/categorical_crossentropy
    Usage:
     model.compile(loss=[categorical_focal_loss(alpha=[[.25, .25, .25]], gamma=2)], metrics=["accuracy"], optimizer=adam)

    Returns a closure suitable for Keras `model.compile(loss=...)`.
    """
    alpha = np.array(alpha, dtype=np.float32)

    def categorical_focal_loss_fixed(y_true, y_pred):
        """
        :param y_true: A tensor of the same shape as `y_pred`
        :param y_pred: A tensor resulting from a softmax
        :return: Output tensor.
        """
        # Clip the prediction value to prevent NaN's and Inf's
        epsilon = K.epsilon()
        y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
        # Calculate Cross Entropy
        cross_entropy = -y_true * K.log(y_pred)
        # Calculate Focal Loss: (1 - p)^gamma down-weights easy examples.
        loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy
        # Compute mean loss in mini_batch (sum over classes, mean over batch)
        return K.mean(K.sum(loss, axis=-1))

    return categorical_focal_loss_fixed
def generators():
    '''
    This function creates a generator for the dataset - generator for train, generator for validation and generator for test
    For each image in the dataset an augmentation is being executed

    NOTE(review): the train generator applies BOTH ResNetV2's
    preprocess_input AND rescale=1./255, which double-transforms pixel
    values; and class_mode='binary' looks inconsistent with the 15-class
    softmax head used elsewhere -- confirm both before relying on these
    generators (the main path loads arrays via yield_from_generators
    instead).
    '''
    # Set train and test data generators
    train_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rescale=1./255,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True
    )
    test_datagen = ImageDataGenerator(
        rescale=1./255
    )
    # Get images from train directory and insert into generator
    train_generator = train_datagen.flow_from_directory(
        train_directory,
        target_size=(IM_WIDTH, IM_HEIGHT),
        batch_size=BATCH_SIZE,
        shuffle=True,
        class_mode='binary'
    )
    # Get images from validation directory and insert into generator
    validation_generator = test_datagen.flow_from_directory(
        validation_directory,
        target_size=(IM_WIDTH, IM_HEIGHT),
        batch_size=BATCH_SIZE,
        shuffle=True,
        class_mode='binary'
    )
    # Get images from test directory and insert into generator
    test_generator = test_datagen.flow_from_directory(
        test_directory,
        target_size=(IM_WIDTH, IM_HEIGHT),
        batch_size=BATCH_SIZE,
        shuffle=True,
        class_mode='binary'
    )
    return train_generator, validation_generator, test_generator
''' End function '''
def _collect_split(dir_paths, per_class_cap, split_name):
    """Load up to ``per_class_cap`` images per class directory.

    Returns a list of ``[image_array, class_index]`` pairs; prints a
    progress line every 100 files, matching the original logging.
    """
    samples = []
    for class_num, path in enumerate(dir_paths):
        for i, child in enumerate(os.listdir(path)):
            if i > per_class_cap:
                break
            if i % 100 == 0:
                print("number of {}: {}".format(split_name, len(samples)))
            img = load_img(os.path.join(path, child),
                           target_size=(IM_HEIGHT, IM_WIDTH, 3))
            samples.append([img_to_array(img), class_num])
    return samples


def _to_arrays(samples, total_cap):
    """Split [image, label] pairs into X/Y arrays, truncated to ``total_cap``.

    The cap keeps the sample count divisible by the batch layout
    (original comment: "only divided by 156"). Pixel values are scaled
    to [0, 1].
    """
    images, labels = [], []
    for image, label in samples:
        if len(images) == total_cap:
            break
        images.append(image)
        labels.append(label)
    return np.array(images) / 255.0, np.array(labels)


def yield_from_generators(train_generator, validation_generator, test_generator):
    """Load the train/validation/test images from disk into numpy arrays.

    The generator arguments are unused and kept only for interface
    compatibility with the caller. The triplicated per-split loading and
    truncation loops of the original are factored into _collect_split /
    _to_arrays; behavior (caps, ordering, shuffling, logging) is unchanged.
    """
    categories_path_train, categories_path_validation, categories_path_test = [], [], []
    for category in classes:
        # directory names use underscores instead of spaces
        if ' ' in category:
            category = category.replace(" ", "_")
        categories_path_train.append(os.path.join(train_directory, category))
        categories_path_validation.append(os.path.join(validation_directory, category))
        categories_path_test.append(os.path.join(test_directory, category))
    train_df = _collect_split(categories_path_train, 79000, "train_df")
    validation_df = _collect_split(categories_path_validation, 9800, "validation_df")
    test_df = _collect_split(categories_path_test, 9800, "test_df")
    # Only the training set is shuffled before truncation.
    shuffle(train_df)
    X_train, Y_train = _to_arrays(train_df, 79000)
    X_validation, Y_validation = _to_arrays(validation_df, 9800)
    X_test, Y_test = _to_arrays(test_df, 9800)
    return X_train, X_validation, X_test, Y_train, Y_validation, Y_test
''' End function '''
def generate_class_weights(train_generator):
    '''
    Compute a class-index -> weight mapping from the hard-coded per-class
    image counts: rare classes get weight log(mu * total / count), floored
    at 1.0. The generator argument is unused and kept for interface
    compatibility.
    '''
    labels_dict = {
        'Animals': 1559,
        'Buildings': 9052,
        'Carts': 1540,
        'Children': 16525,
        'Corpses': 4606,
        "German Symbols": 2476,
        'Gravestones': 5648,
        'Railroad cars': 1018,
        'Signs': 2038,
        'Snow': 1716,
        "Uniforms": 12356,
        "Vehicles": 3036,
        "Views": 8776,
        'Weapons': 1260,
        'Women': 27642
    }
    total_samples = sum(labels_dict.values())
    mu = 0.15
    class_weights_dict = {}
    for name, count in labels_dict.items():
        weight = math.log(mu * total_samples / float(count))
        class_weights_dict[classes.index(name)] = max(1.0, weight)
    print(class_weights_dict)
    return class_weights_dict
''' End function '''
def create_classifier(base_model):
    '''
    Build a classifier on top of ``base_model`` (ResNet): global average
    pooling, two relu dense layers for fine tuning, and a softmax head
    over NUM_CLASSES.
    '''
    head = GlobalAveragePooling2D()(base_model.output)
    head = Dense(FC_SIZE, activation='relu')(head)
    head = Dense(FC_SIZE // 2, activation='relu')(head)
    outputs = Dense(NUM_CLASSES, activation='softmax')(head)
    return Model(base_model.input, outputs)
''' End function '''
def fit_predict(X_train, X_validation, X_test, Y_train, Y_validation, Y_test, train_generator, validation_generator, test_generator, classifier, class_weight_dict, number):
    '''
    Train ``classifier`` on the in-memory arrays, save weights, plot
    accuracy/loss curves, and report confusion matrix + classification
    report on the test set. The generator arguments are unused and kept
    for interface compatibility. ``number`` tags the output filenames.
    '''
    history = classifier.fit(
        X_train,
        Y_train,
        steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
        epochs=EPOCHS,
        validation_data=(X_validation, Y_validation),
        validation_steps=X_validation.shape[0] // BATCH_SIZE,
        shuffle=True,
        callbacks=[tf.keras.callbacks.CSVLogger('training_{}.log'.format(number))],
        class_weight=class_weight_dict,
        use_multiprocessing=True,
        workers=8,
    )
    classifier.save_weights('train_without_base_model.h5')
    # Accuracy curve (train vs validation) per epoch.
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title("model accuracy")
    plt.ylabel("accuracy")
    plt.xlabel("epoch")
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('./plots/accuracy_plot_{}.png'.format(number))
    plt.clf()
    # Loss curve (train vs validation) per epoch.
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title("model loss")
    plt.ylabel("loss")
    plt.xlabel("epoch")
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('./plots/loss_plot_{}.png'.format(number))
    plt.clf()
    print("====================================================")
    history_evaulate = classifier.evaluate(X_validation, Y_validation)
    print("model evaulation on test:")
    print(history_evaulate)
    print("====================================================")
    # Hard test-set predictions: argmax over the softmax outputs.
    Y_pred = classifier.predict(X_test)
    y_pred = np.argmax(Y_pred, axis=1)
    print("====================================================")
    print("Confusion matrix:")
    conf = confusion_matrix(Y_test, y_pred)
    print(conf)
    plt.figure(figsize=(20,20))
    ax = plt.axes()
    sns.heatmap(conf, ax=ax, xticklabels=classes, yticklabels=classes, linewidths=0.5, annot=True, fmt='d')
    ax.set_title('Confunsion Matrix')
    # Work around the matplotlib 3.1.1 heatmap clipping bug by padding ylim.
    b,t = plt.ylim()
    b += 0.5
    t -= 0.5
    plt.ylim(b,t)
    plt.savefig('./plots/confusion_matrix{}.png'.format(number))
    plt.clf()
    print("====================================================")
    print("Classification report:")
    class_report = classification_report(Y_test, y_pred, target_names=classes)
    print(class_report)
    with open("classification_report{}.log".format(number), 'w') as f:
        f.write(class_report)
''' End function '''
##########################################################################################################
################################################## MAIN ##################################################
##########################################################################################################
def main():
    """Two-phase training: transfer learning with a frozen ResNet152V2
    backbone, then full fine tuning with every layer unfrozen."""
    # MirroredStrategy replicates the model across all visible GPUs.
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        train_generator, validation_generator, test_generator = generators()
        class_weight_dict = generate_class_weights(train_generator)
        X_train, X_validation, X_test, Y_train, Y_validation, Y_test = yield_from_generators(train_generator, validation_generator, test_generator)
        # Set ResNet to be base model
        base_model = ResNet152V2(weights="imagenet", include_top=False)
        classifier = create_classifier(base_model)
        # Freeze all base model layers
        for layer in base_model.layers:
            layer.trainable = False
        # NOTE(review): alpha has 3 entries but there are 15 classes --
        # confirm the focal-loss alpha broadcasting is intended.
        classifier.compile(optimizer=Adam(), loss=[categorical_focal_loss(alpha=[[.25, .25, .25]], gamma=2)], metrics=['accuracy'])
        classifier.summary()
        print("Transfer learning")
        fit_predict(X_train, X_validation, X_test, Y_train, Y_validation, Y_test, train_generator, validation_generator, test_generator, classifier, class_weight_dict, 0)
        # Unfreeze all base model layers
        for layer in base_model.layers:
            layer.trainable = True
        # Recompile so the new trainable flags take effect for fine tuning.
        classifier.compile(optimizer=Adam(), loss=[categorical_focal_loss(alpha=[[.25, .25, .25]], gamma=2)], metrics=['accuracy'])
        classifier.summary()
        print("Fine Tuning")
        fit_predict(X_train, X_validation, X_test, Y_train, Y_validation, Y_test, train_generator, validation_generator, test_generator, classifier, class_weight_dict, 1)
import copy
import time
import numpy as np
from ray.rllib.agents.pg import PGTrainer, PGTorchPolicy
from marltoolbox.envs.matrix_sequential_social_dilemma import IteratedPrisonersDilemma
from marltoolbox.examples.rllib_api.pg_ipd import get_rllib_config
from marltoolbox.utils import log, miscellaneous
from marltoolbox.utils import rollout
# Every step of the fake environment pays this fixed reward.
CONSTANT_REWARD = 1.0
# Episode length configured for the fake IPD environment.
EPI_LENGTH = 33
class FakeEnvWtCstReward(IteratedPrisonersDilemma):
    """IPD environment whose per-player rewards are all CONSTANT_REWARD."""

    def step(self, actions: dict):
        observations, rewards, epi_is_done, info = super().step(actions)
        rewards = {player: CONSTANT_REWARD for player in rewards}
        return observations, rewards, epi_is_done, info
def make_FakePolicyWtDefinedActions(list_actions_to_play):
    """Build a policy class that replays ``list_actions_to_play`` in order.

    Each ``compute_actions`` call consumes the next scripted action from
    the shared list; exhausting the list raises IndexError.
    """
    class FakePolicyWtDefinedActions(PGTorchPolicy):
        def compute_actions(self, *args, **kwargs):
            next_action = list_actions_to_play.pop(0)
            return np.array([next_action]), [], {}

    return FakePolicyWtDefinedActions
def init_worker(actions_list=None):
    """Build a PGTrainer on the constant-reward fake env and return its local worker.

    When *actions_list* is given, each player's policy class is swapped for a
    scripted policy that replays (its own deep copy of) those actions.
    """
    debug, stop_iters, use_tf = True, 200, False
    seeds = miscellaneous.get_random_seeds(1)  # one replicate
    # side effect: sets up the logging directory for this run
    exp_name, _ = log.log_in_current_day_dir("testing")
    rllib_config, _stop_config = get_rllib_config(seeds, debug, stop_iters, use_tf)
    rllib_config['env'] = FakeEnvWtCstReward
    rllib_config['env_config']['max_steps'] = EPI_LENGTH
    rllib_config['seed'] = int(time.time())
    if actions_list is not None:
        for player_id in FakeEnvWtCstReward({}).players_ids:
            policy_spec = list(rllib_config['multiagent']["policies"][player_id])
            # slot 0 of the policy spec is the policy class
            policy_spec[0] = make_FakePolicyWtDefinedActions(copy.deepcopy(actions_list))
            rllib_config['multiagent']["policies"][player_id] = policy_spec
    trainer = PGTrainer(rllib_config)
    return trainer.workers._local_worker
def test_rollout_constant_reward():
    """internal_rollout must record CONSTANT_REWARD for every player on every step."""
    policy_agent_mapping = (lambda policy_id: policy_id)

    def assert_(rollout_length, num_episodes):
        worker = init_worker()
        results = rollout.internal_rollout(worker,
                                           num_steps=rollout_length,
                                           policy_agent_mapping=policy_agent_mapping,
                                           reset_env_before=True,
                                           num_episodes=num_episodes)
        # The rollout stops on whichever limit was hit first.
        hit_step_limit = results._total_steps == rollout_length
        hit_episode_limit = results._num_episodes == num_episodes
        assert hit_episode_limit or hit_step_limit
        last_epi_steps = results._current_rollout
        if hit_step_limit:
            n_steps_in_last_epi = results._total_steps % EPI_LENGTH
        elif hit_episode_limit:
            n_steps_in_last_epi = EPI_LENGTH
        # Rewards of the last (possibly partial) episode.
        for policy_id in worker.env.players_ids:
            last_rewards = [step[3][policy_id] for step in last_epi_steps]
            assert sum(last_rewards) == n_steps_in_last_epi * CONSTANT_REWARD
            assert len(last_rewards) == n_steps_in_last_epi
        # Rewards across every collected episode.
        all_steps = [step for epi_rollout in results._rollouts for step in epi_rollout]
        expected_n = min(rollout_length, num_episodes * EPI_LENGTH)
        for policy_id in worker.env.players_ids:
            rewards = [step[3][policy_id] for step in all_steps]
            assert sum(rewards) == expected_n * CONSTANT_REWARD
            assert len(rewards) == expected_n

    assert_(rollout_length=20, num_episodes=1)
    assert_(rollout_length=40, num_episodes=1)
    assert_(rollout_length=77, num_episodes=2)
    assert_(rollout_length=77, num_episodes=3)
    assert_(rollout_length=6, num_episodes=3)
def test_rollout_specified_actions():
    """internal_rollout must replay exactly the scripted actions, in order."""
    policy_agent_mapping = (lambda policy_id: policy_id)

    def assert_(rollout_length, num_episodes, actions_list):
        worker = init_worker(actions_list=actions_list)
        rollout_results = rollout.internal_rollout(worker,
                                                   num_steps=rollout_length,
                                                   policy_agent_mapping=policy_agent_mapping,
                                                   reset_env_before=True,
                                                   num_episodes=num_episodes)
        # The rollout must have stopped on the step limit or the episode limit.
        assert rollout_results._num_episodes == num_episodes or rollout_results._total_steps == rollout_length
        steps_in_last_epi = rollout_results._current_rollout
        if rollout_results._total_steps == rollout_length:
            # NOTE: this is 0 when rollout_length is an exact multiple of
            # EPI_LENGTH; the inputs below never hit that case.
            n_steps_in_last_epi = rollout_results._total_steps % EPI_LENGTH
        elif rollout_results._num_episodes == num_episodes:
            n_steps_in_last_epi = EPI_LENGTH
        # Verify actions across all collected episodes.
        all_steps = []
        for epi_rollout in rollout_results._rollouts:
            all_steps.extend(epi_rollout)
        for policy_id in worker.env.players_ids:
            actions_played = [step[1][policy_id] for step in all_steps]
            assert len(actions_played) == min(rollout_length, num_episodes * EPI_LENGTH)
            # BUGFIX: the debug print used actions_list[1:1 + len(all_steps)],
            # offset by one w.r.t. the slice asserted against just below.
            print(actions_list[:len(all_steps)], actions_played)
            for action_required, action_played in zip(actions_list[:len(all_steps)], actions_played):
                assert action_required == action_played
        # Verify actions of the last (possibly partial) episode.
        for policy_id in worker.env.players_ids:
            actions_played = [step[1][policy_id] for step in steps_in_last_epi]
            assert len(actions_played) == n_steps_in_last_epi
            actions_required_during_last_epi = actions_list[:len(all_steps)][-n_steps_in_last_epi:]
            for action_required, action_played in zip(actions_required_during_last_epi, actions_played):
                assert action_required == action_played

    assert_(rollout_length=20, num_episodes=1, actions_list=[0, 1] * 100)
    assert_(rollout_length=40, num_episodes=1, actions_list=[1, 1] * 100)
    assert_(rollout_length=77, num_episodes=2, actions_list=[0, 0] * 100)
    assert_(rollout_length=77, num_episodes=3, actions_list=[0, 1] * 100)
    assert_(rollout_length=6, num_episodes=3, actions_list=[1, 0] * 100)
import os
from abc import ABC, abstractmethod
from pathlib import Path
from configobj import ConfigObj
from lmfit.models import LorentzianModel, QuadraticModel, LinearModel, ConstantModel, PolynomialModel
from matplotlib import pyplot as plt
from scipy.signal import savgol_filter
try:
from plot_python_vki import apply_style
apply_style()
except ImportError:
pass
class GenericFit(ABC):
    """
    Generic Fit class

    This abstract class is used as base for Raman and XRD specialized classes.

    Attributes
    ----------
    experimental_data : df
        experimental data
    peaks : list
        list of peaks to be fit
    other_data : dict or ConfigObj
        extra options for the fit (normalization flag, background type, ...)
    folder_out : Path
        folder to save the reports from the fit
    """

    def __init__(self, experimental_data=None, peaks=None, other_data=None, folder_out=None):
        """
        Parameters
        ----------
        experimental_data: df with experimental data
        peaks: list of peaks to be retrieved
        other_data: dict/ConfigObj with extra fit options, if needed
        folder_out: str folder where report will be saved.
        """
        if peaks is None:
            self.peaks = []
        else:
            self.peaks = peaks
        if other_data is not None:
            self.other_data = other_data
        else:  # here we can add some default values in a dictionary
            self.other_data = dict()
            self.other_data['_normalize_data'] = True
            # BUGFIX: create_bkg_model() looks up 'poly_type', but previously
            # only a 'bkg' key was set here, so default construction raised
            # KeyError. Both keys are now set ('bkg' kept in case a subclass
            # reads it).
            self.other_data['poly_type'] = 'quadratic'
            self.other_data['bkg'] = 'quadratic'
        if folder_out is not None:
            self.folder_out = Path(folder_out)
        else:  # output folder for the fitting report, which is not used normally
            self.folder_out = Path('out_report')
        # create the out folder. if it exists, just pass
        os.makedirs(self.folder_out, exist_ok=True)
        self.experimental_data = experimental_data
        # extract the experimental data into two variables. Gets extended in inheritance
        self.var_x = None  # name of the variable x
        self.var_y = None  # name of the variable y
        self.x = None  # values of x
        self.y = None  # values of y (intensity counts)
        self.model = None  # lmfit composite model: background + peaks
        self.params = None  # lmfit Parameters of self.model
        self.filename = None  # base name used for report/plot files
        self.dict_tolerances_fit = None  # filled by subclasses via set_tolerances_fit()

    def apply_normalize(self):
        """
        performs min-max normalization of self.y in place.
        """
        self.y = self._normalize_data(self.y)

    def apply_smoothing(self):
        """
        performs smoothing of self.y in place using a Savitzky-Golay filter.

        Window size and polynomial order come from other_data ('window_size',
        'poly_order') with defaults 15 and 3.
        """
        win_size = self._try_get_other_data(self.other_data, 'window_size', default_value=(15,))[0]
        poly_order = self._try_get_other_data(self.other_data, 'poly_order', default_value=(3,))[0]
        self.y = self._sav_gol(self.y, win_size=win_size, poly_order=poly_order)

    @abstractmethod
    def set_tolerances_fit(self):
        """Subclasses must fill self.dict_tolerances_fit with the fit bounds."""

    def build_fitting_model_peaks(self):
        """
        Builds the fitting model with parameters.
        The model is the background model plus one Lorentzian per entry in
        self.peaks; bounds come from self.dict_tolerances_fit.
        :return:
        """
        model, params = self.create_bkg_model()
        for i, cen in enumerate(self.peaks):
            peak, pars = self._add_peak('lz%d' % (i + 1), cen, amplitude=self.dict_tolerances_fit['amplitude'],
                                        sigma=self.dict_tolerances_fit['sigma'],
                                        tolerance_center=self.dict_tolerances_fit['tolerance_center'],
                                        min_max_amplitude=self.dict_tolerances_fit['min_max_amplitude'],
                                        min_max_sigma=self.dict_tolerances_fit['min_max_sigma'])
            model = model + peak
            params.update(pars)
        self.model = model
        self.params = params

    def run_fit_model(self):
        """
        Perform the fit and store the lmfit result and its evaluated components.
        """
        result, components = self._fit_lorentzians(self.x, self.y, self.model, self.params)
        self.result = result
        self.components = components

    def save_results(self):
        """
        Saves 2 types of files:
            report file : with a lot of data
            params file : with the actual parameters and their std.
        """
        # save fit report to a file:
        with open(f'{self.folder_out / self.filename}_report', 'w') as fh:
            fh.write(self.result.fit_report())
        # NOTE(review): unlike the report, the params file is written to the
        # current working directory, not folder_out — confirm this is intended.
        with open(f'{self.filename}_params.txt', 'w') as fh:
            for key in self.result.params:
                fh.write(key + " = " + str(self.result.params[key].value) + '\n')
                fh.write(key + '_stderr = ' + str(self.result.params[key].stderr) + '\n')

    def plot_results(self):
        """
        Plots the results of the fit and saves the figure as <filename>.png
        in the current working directory.
        """
        plt.plot(self.x, self.y, label='data')
        plt.plot(self.x, self.result.best_fit, label='best fit')
        for name, component in self.components.items():
            if isinstance(component, float):
                # a constant background component evaluates to a scalar
                plt.axhline(component, linestyle='--', label=name)
            else:
                plt.plot(self.x, component, linestyle='--', label=name)
        plt.xlabel(self.var_x)
        plt.ylabel(self.var_y)
        plt.legend(loc='upper right')
        plt.savefig(self.filename + '.png')
        plt.close()

    def create_bkg_model(self):
        """
        Creates a bkg model for removing the background from the signals.
        Gets the type from other_data['poly_type']; falls back to quadratic.
        :return: model lmfit for the bkg function.
        :return: params lmfit parameters to be adjusted.
        """
        # robust lookup: a missing 'poly_type' key used to raise KeyError
        poly_type = self.other_data.get('poly_type', 'quadratic')
        bkg_model = self._choose_bkg_model(poly_type)
        model = bkg_model[0](**bkg_model[1])
        # BUGFIX: the initial values must be expanded as keyword arguments;
        # passing the dict positionally bound it to make_params(verbose=...).
        params = model.make_params(**bkg_model[2])
        return model, params

    #########
    # the static methods in the following are basically the ones doing the tasks.
    # The underscore is to treat them as private
    #########
    @staticmethod
    def _add_peak(prefix, center, amplitude, sigma, tolerance_center,
                  min_max_amplitude, min_max_sigma):
        """
        adds a peak using a LorentzianModel from lmfit. Peaks can be summed as a linear combination
        :param prefix: str
            name of the peak
        :param center: float
            center location
        :param amplitude: float
            amplitude of the peak
        :param sigma: float
            controls shape of the peak
        :param min_max_amplitude: tuple
            for the amplitude of the peak
        :param tolerance_center: float
            plus minus this quantity for the peak center location
        :param min_max_sigma: tuple
            for the sigma of the peak
        :return: peak lmfit model with the peak and its properties.
        :return: pars lmfit parameters to be adjusted.
        """
        peak = LorentzianModel(prefix=prefix)  # created a lorentzian function
        pars = peak.make_params()
        # lmfit parameter names are the prefix directly followed by the
        # basename, e.g. 'lz1center'.
        pars[prefix + 'center'].set(center, min=center - tolerance_center, max=center + tolerance_center)
        pars[prefix + 'amplitude'].set(amplitude, min=min_max_amplitude[0], max=min_max_amplitude[1])
        pars[prefix + 'sigma'].set(sigma, min=min_max_sigma[0], max=min_max_sigma[1])
        return peak, pars

    @staticmethod
    def _fit_lorentzians(x, y, model, params):
        """
        Fits the lorentzians to the experimental data.
        :param x: 1D array like
            with the x values, namely 2theta or raman displacement
        :param y: 1D array like
            with intensity counts
        :param model: lmfit model
            to be fit
        :param params: lmfit params
            to be adjusted
        :return: result, components
        """
        # (an unused pre-fit model.eval() call was removed here)
        result = model.fit(y, params, x=x)
        components = result.eval_components()
        return result, components

    @staticmethod
    def _choose_bkg_model(poly_type):
        """
        Selects a bkg model for the fit. If not available, it will use the default quadratic.
        :param poly_type: str
            Type of bkg: linear, quadratic, constant, cubic.
        :return: lmfit model
            the bkg model to be added in the fitting.
        """
        poly_type = poly_type.lower()  # to avoid typos
        poly_type_dict = {
            'quadratic': (QuadraticModel, {'prefix': 'bkg'}, {'a': 0, 'b': 0, 'c': 0}),
            'linear': (LinearModel, {'prefix': 'bkg'}, {'intercept': 0, 'slope': 0}),
            'constant': (ConstantModel, {'prefix': 'bkg'}, {'c': 0}),
            # BUGFIX: PolynomialModel(degree=3) exposes coefficients c0..c3,
            # not a single 'c'.
            'cubic': (PolynomialModel, {'prefix': 'bkg', 'degree': 3},
                      {'c0': 0, 'c1': 0, 'c2': 0, 'c3': 0}),
        }
        try:
            bkg_model = poly_type_dict[poly_type]
        except KeyError:
            print('Background model not available, using quadratic')
            bkg_model = poly_type_dict['quadratic']
        return bkg_model

    @staticmethod
    def _try_get_other_data(other_data, string_to_find, default_value):
        """
        This method tries to get the default data for a given property. If it does not find it, the value returned
        will be the default one.
        :param other_data: dict
            dictionary with extra data passed
        :param string_to_find: str
            parameter to find
        :param default_value: tuple or float or else
            default value if the string is not found
        :return: list_numbers
            either a list of numbers, or float, or else, corresponding to the values specified for the quantity.
        """
        try:
            data_requested = other_data[string_to_find]
        except KeyError:
            list_numbers = default_value
            print(f'{string_to_find} range not found, set to default: {default_value}')
            return list_numbers
        if isinstance(data_requested, str):
            # a single value parsed from a config file arrives as a str
            data_requested = (data_requested,)
        list_numbers = tuple(map(float, data_requested))
        return list_numbers

    @staticmethod
    def _sav_gol(intensity_data, win_size=11, poly_order=4):
        """
        applies the savgol_filter for a 1D data. set as static method for convenience.
        :param intensity_data:
            1D array with the original data
        :return: 1D array
            with data smoothed
        """
        data_smoothed = savgol_filter(intensity_data, window_length=int(win_size), polyorder=int(poly_order), axis=0)
        return data_smoothed

    @staticmethod
    def _normalize_data(intensity_data):
        """
        Here we normalize as z = (z - min(z))/(max(z)-min(z)).
        :param intensity_data:
            1D array with the original data
        :return: intensity_data_scaled:
            scaled intensity data
        """
        min_intensity = min(intensity_data)
        max_intensity = max(intensity_data)
        intensity_data_scaled = (intensity_data - min_intensity) / (max_intensity - min_intensity)
        return intensity_data_scaled

    @staticmethod
    def read_otherdata_configfile(config_file, default_config_file, default_folder=None):
        """
        Read if there is any extra data in the configfile
        :param config_file: str
            name of the file with the extra data
        :param default_config_file: str
            fallback file used when config_file has no 'other data' section
        :param default_folder: str or Path, optional
            folder containing the fallback file (defaults to the packaged
            'peak_files' folder)
        :return: dict
            with the other data.
        """
        config = ConfigObj(config_file)
        other_data = config.get('other data')
        if not other_data:
            # BUGFIX: honour a caller-supplied default_folder instead of
            # unconditionally overwriting it with the packaged folder.
            if default_folder is None:
                default_folder = Path(os.path.dirname(__file__)) / 'peak_files'
            config = ConfigObj(str(default_folder / default_config_file))
            other_data = config.get('other data')
        return other_data

    @staticmethod
    def read_peaks_configfile(config_file, default_peaks_file, default_folder=None):
        """
        Alternate constructor from configobj file
        :param config_file: configobj file with a 'peaks' entry
        :param default_peaks_file: fallback file used when config_file is
            missing or corrupted
        :param default_folder: folder containing the fallback file
        :return: sorted list of peak positions (floats)
        """
        if default_folder is None:
            default_folder = Path(os.path.dirname(__file__)) / 'peak_files'
        try:
            config = ConfigObj(config_file)
            # get the peaks, transform them to floats, and put them in a list, then sort the list
            peaks = list(map(float, config['peaks']))
            peaks.sort()
        except Exception:  # was a bare except; still broad on purpose (missing/corrupt file)
            print(
                'Data peaks not found or corrupted, using the one, which is in the ramanpy folder')
            config = ConfigObj(str(default_folder / default_peaks_file))
            # get the peaks, transform them to floats, and put them in a list, then sort the list
            peaks = list(map(float, config['peaks']))
            peaks.sort()
        return peaks
"""
Data preparation for Pendigits data.
The result of this script is input for the workshop participants.
This dataset has only numerical data (16 columns), with little meaning (originating from
downsampling coordinates in time from digits written on a digital pad)
Done here:
- mapping of outliers: b'yes'/b'no' to 1/0
- shuffling of data
Necessary preparation during the workshop:
- Nothing
"""
import pandas as pd
from outlierutils import reduce_mem_usage
from scipy.io import arff
## Path definitions
X_PATH = 'data/x_pendigits.pkl'
Y_PATH = 'data/y_pendigits.pkl'
pendigits_path = r'data/PenDigits_withoutdupl_norm_v01.arff'

## Load the ARFF data and tidy it up
records = arff.loadarff(pendigits_path)[0]
frame = pd.DataFrame(records).drop(columns=['id'])
# Map the byte-string outlier labels onto 1/0.
frame['outlier'] = frame['outlier'].map({b"'yes'": 1, b"'no'": 0})
# Shuffle reproducibly, then renumber the rows.
frame = frame.sample(frac=1, random_state=2718).reset_index(drop=True)

## Pickle the output: features and labels separately
frame.drop(columns='outlier').to_pickle(X_PATH)
frame['outlier'].to_pickle(Y_PATH)
print('Written output to: {}'.format(X_PATH))
import numpy as np
import matplotlib.pyplot as plt
import os, random
import json
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
from collections import OrderedDict
from PIL import Image
import time
import matplotlib.image as mp
import argparse
import essentials
# Command-line driver for training an image classifier through the project's
# `essentials` helpers.
p = argparse.ArgumentParser()
p.add_argument('--data_dir', type=str, default='ImageClassifier/flowers')
# NOTE(review): store_true normally pairs with default=False; here the default
# is the truthy string 'cuda', so arg.gpu is truthy whether or not the flag is
# passed — confirm how essentials interprets this value.
p.add_argument('--gpu', action='store_true', default='cuda')
p.add_argument('--epochs', type=int, default=2)
p.add_argument('--arch', type=str,default='vgg16')
p.add_argument('--learning_rate', type=float, default=0.001)
p.add_argument('--checkpoint', type=str)
# NOTE(review): type=float with a loss-module default is inconsistent: passing
# --criterion on the CLI would produce a float, not a loss object — verify intent.
p.add_argument('--criterion', type=float, default=nn.NLLLoss())
p.add_argument('--save_file', type=str, default='classifier.pth')
p.add_argument('--hiddenlayer1',type=int,default=4096)
p.add_argument('--hiddenlayer2', type=int,default=102)
arg = p.parse_args()
train_loader,test_loader,valid_loader = essentials.data_loader(arg)
# NOTE(review): the freshly built model is immediately replaced by the one
# restored from arg.save_file; the line below looks redundant — confirm.
model = essentials.model(arg.arch,arg.hiddenlayer1,arg.hiddenlayer2)
model = essentials.model_loader(arg.save_file,arg.hiddenlayer1,arg.hiddenlayer2,arg.arch)
# Only the classifier head's parameters are optimized.
optimizer = optim.Adam(model.classifier.parameters(), lr=arg.learning_rate)
model = essentials.model_trainer(model,arg.epochs,train_loader,valid_loader,arg.criterion,optimizer, arg.gpu)
# NOTE(review): evaluation runs on train_loader, not test_loader — confirm
# this is intended.
essentials.model_tester(train_loader,model, arg.gpu)
checkpoint = essentials.save(model,arg)
import abc
import logging
import pprint
import random
import typing
from operator import itemgetter
from numpy.random import RandomState
import d3m.exceptions as exceptions
from .template_hyperparams import Hyperparam
# Module-level logger for this package.
_logger = logging.getLogger(__name__)
# Distinct nominal type for search-dimension names (plain str at runtime).
DimensionName = typing.NewType('DimensionName', str)
class ConfigurationSpace():
    """Search space: the candidate values (and weights) for every search dimension.

    NOTE(review): @abc.abstractmethod has no effect without an ABC metaclass,
    so this class is instantiable; subclasses are trusted to override.
    """

    @abc.abstractmethod
    def get_dimensions(self) -> typing.List[DimensionName]:
        """Return the dimension names of this configuration space."""

    # Further interface candidates (currently implemented only by
    # SimpleConfigurationSpace): get_values(dimension), get_weight(dimension,
    # value), and get_dimension_search_ordering().

    @abc.abstractmethod
    def get_random_assignment(self) -> 'ConfigurationPoint':
        """Pick a value for every dimension at random."""

    @abc.abstractmethod
    def get_default_assignment(self) -> 'ConfigurationPoint':
        """Pick the default value for every dimension."""

    def get_point(self, values: typing.Dict[DimensionName, typing.Any]) -> 'ConfigurationPoint':
        """Wrap *values* into a ConfigurationPoint belonging to this space."""
        return ConfigurationPoint(self, values)
class ConfigurationPoint(typing.Dict[DimensionName, typing.Any]):
    """One concrete value per dimension, tied to the space it came from.

    Behaves as a plain dict of dimension -> value; `space` records the owning
    ConfigurationSpace and `data` carries arbitrary caller bookkeeping.
    """

    def __init__(self, space: ConfigurationSpace, values: typing.Dict[DimensionName, typing.Any]) -> None:
        super().__init__(values)
        self.space = space
        self.data: typing.Dict = {}
class SimpleConfigurationSpace(ConfigurationSpace):
    '''
    Implementation that explicitly enumerates all configuration space grid points.

    dimension_values maps each dimension name to its candidate values;
    dimension_ordering optionally fixes the search-preference order (defaults
    to the key order of dimension_values); value_weights optionally gives one
    weight per candidate value (defaults to uniform 1.0).
    '''

    def __init__(self, dimension_values: typing.Dict[DimensionName, typing.List], *,
                 dimension_ordering: typing.List[DimensionName] = None,
                 value_weights: typing.Dict[DimensionName, typing.List[float]] = None) -> None:
        # BUGFIX: this check was inverted — it used to raise when the key
        # sets *did* match (missing `not`, unlike the value_weights check).
        if dimension_ordering is not None and not set(dimension_values.keys()) == set(dimension_ordering):
            raise exceptions.InvalidArgumentValueError(
                'The keys of dimension_values and dimension_ordering must be the same')
        if value_weights is not None and not set(dimension_values.keys()) == set(value_weights.keys()):
            raise exceptions.InvalidArgumentValueError(
                'The set of keys of dimension_values and value_weights must be the same')
        # BUGFIX: the per-key length check used to run even when value_weights
        # was None (TypeError: NoneType is not subscriptable); it now only
        # validates weights that were actually supplied.
        if value_weights is not None:
            for key in dimension_values.keys():
                if not len(dimension_values[key]) == len(value_weights[key]):
                    raise exceptions.InvalidArgumentValueError(
                        'The length of dimension_values[{}] and values_weights[{}] must be the same'.format(
                            key, key))
        else:
            # default: uniform weight for every candidate value
            value_weights = {key: [1.0] * len(values) for key, values in dimension_values.items()}
        if dimension_ordering is None:
            dimension_ordering = list(dimension_values.keys())
        self._dimension_values: typing.Dict[DimensionName, typing.List] = dimension_values
        self._value_weights: typing.Dict[DimensionName, typing.List[float]] = value_weights
        self._dimension_ordering = dimension_ordering

    def get_dimensions(self):
        """Return the dimension names of this configuration space."""
        return list(self._dimension_values.keys())

    def get_values(self, dimension: DimensionName) -> typing.List:
        """Return the candidate values of *dimension*."""
        return self._dimension_values[dimension]

    def get_weight(self, dimension: DimensionName, value: typing.Any) -> float:
        """Return the weight associated with *value* within *dimension*."""
        return self._value_weights[dimension][self.get_values(dimension).index(value)]

    def get_dimension_search_ordering(self) -> typing.List[DimensionName]:
        """Return the dimensions in order of search preference."""
        return self._dimension_ordering

    def get_point(self, values: typing.Dict[DimensionName, typing.Any]):
        """Wrap *values* into a ConfigurationPoint belonging to this space."""
        # TODO: SimpleConfigurationSpace should manage and reuse ConfigurationPoints
        return ConfigurationPoint(self, values)

    def get_first_assignment(self) -> ConfigurationPoint:
        '''
        Assign the first value for each dimension
        '''
        assignment = {dimension: self.get_values(dimension)[0]
                      for dimension in self._dimension_ordering}
        return ConfigurationPoint(self, assignment)

    def get_default_assignment(self) -> ConfigurationPoint:
        """The default assignment is the first value of every dimension."""
        return self.get_first_assignment()

    def get_random_assignment(self) -> ConfigurationPoint:
        """
        Randomly assigns a value for each dimension
        """
        assignment = {dimension: random.choice(self.get_values(dimension))
                      for dimension in self._dimension_ordering}
        return ConfigurationPoint(self, assignment)

    def get_dimension_length(self, kw: DimensionName) -> int:
        """
        Return the number of candidate values of dimension *kw*.
        """
        return len(self.get_values(kw))

    def __str__(self):
        """
        Returns: the configuration grid as a human-readable string
        """
        return pprint.pformat(self._dimension_values)
class PrimitiveHyperparams():
    """Couples a primitive's name with the Hyperparam generators for its knobs."""

    def __init__(self, primitive_name: str, hyperparams: typing.Dict[str, Hyperparam]):
        self.primitive_name = primitive_name
        self.hyperparams = hyperparams

    def get_default_assignment(self) -> typing.Dict:
        """Return {'primitive': name, 'hyperparameters': default values}."""
        defaults = {name: gen.default() for name, gen in self.hyperparams.items()}
        return {
            "primitive": self.primitive_name,
            "hyperparameters": defaults,
        }

    def get_random_assignment(self) -> typing.Dict:
        """Return {'primitive': name, 'hyperparameters': sampled values}."""
        sampled = {name: gen.sample() for name, gen in self.hyperparams.items()}
        return {
            "primitive": self.primitive_name,
            "hyperparameters": sampled,
        }
class TemplateStepHyperparams():
    """Chooses among candidate primitives (optionally weighted) for one template
    step and delegates hyperparameter assignment to the chosen primitive.
    """

    def __init__(self, primitive_hyperaparms: typing.List[PrimitiveHyperparams],
                 primitive_weights: typing.Optional[typing.List[float]]):
        # NOTE: the misspelled parameter/attribute name 'primitive_hyperaparms'
        # is kept as-is for backward compatibility with existing callers.
        assert len(primitive_hyperaparms) > 0, "Must provided at least one PrimitiveHyperparams"
        # BUGFIX: typo in the assertion message ("samme" -> "same").
        assert primitive_weights is None or len(primitive_hyperaparms) == len(primitive_weights), \
            "Must have same length"
        self.primitive_hyperaparms = primitive_hyperaparms
        self.no_weights_specified = primitive_weights is None
        if primitive_weights is None:
            # uniform weights when none are supplied
            primitive_weights = [1.0] * len(primitive_hyperaparms)
        self.primitive_weights = primitive_weights

    def get_default_assignment(self) -> typing.Dict:
        """Default assignment of the first (or highest-weighted) primitive."""
        if self.no_weights_specified:
            return self.primitive_hyperaparms[0].get_default_assignment()
        # Highest weight wins; ties resolve to the earliest candidate because
        # max() returns the first maximal element.
        _, best = max(zip(self.primitive_weights, self.primitive_hyperaparms),
                      key=itemgetter(0))
        return best.get_default_assignment()

    def get_random_assignment(self) -> typing.Dict:
        """Weighted random choice of a primitive, then a random assignment from it."""
        return random.choices(self.primitive_hyperaparms, self.primitive_weights)[0].get_random_assignment()
class ImplicitConfigurationSpace(ConfigurationSpace):
    """Configuration space given as candidate PrimitiveHyperparams per step."""

    def __init__(self, conf_space: typing.Dict[DimensionName, typing.List[PrimitiveHyperparams]]):
        self.conf_space = conf_space

    def get_default_assignment(self):
        """First candidate of every step, with its default hyperparameters."""
        return {step_name: candidates[0].get_default_assignment()
                for step_name, candidates in self.conf_space.items()}

    def get_random_assignment(self):
        """Uniformly pick a candidate per step, then sample its hyperparameters."""
        assignment = {step_name: random.choices(candidates)[0].get_random_assignment()
                      for step_name, candidates in self.conf_space.items()}
        return assignment
# (C) William W. Cohen and Carnegie Mellon University, 2016
import theano
import theano.tensor as T
import theano.sparse as S
import theano.sparse.basic as B
from . import matrixdb
import numpy
def debugVar(v, depth=0, maxdepth=10):
    """Recursively pretty-print a theano variable and the applies feeding it."""
    if depth > maxdepth:
        print('...')
        return
    print('| ' * (depth + 1), end=' ')
    print('var: name', v.name, 'type', type(v), 'def', theano.pp(v))
    for parent_apply in v.get_parents():
        debugApply(parent_apply, depth=depth + 1, maxdepth=maxdepth)
def debugApply(a, depth=0, maxdepth=10):
    """Recursively pretty-print a theano apply node and its input variables."""
    if depth > maxdepth:
        print('...')
        return
    print('| ' * (depth + 1), end=' ')
    print('apply: ', a, 'op', type(a.op), 'output types', list(map(type, a.outputs)))
    for inp in a.inputs:
        debugVar(inp, depth=depth + 1, maxdepth=maxdepth)
if __name__=="__main__":
    # Smoke-test: load a small facts DB and trace a simple sparse theano
    # expression with the debug helpers above.
    db = matrixdb.MatrixDB.loadFile("test/fam.cfacts")
    # one-hot sparse row vectors for two constants
    va = db.onehot('william')
    vb = db.onehot('sarah')
    print('a',va)
    print('b',vb)
    print('shape',va.shape)
    print('f1: s = x*((x+x)+x)')
    # symbolic sparse CSR input
    tx = S.csr_matrix('x')
    # scalar sum of the entries of x+x+x (sparse gradient enabled)
    r1 = B.sp_sum(tx+tx+tx,sparse_grad=True)
    s = tx*r1
    s.name = 's'
    f1 = theano.function(inputs=[tx],outputs=[s])
    w = f1(va)
    print(w[0])
    # dump the expression graph of s
    debugVar(s)
    #print db.rowAsSymbolDict(w[0])
    # (kept) earlier Python-2 experiments, left commented out by the author:
    #
    # print 'f2(w=a,c=b)'
    # tw = S.csr_matrix('w') #weighter
    # tc = S.csr_matrix('c') #constant
    # r2 = B.sp_sum(tw*1.7,sparse_grad=True)
    # s2 = tc*r2
    # f2 = theano.function(inputs=[tw,tc],outputs=[s2])
    # w = f2(va,vb)
    # print w[0]
    #
    print('f3(w=a), b constant')
    tw3 = S.csr_matrix('w') #weighter
    #y = sparse.CSR(data, indices, indptr, shape)
    # tc3 = S.CSR(vb.data, vb.indices, vb.indptr, vb.shape)
    # r3 = B.sp_sum(tw3*1.7,sparse_grad=True)
    # s3 = tc3*r3
    # f3 = theano.function(inputs=[tw3],outputs=[s3])
    # w = f3(va)
    # print w[0]
    # debugVar(tw3,maxdepth=5)
# Copyright 2017-2020 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import glob
import struct
import re
import os
import traceback
import numpy as np
import pandas as pd
import multiprocessing as mp
import multiprocessing.sharedctypes
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
# cython imports
try:
import hatchet.cython_modules.libs.subtract_metrics as smc
except ImportError:
print("-" * 80)
print(
"""Error: Shared object (.so) not found for cython module.\n\tPlease run install.sh from the hatchet root directory to build modules."""
)
print("-" * 80)
traceback.print_exc()
raise
import hatchet.graphframe
from hatchet.node import Node
from hatchet.graph import Graph
from hatchet.util.profiler import Timer
from hatchet.frame import Frame
# Module-level scratch: id of the source file of the CCT root currently being
# parsed (set in HPCToolkitReader.read).
src_file = 0


def init_shared_array(buf_):
    """Initialize shared array."""
    # Stash the RawArray in a module global so pool workers (which inherit
    # this module) can write into the shared metrics buffer.
    global shared_metrics
    shared_metrics = buf_
def read_metricdb_file(args):
    """Read a single metricdb file into a 1D array."""
    filename, num_nodes, num_threads_per_rank, num_metrics, shape = args
    # The rank and thread ids are encoded in the filename, e.g.
    # <app>-<rank>-<thread>-<host>-<pid>-<gen>.metric-db.
    rank = int(
        re.search(r"\-(\d+)\-(\d+)\-([\w\d]+)\-(\d+)\-\d.metric-db$", filename).group(1)
    )
    thread = int(
        re.search(r"\-(\d+)\-(\d+)\-([\w\d]+)\-(\d+)\-\d.metric-db$", filename).group(2)
    )
    with open(filename, "rb") as metricdb:
        # Skip the 32-byte header; the rest is big-endian float64 metric data.
        metricdb.seek(32)
        arr1d = np.fromfile(
            metricdb, dtype=np.dtype(">f8"), count=num_nodes * num_metrics
        )
    # View the process-shared buffer (set by init_shared_array) as the full
    # 2D metrics matrix.
    arr = np.frombuffer(shared_metrics).reshape(shape)
    # copy the data in the right place in the larger 2D array of metrics
    rank_offset = (rank * num_threads_per_rank + thread) * num_nodes
    # NOTE(review): only the first two columns receive file data and the
    # nid/rank/thread columns are written at fixed indices 2..4 — this
    # implicitly assumes num_metrics == 2. Confirm for other databases.
    arr[rank_offset : rank_offset + num_nodes, :2].flat = arr1d.flat
    arr[rank_offset : rank_offset + num_nodes, 2] = range(1, num_nodes + 1)
    arr[rank_offset : rank_offset + num_nodes, 3] = rank
    arr[rank_offset : rank_offset + num_nodes, 4] = thread
class HPCToolkitReader:
"""Read in the various sections of an HPCToolkit experiment.xml file and
metric-db files.
"""
    def __init__(self, dir_name):
        """Parse experiment.xml and discover the metric-db files in *dir_name*."""
        # this is the name of the HPCToolkit database directory. The directory
        # contains an experiment.xml and some metric-db files
        self.dir_name = dir_name
        root = ET.parse(self.dir_name + "/experiment.xml").getroot()
        # Cache the XML sections that fill_tables() will turn into dicts.
        self.loadmodule_table = next(root.iter("LoadModuleTable"))
        self.file_table = next(root.iter("FileTable"))
        self.procedure_table = next(root.iter("ProcedureTable"))
        self.metricdb_table = next(root.iter("MetricDBTable"))
        self.callpath_profile = next(root.iter("SecCallPathProfileData"))
        # For a parallel run, there should be one metric-db file per MPI
        # process
        metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
        self.num_metricdb_files = len(metricdb_files)
        # We need to know how many threads per rank there are. This counts the
        # number of thread 0 metric-db files (i.e., number of ranks), then
        # uses this as the divisor to the total number of metric-db files.
        metricdb_numranks_files = glob.glob(self.dir_name + "/*-000-*.metric-db")
        self.num_ranks = len(metricdb_numranks_files)
        self.num_threads_per_rank = int(
            self.num_metricdb_files / len(metricdb_numranks_files)
        )
        # Read one metric-db file to extract the number of nodes in the CCT
        # and the number of metrics
        with open(metricdb_files[0], "rb") as metricdb:
            metricdb.read(18)  # skip tag
            metricdb.read(5)  # skip version TODO: should we?
            endian = metricdb.read(1)
            if endian == b"b":
                # big-endian: counts follow as two 4-byte big-endian ints
                self.num_nodes = struct.unpack(">i", metricdb.read(4))[0]
                self.num_metrics = struct.unpack(">i", metricdb.read(4))[0]
            else:
                raise ValueError(
                    "HPCToolkitReader doesn't support endian '%s'" % endian
                )
        # id -> name lookup tables, populated by fill_tables()
        self.load_modules = {}
        self.src_files = {}
        self.procedure_names = {}
        self.metric_names = {}
        # this list of dicts will hold all the node information such as
        # procedure name, load module, filename, etc. for all the nodes
        self.node_dicts = []
        self.timer = Timer()
def fill_tables(self):
"""Read certain sections of the experiment.xml file to create dicts of load
modules, src_files, procedure_names, and metric_names.
"""
for loadm in (self.loadmodule_table).iter("LoadModule"):
self.load_modules[loadm.get("i")] = loadm.get("n")
for filename in (self.file_table).iter("File"):
self.src_files[filename.get("i")] = filename.get("n")
for procedure in (self.procedure_table).iter("Procedure"):
self.procedure_names[procedure.get("i")] = procedure.get("n")
for metric in (self.metricdb_table).iter("MetricDB"):
self.metric_names[metric.get("i")] = metric.get("n")
return (
self.load_modules,
self.src_files,
self.procedure_names,
self.metric_names,
)
    def read_all_metricdb_files(self):
        """Read all the metric-db files and create a dataframe with num_nodes X
        num_metricdb_files rows and num_metrics columns. Three additional columns
        store the node id, MPI process rank, and thread id (if applicable).
        """
        metricdb_files = glob.glob(self.dir_name + "/*.metric-db")
        metricdb_files.sort()
        # All the metric data per node and per process is read into the metrics
        # array below. The three additional columns are for storing the implicit
        # node id (nid), MPI process rank, and thread id (if applicable).
        shape = [self.num_nodes * self.num_metricdb_files, self.num_metrics + 3]
        size = int(np.prod(shape))
        # shared memory buffer for multiprocessing
        shared_buffer = mp.sharedctypes.RawArray("d", size)
        # Each pool worker writes its file's slice directly into the shared
        # buffer (see init_shared_array / read_metricdb_file above).
        pool = mp.Pool(initializer=init_shared_array, initargs=(shared_buffer,))
        self.metrics = np.frombuffer(shared_buffer).reshape(shape)
        args = [
            (
                filename,
                self.num_nodes,
                self.num_threads_per_rank,
                self.num_metrics,
                shape,
            )
            for filename in metricdb_files
        ]
        try:
            pool.map(read_metricdb_file, args)
        finally:
            pool.close()
        # once all files have been read, create a dataframe of metrics
        # NOTE(review): keys are sorted as strings ("10" < "2") — confirm the
        # metric ids never reach double digits, or column order may not match.
        metric_names = [
            self.metric_names[key] for key in sorted(self.metric_names.keys())
        ]
        # Normalize HPCToolkit's CPUTIME names to hatchet's canonical
        # "time" / "time (inc)" column names.
        for idx, name in enumerate(metric_names):
            if name == "CPUTIME (usec) (E)" or name == "CPUTIME (sec) (E)":
                metric_names[idx] = "time"
            if name == "CPUTIME (usec) (I)" or name == "CPUTIME (sec) (I)":
                metric_names[idx] = "time (inc)"
        self.metric_columns = metric_names
        df_columns = self.metric_columns + ["nid", "rank", "thread"]
        self.df_metrics = pd.DataFrame(self.metrics, columns=df_columns)
        self.df_metrics["nid"] = self.df_metrics["nid"].astype(int, copy=False)
        self.df_metrics["rank"] = self.df_metrics["rank"].astype(int, copy=False)
        self.df_metrics["thread"] = self.df_metrics["thread"].astype(int, copy=False)
        # if number of threads per rank is 1, we do not need to keep the thread ID column
        if self.num_threads_per_rank == 1:
            del self.df_metrics["thread"]
        # used to speedup parse_xml_node
        self.np_metrics = self.df_metrics[self.metric_columns].values
        # getting the number of execution threads for our stride in
        # subtract_exclusive_metric_vals/ num nodes is already calculated
        self.total_execution_threads = self.num_threads_per_rank * self.num_ranks
    def read(self):
        """Read the experiment.xml file to extract the calling context tree and
        create a dataframe out of it. Then merge the two dataframes to create
        the final dataframe.

        Return:
            (GraphFrame): new GraphFrame with HPCToolkit data.
        """
        with self.timer.phase("fill tables"):
            self.fill_tables()

        with self.timer.phase("read metric db"):
            self.read_all_metricdb_files()

        list_roots = []

        # parse the ElementTree to generate a calling context tree
        for root in self.callpath_profile.findall("PF"):
            # src_file is a module-level global: statement ("S") nodes carry no
            # file attribute of their own, so parse_xml_node reuses the file of
            # the most recently seen procedure/loop node.
            global src_file

            nid = int(root.get("i"))
            src_file = root.get("f")

            # start with the root and create the callpath and node for the root
            # also a corresponding node_dict to be inserted into the dataframe
            graph_root = Node(
                Frame(
                    {"type": "function", "name": self.procedure_names[root.get("n")]}
                ),
                None,
            )
            node_dict = self.create_node_dict(
                nid,
                graph_root,
                self.procedure_names[root.get("n")],
                "PF",
                self.src_files[src_file],
                int(root.get("l")),
                self.load_modules[root.get("lm")],
            )

            self.node_dicts.append(node_dict)
            list_roots.append(graph_root)

            # start graph construction at the root
            with self.timer.phase("graph construction"):
                self.parse_xml_children(root, graph_root)

            # put updated metrics back in dataframe: parse_xml_node subtracts
            # statement values from their parents in self.np_metrics in place,
            # so the exclusive columns must be refreshed after each subtree
            for i, column in enumerate(self.metric_columns):
                if "(inc)" not in column:
                    self.df_metrics[column] = self.np_metrics.T[i]

        with self.timer.phase("graph construction"):
            graph = Graph(list_roots)
            graph.enumerate_traverse()

        # create a dataframe for all the nodes in the graph
        self.df_nodes = pd.DataFrame.from_dict(data=self.node_dicts)

        # merge the metrics and node dataframes on the implicit node id
        with self.timer.phase("data frame"):
            dataframe = pd.merge(self.df_metrics, self.df_nodes, on="nid")

            # set the index to be a MultiIndex
            if self.num_threads_per_rank > 1:
                indices = ["node", "rank", "thread"]
            # if number of threads per rank is 1, do not make thread an index
            elif self.num_threads_per_rank == 1:
                indices = ["node", "rank"]
            dataframe.set_index(indices, inplace=True)
            dataframe.sort_index(inplace=True)

        # create list of exclusive and inclusive metric columns
        exc_metrics = []
        inc_metrics = []
        for column in self.metric_columns:
            if "(inc)" in column:
                inc_metrics.append(column)
            else:
                exc_metrics.append(column)

        return hatchet.graphframe.GraphFrame(graph, dataframe, exc_metrics, inc_metrics)
def parse_xml_children(self, xml_node, hnode):
"""Parses all children of an XML node."""
for xml_child in xml_node:
if xml_child.tag != "M":
nid = int(xml_node.get("i"))
line = int(xml_node.get("l"))
self.parse_xml_node(xml_child, nid, line, hnode)
    def parse_xml_node(self, xml_node, parent_nid, parent_line, hparent):
        """Parses an XML node and its children recursively.

        Args:
            xml_node: ElementTree element to translate into a graph node.
            parent_nid: implicit node id of the parent XML element.
            parent_line: source line number taken from the parent element.
            hparent (Node): graph node the new node should hang off of.
        """
        nid = int(xml_node.get("i"))

        # src_file is shared across calls: statement ("S") elements carry no
        # file attribute, so they inherit the file of the enclosing
        # procedure ("PF"/"Pr") or loop ("L") element seen last.
        global src_file

        xml_tag = xml_node.tag

        if xml_tag == "PF" or xml_tag == "Pr":
            # procedure
            name = self.procedure_names[xml_node.get("n")]
            if parent_line != 0:
                # prefix the procedure name with its call-site line number
                name = str(parent_line) + ":" + name
            src_file = xml_node.get("f")
            line = int(xml_node.get("l"))

            hnode = Node(Frame({"type": "function", "name": name}), hparent)
            node_dict = self.create_node_dict(
                nid,
                hnode,
                name,
                xml_tag,
                self.src_files[src_file],
                line,
                self.load_modules[xml_node.get("lm")],
            )
        elif xml_tag == "L":
            # loop
            src_file = xml_node.get("f")
            line = int(xml_node.get("l"))
            name = (
                "Loop@" + os.path.basename(self.src_files[src_file]) + ":" + str(line)
            )

            hnode = Node(
                Frame({"type": "loop", "file": self.src_files[src_file], "line": line}),
                hparent,
            )
            node_dict = self.create_node_dict(
                nid, hnode, name, xml_tag, self.src_files[src_file], line, None
            )
        elif xml_tag == "S":
            # statement
            line = int(xml_node.get("l"))
            # this might not be required for resolving conflicts
            name = os.path.basename(self.src_files[src_file]) + ":" + str(line)

            hnode = Node(
                Frame(
                    {
                        "type": "statement",
                        "file": self.src_files[src_file],
                        "line": line,
                    }
                ),
                hparent,
            )
            node_dict = self.create_node_dict(
                nid, hnode, name, xml_tag, self.src_files[src_file], line, None
            )

            # when we reach statement nodes, we subtract their exclusive
            # metric values from the parent's values (mutates self.np_metrics
            # in place; read() copies the columns back afterwards)
            for i, column in enumerate(self.metric_columns):
                if "(inc)" not in column:
                    smc.subtract_exclusive_metric_vals(
                        nid,
                        parent_nid,
                        self.np_metrics.T[i],
                        self.total_execution_threads,
                        self.num_nodes,
                    )

        if xml_tag == "C" or (
            xml_tag == "Pr" and self.procedure_names[xml_node.get("n")] == ""
        ):
            # do not add a node to the graph if the xml_tag is a callsite
            # or if its a procedure with no name
            # for Prs, the preceding Pr has the calling line number and for
            # PFs, the preceding C has the line number
            line = int(xml_node.get("l"))
            self.parse_xml_children(xml_node, hparent)
        else:
            # NOTE(review): if xml_tag is none of PF/Pr/L/S/C, hnode and
            # node_dict are unbound here and this raises UnboundLocalError —
            # presumably the experiment.xml schema only produces those tags;
            # confirm against the HPCToolkit format.
            self.node_dicts.append(node_dict)
            hparent.add_child(hnode)
            self.parse_xml_children(xml_node, hnode)
def create_node_dict(self, nid, hnode, name, node_type, src_file, line, module):
"""Create a dict with all the node attributes."""
node_dict = {
"nid": nid,
"name": name,
"type": node_type,
"file": src_file,
"line": line,
"module": module,
"node": hnode,
}
return node_dict | |
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
# USAGE:
# export DURATION=2.0 # use 2s sequences
# export EPOCH=50 # use model after 50 epochs
# python same_different_experiment.py $DURATION $EPOCH
# ---- <edit> -----------------------------------------------------------------
# environment
WAV_TEMPLATE = '/path/to/where/files/are/stored/{uri}.wav'
LOG_DIR = '/path/to/where/trained/models/are/stored'
# ---- </edit> ---------------------------------------------------------------

# sequence duration (in seconds), first command-line argument
import sys
duration = float(sys.argv[1])

# number of epoch (selects which saved model checkpoint to evaluate)
nb_epoch = int(sys.argv[2])

# models are stored per-duration, e.g. <LOG_DIR>/2.0s
LOG_DIR = LOG_DIR + '/{duration:.1f}s'.format(duration=duration)

import numpy as np
np.random.seed(1337)  # for reproducibility

# feature extraction: 11 MFCC coefficients plus delta / delta-delta streams
from pyannote.audio.features.yaafe import YaafeMFCC
feature_extractor = YaafeMFCC(e=False, De=True, DDe=True,
                              coefs=11, D=True, DD=True)

# ETAPE database
medium_template = {'wav': WAV_TEMPLATE}
from pyannote.database import Etape
database = Etape(medium_template=medium_template)

# experimental protocol (ETAPE TV subset)
protocol = database.get_protocol('SpeakerDiarization', 'TV')

from pyannote.audio.embedding.base import SequenceEmbedding
batch_size = 32

# generate set of labeled sequences
from pyannote.audio.generators.labels import \
    LabeledFixedDurationSequencesBatchGenerator
# NOTE(review): batch_size=-1 here, not the batch_size=32 defined above —
# presumably -1 means "one batch per file"; confirm against the generator API.
generator = LabeledFixedDurationSequencesBatchGenerator(
    feature_extractor, duration=duration, step=duration, batch_size=-1)
X, y = zip(*generator(protocol.development()))
X, y = np.vstack(X), np.hstack(y)

# randomly select (at most) 100 sequences from each speaker to ensure
# all speakers have the same importance in the evaluation
# (np.unique also re-encodes y as integer speaker indices via return_inverse)
unique, y, counts = np.unique(y, return_inverse=True, return_counts=True)
n_speakers = len(unique)
indices = []
for speaker in range(n_speakers):
    # sample without replacement so a speaker's sequences stay distinct
    i = np.random.choice(np.where(y == speaker)[0], size=min(100, counts[speaker]), replace=False)
    indices.append(i)
indices = np.hstack(indices)
# y gains a trailing axis so pdist below can treat each label as a 1-d point
X, y = X[indices], y[indices, np.newaxis]

# load pre-trained embedding (epochs are saved 0-based, hence nb_epoch - 1)
architecture_yml = LOG_DIR + '/architecture.yml'
weights_h5 = LOG_DIR + '/weights/{epoch:04d}.h5'.format(epoch=nb_epoch - 1)
embedding = SequenceEmbedding.from_disk(architecture_yml, weights_h5)

# embed all sequences
fX = embedding.transform(X, batch_size=batch_size, verbose=0)

# compute euclidean distance between every pair of sequences
from scipy.spatial.distance import pdist
distances = pdist(fX, metric='euclidean')

# compute same/different groundtruth
# chebyshev distance between integer labels is 0 iff the labels are equal,
# so "< 1" marks same-speaker pairs
y_true = pdist(y, metric='chebyshev') < 1

# plot positive/negative scores distribution
# plot DET curve and return equal error rate
from pyannote.metrics.plot.binary_classification import \
    plot_det_curve, plot_distributions
prefix = LOG_DIR + '/plot.{epoch:04d}'.format(epoch=nb_epoch - 1)
plot_distributions(y_true, distances, prefix, xlim=(0, 2), ymax=3, nbins=100)
# distances are negated so that larger scores mean "more likely same speaker"
eer = plot_det_curve(y_true, -distances, prefix)

print('EER = {eer:.2f}%'.format(eer=100*eer))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Monkey-patch because I trained with a newer version.
# This can be removed once PyTorch 0.4.x is out.
# See https://discuss.pytorch.org/t/question-about-rebuild-tensor-v2/14560
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
import torch.utils.data
from torch.autograd import Variable
from torch.utils.data.dataloader import default_collate
import torchvision
import torchvision.transforms as transforms
from torchvision.utils import save_image
import sys
import os
import time
import numpy as np
import cv2
import argparse
import yaml
import json
import random
import math
import copy
import shutil
import logging
import scipy.sparse
from tqdm import tqdm
from collections import namedtuple
from easydict import EasyDict as edict
from config import CONFIG, config_load
# Load CONFIG
parser = argparse.ArgumentParser(description='Training code')
parser.add_argument('--config', default='config.yaml', type=str, help='yaml config file')
args = parser.parse_args()
config_load(args.config)
# config_load('config.yaml')
print ('==> CONFIG is: \n', CONFIG, '\n')

# Set logger
logger = logging.getLogger(__name__)
# renamed from `format` to avoid shadowing the builtin
formatter = logging.Formatter("%(asctime)s - %(message)s")  # output format
sh = logging.StreamHandler(stream=sys.stdout)  # output to standard output
sh.setFormatter(formatter)
logger.addHandler(sh)
if CONFIG.DEBUG:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)

# Create LOG_DIR and SNAPSHOT_DIR
# BUGFIX: compute the run id once — the original called int(time.time())
# separately for each directory, so LOGDIR and SNAPSHOTDIR could straddle a
# second boundary and end up with mismatched timestamps.
_run_id = '%s_%d' % (CONFIG.NAME, int(time.time()))
LOGDIR = os.path.join(CONFIG.LOGS.LOG_DIR, _run_id)
SNAPSHOTDIR = os.path.join(CONFIG.LOGS.SNAPSHOT_DIR, _run_id)
if not os.path.exists(LOGDIR):
    os.makedirs(LOGDIR)
if not os.path.exists(SNAPSHOTDIR):
    os.makedirs(SNAPSHOTDIR)

# Store the code into LOG_DIR/shutil for reproducibility of this run
if CONFIG.LOGS.LOG_SHUTIL_ON:
    SHUTILDIR = os.path.join(LOGDIR, 'shutil')
    if os.path.exists(SHUTILDIR):
        shutil.rmtree(SHUTILDIR)
    # never copy the (potentially huge) snapshot/log dirs into the archive
    SHUTIL_IGNORELIST = [CONFIG.LOGS.SNAPSHOT_DIR, CONFIG.LOGS.LOG_DIR] + \
                        CONFIG.LOGS.LOG_SHUTIL_IGNORELIST
    if os.path.exists(CONFIG.LOGS.LOG_SHUTIL_IGNOREFILE):
        # extra ignore patterns, one per line, gitignore-style file
        lines = open(CONFIG.LOGS.LOG_SHUTIL_IGNOREFILE).readlines()
        SHUTIL_IGNORELIST += [l.strip() for l in lines]
    print ('==> Shutil Code to File: %s \n'%(SHUTILDIR))
    print ('==> Shutil Ignore Patterns: ', SHUTIL_IGNORELIST, '\n')
    shutil.copytree('./', SHUTILDIR, ignore=shutil.ignore_patterns(*SHUTIL_IGNORELIST))
####################################################################################################
# COCO Dataset
####################################################################################################
from datasets import CocoDatasetMiniBatch, MinibatchSampler
####################################################################################################
# Network Model
####################################################################################################
from resnetXtFPN import resnet50C4
from generate_anchors import generate_anchors
class RPN(nn.Module):
    """Region Proposal Network head: one shared 3x3 conv feeding 1x1
    objectness-score and bbox-delta predictors, with one score and four
    regression deltas per anchor."""

    def __init__(self, dim_in, spatial_scale, pretrainfile=None):
        # NOTE(review): pretrainfile is never used, and the spatial_scale
        # argument is immediately overwritten from the config below (the
        # recomputed value itself is only referenced by the commented-out
        # proposal ops) — confirm whether these parameters can be dropped.
        super(RPN, self).__init__()
        self.dim_in = dim_in
        # hidden representation keeps the input channel count
        self.dim_out = dim_in

        spatial_scale = 1.0 / CONFIG.RPN.STRIDE
        anchors = generate_anchors(
            stride=CONFIG.RPN.STRIDE,
            sizes=CONFIG.RPN.SIZES,
            aspect_ratios=CONFIG.RPN.ASPECT_RATIOS)
        num_anchors = anchors.shape[0]

        # RPN hidden representation
        self.RPN_conv = nn.Conv2d(self.dim_in, self.dim_out, 3, 1, 1)
        # Proposal classification scores: one logit per anchor
        self.n_score_out = num_anchors # for sigmoid.
        self.RPN_cls_score = nn.Conv2d(self.dim_out, self.n_score_out, 1, 1, 0)
        # Proposal bbox regression deltas: (dx, dy, dw, dh) per anchor
        self.RPN_bbox_pred = nn.Conv2d(self.dim_out, num_anchors * 4, 1, 1, 0)

        #self.RPN_GenerateProposals = GenerateProposalsOp(anchors, spatial_scale)
        #self.RPN_GenerateProposalLabels = GenerateProposalLabelsOp()

    def _init_weights(self):
        # zero-mean Gaussian weights (std 0.01), zero biases
        # NOTE(review): not called anywhere in this file — confirm the caller.
        init.normal_(self.RPN_conv.weight, std=0.01)
        init.constant_(self.RPN_conv.bias, 0)
        init.normal_(self.RPN_cls_score.weight, std=0.01)
        init.constant_(self.RPN_cls_score.bias, 0)
        init.normal_(self.RPN_bbox_pred.weight, std=0.01)
        init.constant_(self.RPN_bbox_pred.bias, 0)
class MaskRCNN(nn.Module):
    """Mask R-CNN: a configurable backbone plus RPN and optional box / mask /
    keypoint branches, assembled according to CONFIG."""

    def __init__(self, pretrainfile=None):
        """Build the network.

        Args:
            pretrainfile (str, optional): path to a self-trained state dict;
                when None, weights are initialized from scratch (see init()).
        """
        super(MaskRCNN, self).__init__()

        # Backbone
        if CONFIG.MODEL.CONV_BODY == 'resnet50C4':
            self.Conv_Body = resnet50C4(pretrained=True, num_classes=None)
            spatial_scale = 1. / 16.
            dim_out = 1024
        elif CONFIG.MODEL.CONV_BODY == 'resnet50FPN':
            # FPN : out is in the order of [p1, p2, p3, p4]. from 1./4. to 1./32.
            # The order is different here. **REMEMBER** to transpose the order
            # in the forward().
            # NOTE(review): this branch still builds resnet50C4 — presumably a
            # placeholder until a real FPN body exists; confirm.
            self.Conv_Body = resnet50C4(pretrained=True, num_classes=None)
            spatial_scale = (1. / 32., 1. / 16., 1. / 8., 1. / 4.)
            dim_out = (2048, 1024, 512, 256)
        else:
            raise NotImplementedError

        # TODO: here
        # Region Proposal Network
        if CONFIG.RPN.RPN_ON:
            # BUGFIX: the original passed the undefined name `dim_in`
            # (NameError); the RPN consumes the backbone's output channels.
            self.rpn = RPN(dim_out, spatial_scale)

        if CONFIG.FPN.FPN_ON:
            # Only supports case when RPN and ROI min levels are the same
            assert CONFIG.FPN.RPN_MIN_LEVEL == CONFIG.FPN.ROI_MIN_LEVEL
            # RPN max level can be >= to ROI max level
            assert CONFIG.FPN.RPN_MAX_LEVEL >= CONFIG.FPN.ROI_MAX_LEVEL
            # FPN RPN max level might be > FPN ROI max level in which case we
            # need to discard some leading conv blobs (blobs are ordered from
            # max/coarsest level to min/finest level)
            self.num_roi_levels = CONFIG.FPN.ROI_MAX_LEVEL - CONFIG.FPN.ROI_MIN_LEVEL + 1
            # Retain only the spatial scales that will be used for RoI heads.
            # `Conv_Body.spatial_scale` may include extra scales that are used
            # for RPN proposals, but not for RoI heads.
            self.Conv_Body.spatial_scale = self.Conv_Body.spatial_scale[-self.num_roi_levels:]

        # NOTE(review): get_func, fast_rcnn_heads, mask_rcnn_heads,
        # keypoint_rcnn_heads and self.roi_feature_transform are not defined
        # in this file (Detectron-style helpers); the branches below raise
        # NameError unless the corresponding imports are added.
        # BBOX Branch
        if not CONFIG.MODEL.RPN_ONLY:
            # BUGFIX: the RPN module is stored as `self.rpn` (lowercase);
            # `self.RPN` raised AttributeError here and below.
            self.Box_Head = get_func(CONFIG.FAST_RCNN.ROI_BOX_HEAD)(
                self.rpn.dim_out, self.roi_feature_transform, self.Conv_Body.spatial_scale)
            self.Box_Outs = fast_rcnn_heads.fast_rcnn_outputs(
                self.Box_Head.dim_out)

        # Mask Branch
        if CONFIG.MODEL.MASK_ON:
            self.Mask_Head = get_func(CONFIG.MRCNN.ROI_MASK_HEAD)(
                self.rpn.dim_out, self.roi_feature_transform, self.Conv_Body.spatial_scale)
            if getattr(self.Mask_Head, 'SHARE_RES5', False):
                self.Mask_Head.share_res5_module(self.Box_Head.res5)
            self.Mask_Outs = mask_rcnn_heads.mask_rcnn_outputs(self.Mask_Head.dim_out)

        # Keypoints Branch
        if CONFIG.MODEL.KEYPOINTS_ON:
            self.Keypoint_Head = get_func(CONFIG.KRCNN.ROI_KEYPOINTS_HEAD)(
                self.rpn.dim_out, self.roi_feature_transform, self.Conv_Body.spatial_scale)
            if getattr(self.Keypoint_Head, 'SHARE_RES5', False):
                self.Keypoint_Head.share_res5_module(self.Box_Head.res5)
            self.Keypoint_Outs = keypoint_rcnn_heads.keypoint_outputs(self.Keypoint_Head.dim_out)

        # NOTE(review): _init_modules is not defined anywhere in this file,
        # so this call raises AttributeError — confirm where it should come
        # from (Detectron's model builder defines it).
        self._init_modules()
        self.init(pretrainfile)

    def init(self, pretrainfile=None):
        """Initialize weights from scratch, or load a self-trained checkpoint.

        Args:
            pretrainfile (str, optional): path passed to torch.load; when
                None, Conv/BN/Linear layers get He-style / constant inits.
        """
        if pretrainfile is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    # He initialization: std = sqrt(2 / fan_out)
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    m.weight.data.normal_(0, .1)
                    m.bias.data.zero_()
        else:
            # map_location keeps CPU loading working for GPU-saved checkpoints
            self.load_state_dict(torch.load(pretrainfile, map_location=lambda storage, loc: storage))
            print ('==> load self-train weights as pretrain.')

    def forward(self, input_local, input_global):
        # NOTE(review): forward is not implemented yet — the training loop in
        # __main__ keeps the model call commented out.
        pass

    def calc_loss(self, pred, gt):
        """Binary cross-entropy (with logits) between prediction and target."""
        loss = nn.BCEWithLogitsLoss()(pred, gt)
        return loss
## main ##
if __name__ == '__main__':
    print ('===>dataset proprocess.... ')
    dataset = CocoDatasetMiniBatch(CONFIG.MYDATASET.TRAIN_DIR, CONFIG.MYDATASET.TRAIN_ANNOFILE,
        gt=True, crowd_filter_thresh=CONFIG.TRAIN.CROWD_FILTER_THRESH)
    # sampler driven by the dataset's ratio_list/ratio_index — presumably
    # groups images of similar aspect ratio into a minibatch; confirm in
    # MinibatchSampler.
    sampler = MinibatchSampler(dataset.ratio_list, dataset.ratio_index)
    print ('===>done. ')

    # effective batch size = number of GPUs * images per batch
    batch_size = len(CONFIG.MYSOLVER.GPU_IDS) * CONFIG.TRAIN.IMS_PER_BATCH
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=CONFIG.MYSOLVER.WORKERS,
        collate_fn=dataset.collate_minibatch)

    print ('===>model building .... ')
    maskRCNN = MaskRCNN(pretrainfile=None)
    print ('===>done. ')

    print ('===>start training .... ')
    for input_data in tqdm(dataloader):
        # wrap every batch tensor in a Variable for autograd
        for key in input_data:
            if key != 'roidb': # roidb is a list of ndarrays with inconsistent length
                input_data[key] = list(map(Variable, input_data[key]))
        # NOTE(review): the forward/backward pass below is commented out, so
        # this loop currently only exercises the data-loading pipeline.
        # net_outputs = maskRCNN(**input_data)
        # loss = net_outputs['total_loss']
        # optimizer.zero_grad()
        # loss.backward()
        # optimizer.step()
        pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.